repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
rchuppala/usc_agent | src/usc-agent-dev/common/source/pyang/pyang/translators/schemanode.py | 1 | 10232 |
# Copyright (c) 2013 by Ladislav Lhotka, CZ.NIC <[email protected]>
#
# Python class representing a node in a RELAX NG schema.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from xml.sax.saxutils import escape
class SchemaNode(object):
"""This class represents a node in a RELAX NG schema.
The details are tailored to the specific features of the hybrid
DSDL schema generated from YANG modules, but the class may be
reasonably used for representing any other RELAX NG schema.
Specific types of nodes are created using class methods below.
Instance variables:
* `self.attr` - dictionary of XML attributes. Keys are attribute
names and values attribute values.
* `self.children` - list of child nodes.
* `self.default` - default value (only for "element" nodes)
* `self.interleave` - boolean flag determining the interleave
status. If True, the children of `self` will end up inside
<interleave>.
* `self.keys` - list of QNames of YANG list keys (only for "_list_"
nodes having children).
* `self.keymap` - dictionary of key nodes (only for "_list_" nodes
having children). The keys of the dictionary are the QNames of
YANG list keys.
* `self.minEl` - minimum number of items (only for "_list_" nodes).
* `self.maxEl` - maximum number of items (only for "_list_" nodes).
* `self.name` - name of the schema node (XML element name).
* `self.occur` - specifies the occurrence status using integer
values: 0=optional, 1=implicit, 2=mandatory, 3=presence.
* `self.parent` - parent node.
* `self.text` - text content.
"""
def element(cls, name, parent=None, interleave=None, occur=0):
"""Create an element node."""
node = cls("element", parent, interleave=interleave)
node.attr["name"] = name
node.occur = occur
return node
element = classmethod(element)
def leaf_list(cls, name, parent=None, interleave=None):
"""Create _list_ node for a leaf-list."""
node = cls("_list_", parent, interleave=interleave)
node.attr["name"] = name
node.keys = None
node.minEl = "0"
node.maxEl = None
node.occur = 3
return node
leaf_list = classmethod(leaf_list)
def list(cls, name, parent=None, interleave=None):
"""Create _list_ node for a list."""
node = cls.leaf_list(name, parent, interleave=interleave)
node.keys = []
node.keymap = {}
return node
list = classmethod(list)
def choice(cls, parent=None, occur=0):
"""Create choice node."""
node = cls("choice", parent)
node.occur = occur
node.default_case = None
return node
choice = classmethod(choice)
def case(cls, parent=None):
"""Create case node."""
node = cls("case", parent)
node.occur = 0
return node
case = classmethod(case)
def define(cls, name, parent=None, interleave=False):
"""Create define node."""
node = cls("define", parent, interleave=interleave)
node.occur = 0
node.attr["name"] = name
return node
define = classmethod(define)
def __init__(self, name, parent=None, text="", interleave=None):
"""Initialize the object under `parent`.
"""
self.name = name
self.parent = parent
if parent is not None: parent.children.append(self)
self.text = text
self.adjust_interleave(interleave)
self.children = []
self.annots = []
self.attr = {}
def serialize_children(self):
"""Return serialization of receiver's children.
"""
return ''.join([ch.serialize() for ch in self.children])
def serialize_annots(self):
"""Return serialization of receiver's annotation elements.
"""
return ''.join([ch.serialize() for ch in self.annots])
def adjust_interleave(self, interleave):
"""Inherit interleave status from parent if undefined."""
if interleave == None and self.parent:
self.interleave = self.parent.interleave
else:
self.interleave = interleave
def subnode(self, node):
"""Make `node` receiver's child."""
self.children.append(node)
node.parent = self
node.adjust_interleave(None)
def annot(self, node):
"""Add `node` as an annotation of the receiver."""
self.annots.append(node)
node.parent = self
def set_attr(self, key, value):
"""Set attribute `key` to `value` and return the receiver."""
self.attr[key] = value
return self
def start_tag(self, alt=None, empty=False):
"""Return XML start tag for the receiver."""
if alt:
name = alt
else:
name = self.name
result = "<" + name
for it in self.attr:
result += ' %s="%s"' % (it, escape(self.attr[it], {'"': "&quot;", '%': "%%"}))
if empty:
return result + "/>%s"
else:
return result + ">"
def end_tag(self, alt=None):
"""Return XML end tag for the receiver."""
if alt:
name = alt
else:
name = self.name
return "</" + name + ">"
def serialize(self, occur=None):
"""Return RELAX NG representation of the receiver and subtree.
"""
fmt = self.ser_format.get(self.name, SchemaNode._default_format)
return fmt(self, occur) % (escape(self.text) +
self.serialize_children())
def _default_format(self, occur):
"""Return the default serialization format."""
if self.text or self.children:
return self.start_tag() + "%s" + self.end_tag()
return self.start_tag(empty=True)
def _wrapper_format(self, occur):
"""Return the serializatiopn format for <start>."""
return self.start_tag() + self._chorder() + self.end_tag()
def _define_format(self, occur):
"""Return the serialization format for a define node."""
if hasattr(self, "default"):
self.attr["nma:default"] = self.default
middle = self._chorder() if self.children else "<empty/>%s"
return self.start_tag() + middle + self.end_tag()
def _element_format(self, occur):
"""Return the serialization format for an element node."""
if occur:
occ = occur
else:
occ = self.occur
if occ == 1:
if hasattr(self, "default"):
self.attr["nma:default"] = self.default
else:
self.attr["nma:implicit"] = "true"
middle = self._chorder() if self.children else "<empty/>%s"
fmt = self.start_tag() + self.serialize_annots() + middle + self.end_tag()
if (occ == 2 or self.parent.name == "choice"
or self.parent.name == "case" and len(self.parent.children) == 1):
return fmt
else:
return "<optional>" + fmt + "</optional>"
def _chorder(self):
"""Add <interleave> if child order is arbitrary."""
if (self.interleave and len(self.children) > 1):
return "<interleave>%s</interleave>"
return "%s"
def _list_format(self, occur):
"""Return the serialization format for a _list_ node."""
if self.keys:
self.attr["nma:key"] = " ".join(self.keys)
keys = ''.join([self.keymap[k].serialize(occur=2)
for k in self.keys])
else:
keys = ""
if self.maxEl:
self.attr["nma:max-elements"] = self.maxEl
if int(self.minEl) == 0:
ord_ = "zeroOrMore"
else:
ord_ = "oneOrMore"
if int(self.minEl) > 1:
self.attr["nma:min-elements"] = self.minEl
middle = self._chorder() if self.children else "<empty/>%s"
return ("<" + ord_ + ">" + self.start_tag("element") +
self.serialize_annots() + keys +
middle + self.end_tag("element") + "</" + ord_ + ">")
def _choice_format(self, occur):
"""Return the serialization format for a choice node."""
middle = "%s" if self.children else "<empty/>%s"
fmt = self.start_tag() + middle + self.end_tag()
if self.occur != 2:
return "<optional>" + fmt + "</optional>"
else:
return fmt
def _case_format(self, occur):
"""Return the serialization format for a case node."""
if self.occur == 1:
self.attr["nma:implicit"] = "true"
ccnt = len(self.children)
if ccnt == 0: return "<empty/>%s"
if ccnt == 1 or not self.interleave:
return self.start_tag("group") + "%s" + self.end_tag("group")
return (self.start_tag("interleave") + "%s" +
self.end_tag("interleave"))
ser_format = { "nma:data": _wrapper_format,
"nma:input": _wrapper_format,
"nma:notification": _wrapper_format,
"nma:output": _wrapper_format,
"element": _element_format,
"_list_": _list_format,
"choice": _choice_format,
"case": _case_format,
"define": _define_format,
}
"""Class variable - dictionary of methods returning string
serialization formats. Keys are node names."""
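# Illustrative usage sketch (not part of the original module): the class
# methods documented above can be combined roughly like this; the exact
# serialized output shown is an assumption based on the formats defined above.
#
#   root = SchemaNode("nma:data")
#   SchemaNode.element("foo", parent=root, occur=2)
#   root.serialize()
#   # -> '<nma:data><element name="foo"><empty/></element></nma:data>'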
| gpl-2.0 | 3,778,500,691,622,639,600 | 35.412811 | 89 | 0.57672 | false |
lukaszb/django-richtemplates | richtemplates/listeners.py | 1 | 1369 |
import logging
from django.db.models import signals
from django.db import DatabaseError, IntegrityError
from django.contrib.auth.models import User
from richtemplates.utils import get_user_profile_model
def new_richtemplates_profile(instance, **kwargs):
if kwargs['created'] is True:
UserProfile = get_user_profile_model()
if UserProfile is not None:
try:
# We run get_or_create instead of create as there may be other
# handlers which would automaticaly create profiles
UserProfile.objects.get_or_create(
user = instance,
)
logging.debug("New profile created for user %s" % instance)
except (DatabaseError, IntegrityError), err:
logging.warning("Richtemplates tried to create profile for new "
"user %s but it seems there is already one or "
"profile table does not exist. "
"Original error: %s" % (instance, err))
logging.warning("Consider running syncdb again after issue is "
"resolved")
def start_listening():
signals.post_save.connect(new_richtemplates_profile, sender=User,
dispatch_uid="richtemplates.listeners.new_richtemplates_profile")
| bsd-3-clause | 9,204,878,192,692,085,000 | 41.78125 | 80 | 0.604091 | false |
spotify/crtauth | test/roundtrip_test.py | 1 | 14010 |
# Copyright (c) 2011-2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
from crtauth import server
from crtauth import key_provider
from crtauth import rsa
from crtauth import protocol
from crtauth import ssh
from crtauth import exceptions
from crtauth import msgpack_protocol
from crtauth.client import create_response
from crtauth.server import create_response as server_create_response
inner_s = ("AAAAB3NzaC1yc2EAAAABIwAAAQEArt7xdaxlbzzGlgLhqpLuE5x9d+so0M"
"JiqQSmiUJojuK+v1cxnYCnQQPF0BkAhw2hiFiDvLLVogIu8m2wCV9XAGxrz38NLHVq"
"ke+EAduJAfiiD1iwvSLbFBOMVRYfzUoiuPIudwZqmLuCpln1RUE6O/ujmYNyoPS4fq"
"a1svaiZ4C77tLMi2ztMIX97SN2o0EntrhOonJ1nk+7JLYvkhsT8rX20bg6Mlu909iO"
"vtTbElnypKzmjFZyBvzZhocRo4yfrekP3s2QyKSIB5ARGenoSoQa43cD93tqbLGK4o"
"JSkkfxc9HFPo0t+deDorZmelNNFvEn5KeqP0HJvw/jm2U1PQ==")
s = ("ssh-rsa %s [email protected]" % inner_s)
t_pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDK0wNhgGlFZf"
"BoRBS+M8wGoyOOVunYYjeaoRXKFKfhx288ZIo87WMfN6i5KnUTH3A/mYlVnK4bh"
"chS6dUFisaXcURvFgY46pUSGuLTZxTe9anIIR/iT+V+8MRDHXffRGOCLEQUl0le"
"YTht0dc7rxaW42d83yC7uuCISbgWqOANvMkZYqZjaejOOGVpkApxLGG8K8RvNBB"
"M8TYqE3DQHSyRVU6S9HWLbWF+i8W2h4CLX2Quodf0c1dcqlftClHjdIyed/zQKh"
"Ao+FDcJrN+2ZDJ0mkYLVlJDZuLk/K/vSOwD3wXhby3cdHCsxnRfy2Ylnt31VF0a"
"VtlhW4IJ+5mMzmz [email protected]")
test_priv_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAytMDYYBpRWXwaEQUvjPMBqMjjlbp2GI3mqEVyhSn4cdvPGSK
PO1jHzeouSp1Ex9wP5mJVZyuG4XIUunVBYrGl3FEbxYGOOqVEhri02cU3vWpyCEf
4k/lfvDEQx1330RjgixEFJdJXmE4bdHXO68WluNnfN8gu7rgiEm4FqjgDbzJGWKm
Y2nozjhlaZAKcSxhvCvEbzQQTPE2KhNw0B0skVVOkvR1i21hfovFtoeAi19kLqHX
9HNXXKpX7QpR43SMnnf80CoQKPhQ3CazftmQydJpGC1ZSQ2bi5Pyv70jsA98F4W8
t3HRwrMZ0X8tmJZ7d9VRdGlbZYVuCCfuZjM5swIDAQABAoIBADtnoHbfQHYGDGrN
ffHTg+9xuslG5YjuA3EzuwkMEbvMSOU8YUzFDqInEDDjoZSvQZYvJw0/LbN79Jds
S2srIU1b7HpIzhu/gVfjLgpTB8bh1w95vDfxxLrwU9uAdwqaojaPNoV9ZgzRltB7
hHnDp28cPcRSKekyK+9fAB8K6Uy8N00hojBDwtwXM8C4PpQKod38Vd0Adp9dEdX6
Ro9suYb+d+qFalYbKIbjKWkll+ZiiGJjF1HSQCTwlzS2haPXUlbk57HnN+8ar+a3
ITTc2gbNuTqBRD1V/gCaD9F0npVI3mQ34eUADNVVGS0xw0pN4j++Da8KXP+pyn/G
DU/n8SECgYEA/KN4BTrg/LB7cGrzkMQmW26NA++htjiWHK3WTsQBKBDFyReJBn67
o9kMTHBP35352RfuJ3xEEJ0/ddqGEY/SzNk3HMTlxBbR5Xq8ye102dxfEO3eijJ/
F4VRSf9sFgdRoLvE62qLudytK4Ku9nnKoIqrMxFweTpwxzf2jjIKDbECgYEAzYXe
QxT1A/bfs5Qd6xoCVOAb4T/ALqFo95iJu4EtFt7nvt7avqL+Vsdxu5uBkTeEUHzh
1q47LFoFdGm+MesIIiPSSrbfZJ6ht9kw8EbF8Py85X4LBXey67JlzzUq+ewFEP91
do7uGQAY+BRwXtzzPqaVBVa94YOxdq/AGutrIqMCgYBr+cnQImwKU7tOPse+tbbX
GRa3+fEZmnG97CZOH8OGxjRiT+bGmd/ElX2GJfJdVn10ZZ/pzFii6TI4Qp9OXjPw
TV4as6Sn/EDVXXHWs+BfRKp059VXJ2HeQaKOh9ZAS/x9QANXwn/ZfhGdKQtyWHdb
yiiFeQyjI3EUFD0SZRya4QKBgA1QvQOvmeg12Gx0DjQrLTd+hY/kZ3kd8AUKlvHU
/qzaqD0PhzCOstfAeDflbVGRPTtRu/gCtca71lqidzYYuiAsHfXFP1fvhx64LZmD
nFNurHZZ4jDqfmcS2dHA6hXjGrjtNBkITZjFDtkTyev7eK74b/M2mXrA44CDBnk4
A2rtAoGAMv92fqI+B5taxlZhTLAIaGVFbzoASHTRl3eQJbc4zc38U3Zbiy4deMEH
3QTXq7nxWpE4YwHbgXAeJUGfUpE+nEZGMolj1Q0ueKuSstQg5p1nwhQIxej8EJW+
7siqmOTZDKzieik7KVzaJ/U02Q186smezKIuAOYtT8VCf9UksJ4=
-----END RSA PRIVATE KEY-----"""
class RoundtripTest(unittest.TestCase):
def test_read_base64_key(self):
key = rsa.RSAPublicKey(s)
self.assertEqual(key.fingerprint(), "\xfb\xa1\xeao\xd3y")
self.assertEqual(key.decoded, inner_s)
self.assertEqual(key.encoded[:15], "\x00\x00\x00\x07ssh-rsa"
"\x00\x00\x00\x01")
def test_read_binary_key(self):
key = rsa.RSAPublicKey(ssh.base64url_decode(s.split(" ")[1]))
self.assertEqual(key.fingerprint(), "\xfb\xa1\xeao\xd3y")
self.assertEqual(key.decoded, inner_s)
self.assertEqual(key.encoded[:15], "\x00\x00\x00\x07ssh-rsa"
"\x00\x00\x00\x01")
def test_create_challenge(self):
auth_server = server.AuthServer("gurka", DummyKeyProvider(),
"server.name")
s = auth_server.create_challenge("noa")
cb = ssh.base64url_decode(s)
verifiable_payload = protocol.VerifiablePayload.deserialize(cb)
challenge = protocol.Challenge.deserialize(verifiable_payload.payload)
self.assertEquals("\xfb\xa1\xeao\xd3y", challenge.fingerprint)
def test_create_challenge_v1(self):
auth_server = server.AuthServer("secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("noa", 1)
cb = ssh.base64url_decode(challenge)
decoded_challenge = msgpack_protocol.Challenge.deserialize(cb)
self.assertEquals("\xfb\xa1\xeao\xd3y", decoded_challenge.fingerprint)
def test_create_challenge_no_legacy_support(self):
auth_server = server.AuthServer("secret", DummyKeyProvider(),
"server.name",
lowest_supported_version=1)
self.assertRaises(exceptions.ProtocolVersionError,
auth_server.create_challenge, "noa")
def test_create_challenge_v1_another(self):
auth_server = server.AuthServer("secret", DummyKeyProvider(),
"server.name",
lowest_supported_version=1)
challenge = auth_server.create_challenge("noa", 1)
cb = ssh.base64url_decode(challenge)
decoded_challenge = msgpack_protocol.Challenge.deserialize(cb)
self.assertEquals("\xfb\xa1\xeao\xd3y", decoded_challenge.fingerprint)
def test_authentication_roundtrip(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server.create_token(response)
self.assertTrue(auth_server.validate_token(token))
def test_authentication_roundtrip_v1(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("test", 1)
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server.create_token(response)
self.assertTrue(auth_server.validate_token(token))
def test_authentication_roundtrip_mitm1(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server.create_challenge("test")
try:
create_response(challenge, "another.server",
ssh.SingleKeySigner(test_priv_key))
self.fail("Should have gotten InvalidInputException")
except exceptions.InvalidInputException:
pass
def test_authentication_roundtrip_mitm2(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"another.server")
try:
auth_server_b.create_token(response)
self.fail("should have thrown exception")
except exceptions.InvalidInputException:
pass
def test_create_token_too_new(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() - 1000)
try:
auth_server_b.create_token(response)
self.fail("Should have issued InvalidInputException, "
"challenge too new")
except exceptions.InvalidInputException:
pass
def test_create_token_invalid_duration(self):
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
token = auth_server._make_token("some_user", int(time.time()) + 3600)
self.assertRaises(exceptions.InvalidInputException,
auth_server.validate_token, token)
def test_create_token_too_old(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() + 1000)
try:
auth_server_b.create_token(response)
self.fail("Should have issued InvalidInputException, "
"challenge too old")
except exceptions.InvalidInputException:
pass
def test_create_token_invalid_input(self):
auth_server = server.AuthServer("gurka", DummyKeyProvider(),
"server.name")
for t in ("2tYneWsOm88qu_Trzahw2r6ZLg37oepv03mykGS-HdcnWJLuUMDOmfVI"
"Wl5n3U6qt6Fub2E", "random"):
try:
auth_server.create_token(t)
self.fail("Input is invalid, should have thrown exception")
except exceptions.ProtocolError:
pass
def test_validate_token_too_old(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server_a.create_token(response)
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() + 1000)
try:
auth_server_b.validate_token(token)
self.fail("Should have issued TokenExpiredException, "
"token too old")
except exceptions.TokenExpiredException:
pass
def test_validate_token_too_new(self):
auth_server_a = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name")
challenge = auth_server_a.create_challenge("test")
response = create_response(challenge, "server.name",
ssh.SingleKeySigner(test_priv_key))
token = auth_server_a.create_token(response)
auth_server_b = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: time.time() - 1000)
try:
auth_server_b.validate_token(token)
self.fail("Should have issued TokenExpiredException, "
"token too new")
except exceptions.TokenExpiredException:
pass
def test_validate_token_wrong_secret(self):
token = "dgAAAJgtmNoqST9RaxayI7UP5-GLviUDAAAAFHQAAABUJYr_VCWLPQAAAAR0ZXN0"
auth_server = server.AuthServer("server_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: 1411746561.058992)
auth_server.validate_token(token)
auth_server = server.AuthServer("wrong_secret", DummyKeyProvider(),
"server.name",
now_func=lambda: 1411746561.058992)
try:
auth_server.validate_token(token)
self.fail("Should have gotten InvalidInputException")
except exceptions.InvalidInputException:
pass
def test_b64_roundtrip(self):
l = ["a", "ab", "abc", "abcd"]
for i in l:
self.assertEquals(ssh.base64url_decode(ssh.base64url_encode(i)), i)
def test_compatibility_create_response(self):
self.assertEqual(server_create_response, create_response)
class DummyKeyProvider(key_provider.KeyProvider):
def get_key(self, username):
if username == 'noa':
return rsa.RSAPublicKey(s)
elif username == 'test':
return rsa.RSAPublicKey(t_pubkey)
else:
raise exceptions.CrtAuthError("Unknown username: %s" % username)
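# Handshake sketch (illustrative; it simply restates the flow exercised by
# test_authentication_roundtrip above, using the same dummy key provider and
# test key):
#
#   auth_server = server.AuthServer("server_secret", DummyKeyProvider(), "server.name")
#   challenge = auth_server.create_challenge("test")
#   response = create_response(challenge, "server.name", ssh.SingleKeySigner(test_priv_key))
#   token = auth_server.create_token(response)
#   auth_server.validate_token(token)  # truthy on success, raises on expiry or tampering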
| apache-2.0 | 1,554,741,149,084,993,500 | 47.310345 | 82 | 0.635261 | false |
Litetokens/litetokensd | litetokensd.py | 1 | 63305 |
#! /usr/bin/env python3
import os
import argparse
import json
import decimal
import sys
import logging
import logging.handlers
import unicodedata
import time
import dateutil.parser
import calendar
import configparser
import traceback
import threading
from threading import Thread
import binascii
from fractions import Fraction
import requests
import appdirs
from prettytable import PrettyTable
from lockfile import LockFile
from lib import config, api, util, exceptions, litecoin, blocks, blockchain
if os.name == 'nt':
from lib import util_windows
D = decimal.Decimal
json_print = lambda x: print(json.dumps(x, sort_keys=True, indent=4))
def get_address (db, address):
address_dict = {}
address_dict['balances'] = util.api('get_balances', {'filters': [('address', '==', address),]})
address_dict['debits'] = util.api('get_debits', {'filters': [('address', '==', address),]})
address_dict['credits'] = util.api('get_credits', {'filters': [('address', '==', address),]})
address_dict['burns'] = util.api('get_burns', {'filters': [('source', '==', address),]})
address_dict['sends'] = util.api('get_sends', {'filters': [('source', '==', address), ('destination', '==', address)], 'filterop': 'or'})
address_dict['orders'] = util.api('get_orders', {'filters': [('source', '==', address),]})
address_dict['order_matches'] = util.api('get_order_matches', {'filters': [('tx0_address', '==', address), ('tx1_address', '==', address)], 'filterop': 'or'})
address_dict['ltcpays'] = util.api('get_ltcpays', {'filters': [('source', '==', address), ('destination', '==', address)], 'filterop': 'or'})
address_dict['issuances'] = util.api('get_issuances', {'filters': [('source', '==', address),]})
address_dict['broadcasts'] = util.api('get_broadcasts', {'filters': [('source', '==', address),]})
address_dict['bets'] = util.api('get_bets', {'filters': [('source', '==', address),]})
address_dict['bet_matches'] = util.api('get_bet_matches', {'filters': [('tx0_address', '==', address), ('tx1_address', '==', address)], 'filterop': 'or'})
address_dict['dividends'] = util.api('get_dividends', {'filters': [('source', '==', address),]})
address_dict['cancels'] = util.api('get_cancels', {'filters': [('source', '==', address),]})
address_dict['rps'] = util.api('get_rps', {'filters': [('source', '==', address),]})
address_dict['rps_matches'] = util.api('get_rps_matches', {'filters': [('tx0_address', '==', address), ('tx1_address', '==', address)], 'filterop': 'or'})
address_dict['callbacks'] = util.api('get_callbacks', {'filters': [('source', '==', address),]})
address_dict['bet_expirations'] = util.api('get_bet_expirations', {'filters': [('source', '==', address),]})
address_dict['order_expirations'] = util.api('get_order_expirations', {'filters': [('source', '==', address),]})
address_dict['rps_expirations'] = util.api('get_rps_expirations', {'filters': [('source', '==', address),]})
address_dict['bet_match_expirations'] = util.api('get_bet_match_expirations', {'filters': [('tx0_address', '==', address), ('tx1_address', '==', address)], 'filterop': 'or'})
address_dict['order_match_expirations'] = util.api('get_order_match_expirations', {'filters': [('tx0_address', '==', address), ('tx1_address', '==', address)], 'filterop': 'or'})
address_dict['rps_match_expirations'] = util.api('get_rps_match_expirations', {'filters': [('tx0_address', '==', address), ('tx1_address', '==', address)], 'filterop': 'or'})
return address_dict
def format_order (order):
give_quantity = util.devise(db, D(order['give_quantity']), order['give_asset'], 'output')
get_quantity = util.devise(db, D(order['get_quantity']), order['get_asset'], 'output')
give_remaining = util.devise(db, D(order['give_remaining']), order['give_asset'], 'output')
get_remaining = util.devise(db, D(order['get_remaining']), order['get_asset'], 'output')
give_asset = order['give_asset']
get_asset = order['get_asset']
if get_asset < give_asset:
price = util.devise(db, D(order['get_quantity']) / D(order['give_quantity']), 'price', 'output')
price_assets = get_asset + '/' + give_asset + ' ask'
else:
price = util.devise(db, D(order['give_quantity']) / D(order['get_quantity']), 'price', 'output')
price_assets = give_asset + '/' + get_asset + ' bid'
return [D(give_remaining), give_asset, price, price_assets, str(order['fee_required'] / config.UNIT), str(order['fee_provided'] / config.UNIT), order['expire_index'] - util.last_block(db)['block_index'], order['tx_hash']]
def format_bet (bet):
odds = D(bet['counterwager_quantity']) / D(bet['wager_quantity'])
if not bet['target_value']: target_value = None
else: target_value = bet['target_value']
if not bet['leverage']: leverage = None
else: leverage = util.devise(db, D(bet['leverage']) / 5040, 'leverage', 'output')
return [util.BET_TYPE_NAME[bet['bet_type']], bet['feed_address'], util.isodt(bet['deadline']), target_value, leverage, str(bet['wager_remaining'] / config.UNIT) + ' XLT', util.devise(db, odds, 'odds', 'output'), bet['expire_index'] - util.last_block(db)['block_index'], bet['tx_hash']]
def format_order_match (db, order_match):
order_match_id = order_match['tx0_hash'] + order_match['tx1_hash']
order_match_time_left = order_match['match_expire_index'] - util.last_block(db)['block_index']
return [order_match_id, order_match_time_left]
def format_feed (feed):
timestamp = util.isodt(feed['timestamp'])
if not feed['text']:
text = '<Locked>'
else:
text = feed['text']
return [feed['source'], timestamp, text, feed['value'], D(feed['fee_fraction_int']) / D(1e8)]
def market (give_asset, get_asset):
# Your Pending Orders Matches.
addresses = []
for bunch in litecoin.get_wallet():
addresses.append(bunch[:2][0])
filters = [
('tx0_address', 'IN', addresses),
('tx1_address', 'IN', addresses)
]
awaiting_ltcs = util.api('get_order_matches', {'filters': filters, 'filterop': 'OR', 'status': 'pending'})
table = PrettyTable(['Matched Order ID', 'Time Left'])
for order_match in awaiting_ltcs:
order_match = format_order_match(db, order_match)
table.add_row(order_match)
print('Your Pending Order Matches')
print(table)
print('\n')
# Open orders.
orders = util.api('get_orders', {'status': 'open'})
table = PrettyTable(['Give Quantity', 'Give Asset', 'Price', 'Price Assets', 'Required {} Fee'.format(config.LTC), 'Provided {} Fee'.format(config.LTC), 'Time Left', 'Tx Hash'])
for order in orders:
if give_asset and order['give_asset'] != give_asset: continue
if get_asset and order['get_asset'] != get_asset: continue
order = format_order(order)
table.add_row(order)
print('Open Orders')
table = table.get_string(sortby='Price')
print(table)
print('\n')
# Open bets.
bets = util.api('get_bets', {'status': 'open'})
table = PrettyTable(['Bet Type', 'Feed Address', 'Deadline', 'Target Value', 'Leverage', 'Wager', 'Odds', 'Time Left', 'Tx Hash'])
for bet in bets:
bet = format_bet(bet)
table.add_row(bet)
print('Open Bets')
print(table)
print('\n')
# Feeds
broadcasts = util.api('get_broadcasts', {'status': 'valid', 'order_by': 'timestamp', 'order_dir': 'desc'})
table = PrettyTable(['Feed Address', 'Timestamp', 'Text', 'Value', 'Fee Fraction'])
seen_addresses = []
for broadcast in broadcasts:
# Only show feeds with broadcasts in the last two weeks.
last_block_time = util.last_block(db)['block_time']
if broadcast['timestamp'] + config.TWO_WEEKS < last_block_time:
continue
# Always show only the latest broadcast from a feed address.
if broadcast['source'] not in seen_addresses:
feed = format_feed(broadcast)
table.add_row(feed)
seen_addresses.append(broadcast['source'])
else:
continue
print('Feeds')
print(table)
def cli(method, params, unsigned):
# Get unsigned transaction serialisation.
array = params['source'].split('_')
if len(array) > 1:
signatures_required, signatures_possible = array[0], array[-1]
params['source'] = '_'.join([signatures_required] + sorted(array[1:-1]) + [signatures_possible]) # Sort source array.
pubkey = None
else:
# Get public key for source.
source = array[0]
pubkey = None
if not litecoin.is_valid(source):
raise exceptions.AddressError('Invalid address.')
if litecoin.is_mine(source):
litecoin.wallet_unlock()
else:
# TODO: Do this only if the encoding method needs it.
print('Source not in backend wallet.')
answer = input('Public key (hexadecimal) or Private key (Wallet Import Format): ')
# Public key or private key?
try:
binascii.unhexlify(answer) # Check if hex.
pubkey = answer # If hex, assume public key.
private_key_wif = None
except binascii.Error:
private_key_wif = answer # Else, assume private key.
pubkey = litecoin.private_key_to_public_key(private_key_wif)
params['pubkey'] = pubkey
""" # NOTE: For debugging, e.g. with `Invalid Params` error.
tx_info = sys.modules['lib.send'].compose(db, params['source'], params['destination'], params['asset'], params['quantity'])
print(litecoin.transaction(db, tx_info, encoding=params['encoding'],
fee_per_kb=params['fee_per_kb'],
regular_dust_size=params['regular_dust_size'],
multisig_dust_size=params['multisig_dust_size'],
op_return_value=params['op_return_value'],
self_public_key_hex=pubkey,
allow_unconfirmed_inputs=params['allow_unconfirmed_inputs']))
exit(0)
"""
# Construct transaction.
unsigned_tx_hex = util.api(method, params)
print('Transaction (unsigned):', unsigned_tx_hex)
# Ask to sign and broadcast (if not multi‐sig).
if len(array) > 1:
print('Multi‐signature transactions are signed and broadcasted manually.')
elif not unsigned and input('Sign and broadcast? (y/N) ') == 'y':
if litecoin.is_mine(source):
private_key_wif = None
elif not private_key_wif: # If private key was not given earlier.
private_key_wif = input('Private key (Wallet Import Format): ')
# Sign and broadcast.
signed_tx_hex = litecoin.sign_tx(unsigned_tx_hex, private_key_wif=private_key_wif)
print('Transaction (signed):', signed_tx_hex)
print('Hash of transaction (broadcasted):', litecoin.broadcast_tx(signed_tx_hex))
def set_options (data_dir=None, backend_rpc_connect=None,
backend_rpc_port=None, backend_rpc_user=None, backend_rpc_password=None,
backend_rpc_ssl=False, backend_rpc_ssl_verify=True,
blockchain_service_name=None, blockchain_service_connect=None,
rpc_host=None, rpc_port=None, rpc_user=None,
rpc_password=None, rpc_allow_cors=None, log_file=None,
config_file=None, database_file=None, testnet=False,
testcoin=False, carefulness=0, force=False,
broadcast_tx_mainnet=None):
if force:
config.FORCE = force
else:
config.FORCE = False
# Data directory
if not data_dir:
config.DATA_DIR = appdirs.user_data_dir(appauthor=config.XLT_NAME, appname=config.XLT_CLIENT, roaming=True)
else:
config.DATA_DIR = os.path.expanduser(data_dir)
if not os.path.isdir(config.DATA_DIR): os.mkdir(config.DATA_DIR)
# Configuration file
configfile = configparser.ConfigParser()
if config_file:
config_path = config_file
else:
config_path = os.path.join(config.DATA_DIR, '{}.conf'.format(config.XLT_CLIENT))
configfile.read(config_path)
has_config = 'Default' in configfile
#logging.debug("Config file: %s; Exists: %s" % (config_path, "Yes" if has_config else "No"))
# testnet
if testnet:
config.TESTNET = testnet
elif has_config and 'testnet' in configfile['Default']:
config.TESTNET = configfile['Default'].getboolean('testnet')
else:
config.TESTNET = False
# testcoin
if testcoin:
config.TESTCOIN = testcoin
elif has_config and 'testcoin' in configfile['Default']:
config.TESTCOIN = configfile['Default'].getboolean('testcoin')
else:
config.TESTCOIN = False
# carefulness (check conservation of assets)
if carefulness:
config.CAREFULNESS = carefulness
elif has_config and 'carefulness' in configfile['Default']:
config.CAREFULNESS = configfile['Default'].getboolean('carefulness')
else:
config.CAREFULNESS = 0
##############
# THINGS WE CONNECT TO
# Backend RPC host (Litecoin Core)
if backend_rpc_connect:
config.BACKEND_RPC_CONNECT = backend_rpc_connect
elif has_config and 'backend-rpc-connect' in configfile['Default'] and configfile['Default']['backend-rpc-connect']:
config.BACKEND_RPC_CONNECT = configfile['Default']['backend-rpc-connect']
elif has_config and 'litecoind-rpc-connect' in configfile['Default'] and configfile['Default']['litecoind-rpc-connect']:
config.BACKEND_RPC_CONNECT = configfile['Default']['litecoind-rpc-connect']
else:
config.BACKEND_RPC_CONNECT = 'localhost'
# Backend Core RPC port (Litecoin Core)
if backend_rpc_port:
config.BACKEND_RPC_PORT = backend_rpc_port
elif has_config and 'backend-rpc-port' in configfile['Default'] and configfile['Default']['backend-rpc-port']:
config.BACKEND_RPC_PORT = configfile['Default']['backend-rpc-port']
elif has_config and 'litecoind-rpc-port' in configfile['Default'] and configfile['Default']['litecoind-rpc-port']:
config.BACKEND_RPC_PORT = configfile['Default']['litecoind-rpc-port']
else:
if config.TESTNET:
config.BACKEND_RPC_PORT = config.DEFAULT_BACKEND_RPC_PORT_TESTNET
else:
config.BACKEND_RPC_PORT = config.DEFAULT_BACKEND_RPC_PORT
try:
config.BACKEND_RPC_PORT = int(config.BACKEND_RPC_PORT)
if not (int(config.BACKEND_RPC_PORT) > 1 and int(config.BACKEND_RPC_PORT) < 65535):
raise exceptions.ConfigurationError('invalid backend API port number')
except:
raise Exception("Please specific a valid port number backend-rpc-port configuration parameter")
# Backend Core RPC user (Litecoin Core)
if backend_rpc_user:
config.BACKEND_RPC_USER = backend_rpc_user
elif has_config and 'backend-rpc-user' in configfile['Default'] and configfile['Default']['backend-rpc-user']:
config.BACKEND_RPC_USER = configfile['Default']['backend-rpc-user']
elif has_config and 'litecoind-rpc-user' in configfile['Default'] and configfile['Default']['litecoind-rpc-user']:
config.BACKEND_RPC_USER = configfile['Default']['litecoind-rpc-user']
else:
config.BACKEND_RPC_USER = 'litecoinrpc'
# Backend Core RPC password (Litecoin Core)
if backend_rpc_password:
config.BACKEND_RPC_PASSWORD = backend_rpc_password
elif has_config and 'backend-rpc-password' in configfile['Default'] and configfile['Default']['backend-rpc-password']:
config.BACKEND_RPC_PASSWORD = configfile['Default']['backend-rpc-password']
elif has_config and 'litecoind-rpc-password' in configfile['Default'] and configfile['Default']['litecoind-rpc-password']:
config.BACKEND_RPC_PASSWORD = configfile['Default']['litecoind-rpc-password']
else:
raise exceptions.ConfigurationError('backend RPC password not set. (Use configuration file or --backend-rpc-password=PASSWORD)')
# Backend Core RPC SSL
if backend_rpc_ssl:
config.BACKEND_RPC_SSL= backend_rpc_ssl
elif has_config and 'backend-rpc-ssl' in configfile['Default'] and configfile['Default']['backend-rpc-ssl']:
config.BACKEND_RPC_SSL = configfile['Default']['backend-rpc-ssl']
else:
config.BACKEND_RPC_SSL = False # Default to off.
# Backend Core RPC SSL Verify
if backend_rpc_ssl_verify:
config.BACKEND_RPC_SSL_VERIFY = backend_rpc_ssl_verify
elif has_config and 'backend-rpc-ssl-verify' in configfile['Default'] and configfile['Default']['backend-rpc-ssl-verify']:
config.BACKEND_RPC_SSL_VERIFY = configfile['Default']['backend-rpc-ssl-verify']
else:
config.BACKEND_RPC_SSL_VERIFY = False # Default to off (support self‐signed certificates)
# Construct backend URL.
config.BACKEND_RPC = config.BACKEND_RPC_USER + ':' + config.BACKEND_RPC_PASSWORD + '@' + config.BACKEND_RPC_CONNECT + ':' + str(config.BACKEND_RPC_PORT)
if config.BACKEND_RPC_SSL:
config.BACKEND_RPC = 'https://' + config.BACKEND_RPC
else:
config.BACKEND_RPC = 'http://' + config.BACKEND_RPC
# blockchain service name
if blockchain_service_name:
config.BLOCKCHAIN_SERVICE_NAME = blockchain_service_name
elif has_config and 'blockchain-service-name' in configfile['Default'] and configfile['Default']['blockchain-service-name']:
config.BLOCKCHAIN_SERVICE_NAME = configfile['Default']['blockchain-service-name']
else:
config.BLOCKCHAIN_SERVICE_NAME = 'blockr'
# custom blockchain service API endpoint
# leave blank to use the default. if specified, include the scheme prefix and port, without a trailing slash (e.g. http://localhost:3001)
if blockchain_service_connect:
config.BLOCKCHAIN_SERVICE_CONNECT = blockchain_service_connect
elif has_config and 'blockchain-service-connect' in configfile['Default'] and configfile['Default']['blockchain-service-connect']:
config.BLOCKCHAIN_SERVICE_CONNECT = configfile['Default']['blockchain-service-connect']
else:
config.BLOCKCHAIN_SERVICE_CONNECT = None #use default specified by the library
##############
# THINGS WE SERVE
# litetokensd API RPC host
if rpc_host:
config.RPC_HOST = rpc_host
elif has_config and 'rpc-host' in configfile['Default'] and configfile['Default']['rpc-host']:
config.RPC_HOST = configfile['Default']['rpc-host']
else:
config.RPC_HOST = 'localhost'
# litetokensd API RPC port
if rpc_port:
config.RPC_PORT = rpc_port
elif has_config and 'rpc-port' in configfile['Default'] and configfile['Default']['rpc-port']:
config.RPC_PORT = configfile['Default']['rpc-port']
else:
if config.TESTNET:
if config.TESTCOIN:
config.RPC_PORT = config.DEFAULT_RPC_PORT_TESTNET + 1
else:
config.RPC_PORT = config.DEFAULT_RPC_PORT_TESTNET
else:
if config.TESTCOIN:
config.RPC_PORT = config.DEFAULT_RPC_PORT + 1
else:
config.RPC_PORT = config.DEFAULT_RPC_PORT
try:
config.RPC_PORT = int(config.RPC_PORT)
if not (int(config.RPC_PORT) > 1 and int(config.RPC_PORT) < 65535):
raise exceptions.ConfigurationError('invalid litetokensd API port number')
except:
raise Exception("Please specific a valid port number rpc-port configuration parameter")
# litetokensd API RPC user
if rpc_user:
config.RPC_USER = rpc_user
elif has_config and 'rpc-user' in configfile['Default'] and configfile['Default']['rpc-user']:
config.RPC_USER = configfile['Default']['rpc-user']
else:
config.RPC_USER = 'rpc'
# litetokensd API RPC password
if rpc_password:
config.RPC_PASSWORD = rpc_password
elif has_config and 'rpc-password' in configfile['Default'] and configfile['Default']['rpc-password']:
config.RPC_PASSWORD = configfile['Default']['rpc-password']
else:
raise exceptions.ConfigurationError('RPC password not set. (Use configuration file or --rpc-password=PASSWORD)')
config.RPC = 'http://' + config.RPC_USER + ':' + config.RPC_PASSWORD + '@' + config.RPC_HOST + ':' + str(config.RPC_PORT)
# RPC CORS
if rpc_allow_cors:
config.RPC_ALLOW_CORS = rpc_allow_cors
elif has_config and 'rpc-allow-cors' in configfile['Default'] and configfile['Default']['rpc-allow-cors']:
config.RPC_ALLOW_CORS = configfile['Default'].getboolean('rpc-allow-cors')
else:
config.RPC_ALLOW_CORS = True
##############
# OTHER SETTINGS
# Log
if log_file:
config.LOG = log_file
elif has_config and 'log-file' in configfile['Default'] and configfile['Default']['log-file']:
config.LOG = configfile['Default']['log-file']
else:
string = config.XLT_CLIENT
if config.TESTNET:
string += '.testnet'
if config.TESTCOIN:
string += '.testcoin'
config.LOG = os.path.join(config.DATA_DIR, string + '.log')
# Encoding
if config.TESTCOIN:
config.PREFIX = b'XX' # 2 bytes (possibly accidentally created)
else:
config.PREFIX = b'XLTTOKEN' # 8 bytes
# Database
if database_file:
config.DATABASE = database_file
elif has_config and 'database-file' in configfile['Default'] and configfile['Default']['database-file']:
config.DATABASE = configfile['Default']['database-file']
else:
string = '{}.'.format(config.XLT_CLIENT) + str(config.VERSION_MAJOR)
if config.TESTNET:
string += '.testnet'
if config.TESTCOIN:
string += '.testcoin'
config.DATABASE = os.path.join(config.DATA_DIR, string + '.db')
# (more) Testnet
if config.TESTNET:
config.MAGIC_BYTES = config.MAGIC_BYTES_TESTNET
if config.TESTCOIN:
config.ADDRESSVERSION = config.ADDRESSVERSION_TESTNET
config.BLOCK_FIRST = config.BLOCK_FIRST_TESTNET_TESTCOIN
config.BURN_START = config.BURN_START_TESTNET_TESTCOIN
config.BURN_END = config.BURN_END_TESTNET_TESTCOIN
config.UNSPENDABLE = config.UNSPENDABLE_TESTNET
else:
config.ADDRESSVERSION = config.ADDRESSVERSION_TESTNET
config.BLOCK_FIRST = config.BLOCK_FIRST_TESTNET
config.BURN_START = config.BURN_START_TESTNET
config.BURN_END = config.BURN_END_TESTNET
config.UNSPENDABLE = config.UNSPENDABLE_TESTNET
else:
config.MAGIC_BYTES = config.MAGIC_BYTES_MAINNET
if config.TESTCOIN:
config.ADDRESSVERSION = config.ADDRESSVERSION_MAINNET
config.BLOCK_FIRST = config.BLOCK_FIRST_MAINNET_TESTCOIN
config.BURN_START = config.BURN_START_MAINNET_TESTCOIN
config.BURN_END = config.BURN_END_MAINNET_TESTCOIN
config.UNSPENDABLE = config.UNSPENDABLE_MAINNET
else:
config.ADDRESSVERSION = config.ADDRESSVERSION_MAINNET
config.BLOCK_FIRST = config.BLOCK_FIRST_MAINNET
config.BURN_START = config.BURN_START_MAINNET
config.BURN_END = config.BURN_END_MAINNET
config.UNSPENDABLE = config.UNSPENDABLE_MAINNET
# method used to broadcast signed transactions. litecoind or bci (default: litecoind)
if broadcast_tx_mainnet:
config.BROADCAST_TX_MAINNET = broadcast_tx_mainnet
elif has_config and 'broadcast-tx-mainnet' in configfile['Default']:
config.BROADCAST_TX_MAINNET = configfile['Default']['broadcast-tx-mainnet']
else:
config.BROADCAST_TX_MAINNET = '{}'.format(config.LTC_CLIENT)
def balances (address):
litecoin.validate_address(address, util.last_block(db)['block_index'])
address_data = get_address(db, address=address)
balances = address_data['balances']
table = PrettyTable(['Asset', 'Amount'])
table.add_row([config.LTC, blockchain.getaddressinfo(address)['balance']]) # LTC
for balance in balances:
asset = balance['asset']
quantity = util.devise(db, balance['quantity'], balance['asset'], 'output')
table.add_row([asset, quantity])
print('Balances')
print(table.get_string())
def generate_move_random_hash(move):
move = int(move).to_bytes(2, byteorder='big')
random = os.urandom(16)
move_random_hash = litecoin.dhash(random+move)
return binascii.hexlify(random).decode('utf8'), binascii.hexlify(move_random_hash).decode('utf8')
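# Commit/reveal sketch (illustrative, not in the original file): judging from
# the `rps`/`rpsresolve` options defined below, the hash returned above acts as
# a commitment to the move, which is later checked by revealing `random` and
# `move` and recomputing the same dhash:
#
#   random_hex, commitment = generate_move_random_hash(move)
#   recomputed = litecoin.dhash(binascii.unhexlify(random_hex) +
#                               int(move).to_bytes(2, byteorder='big'))
#   assert binascii.hexlify(recomputed).decode('utf8') == commitment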
if __name__ == '__main__':
if os.name == 'nt':
#patch up cmd.exe's "challenged" (i.e. broken/non-existent) UTF-8 logging
util_windows.fix_win32_unicode()
# Parse command-line arguments.
parser = argparse.ArgumentParser(prog=config.XLT_CLIENT, description='the reference implementation of the {} protocol'.format(config.XLT_NAME))
parser.add_argument('-V', '--version', action='version', version="{} v{}".format(config.XLT_CLIENT, config.VERSION_STRING))
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='sets log level to DEBUG instead of WARNING')
parser.add_argument('--testnet', action='store_true', help='use {} testnet addresses and block numbers'.format(config.LTC_NAME))
parser.add_argument('--testcoin', action='store_true', help='use the test {} network on every blockchain'.format(config.XLT_NAME))
parser.add_argument('--carefulness', type=int, default=0, help='check conservation of assets after every CAREFULNESS transactions (potentially slow)')
parser.add_argument('--unconfirmed', action='store_true', help='allow the spending of unconfirmed transaction outputs')
parser.add_argument('--encoding', default='auto', type=str, help='data encoding method')
parser.add_argument('--fee-per-kb', type=D, default=D(config.DEFAULT_FEE_PER_KB / config.UNIT), help='fee per kilobyte, in {}'.format(config.LTC))
parser.add_argument('--regular-dust-size', type=D, default=D(config.DEFAULT_REGULAR_DUST_SIZE / config.UNIT), help='value for dust Pay‐to‐Pubkey‐Hash outputs, in {}'.format(config.LTC))
parser.add_argument('--multisig-dust-size', type=D, default=D(config.DEFAULT_MULTISIG_DUST_SIZE / config.UNIT), help='for dust OP_CHECKMULTISIG outputs, in {}'.format(config.LTC))
parser.add_argument('--op-return-value', type=D, default=D(config.DEFAULT_OP_RETURN_VALUE / config.UNIT), help='value for OP_RETURN outputs, in {}'.format(config.LTC))
parser.add_argument('--unsigned', action='store_true', help='print out unsigned hex of transaction; do not sign or broadcast')
parser.add_argument('--data-dir', help='the directory in which to keep the database, config file and log file, by default')
parser.add_argument('--database-file', help='the location of the SQLite3 database')
parser.add_argument('--config-file', help='the location of the configuration file')
parser.add_argument('--log-file', help='the location of the log file')
parser.add_argument('--backend-rpc-connect', help='the hostname or IP of the backend litecoind JSON-RPC server')
parser.add_argument('--backend-rpc-port', type=int, help='the backend JSON-RPC port to connect to')
parser.add_argument('--backend-rpc-user', help='the username used to communicate with backend over JSON-RPC')
parser.add_argument('--backend-rpc-password', help='the password used to communicate with backend over JSON-RPC')
parser.add_argument('--backend-rpc-ssl', action='store_true', help='use SSL to connect to backend (default: false)')
parser.add_argument('--backend-rpc-ssl-verify', action='store_true', help='verify SSL certificate of backend; disallow use of self‐signed certificates (default: false)')
parser.add_argument('--blockchain-service-name', help='the blockchain service name to connect to')
parser.add_argument('--blockchain-service-connect', help='the blockchain service server URL base to connect to, if not default')
parser.add_argument('--rpc-host', help='the IP of the interface to bind to for providing JSON-RPC API access (0.0.0.0 for all interfaces)')
parser.add_argument('--rpc-port', type=int, help='port on which to provide the {} JSON-RPC API'.format(config.XLT_CLIENT))
parser.add_argument('--rpc-user', help='required username to use the {} JSON-RPC API (via HTTP basic auth)'.format(config.XLT_CLIENT))
parser.add_argument('--rpc-password', help='required password (for rpc-user) to use the {} JSON-RPC API (via HTTP basic auth)'.format(config.XLT_CLIENT))
parser.add_argument('--rpc-allow-cors', action='store_true', default=True, help='Allow ajax cross domain request')
subparsers = parser.add_subparsers(dest='action', help='the action to be taken')
parser_server = subparsers.add_parser('server', help='run the server')
parser_server.add_argument('--force', action='store_true', help='skip backend check, version check, lockfile check')
parser_send = subparsers.add_parser('send', help='create and broadcast a *send* message')
parser_send.add_argument('--source', required=True, help='the source address')
parser_send.add_argument('--destination', required=True, help='the destination address')
parser_send.add_argument('--quantity', required=True, help='the quantity of ASSET to send')
parser_send.add_argument('--asset', required=True, help='the ASSET of which you would like to send QUANTITY')
parser_send.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_order = subparsers.add_parser('order', help='create and broadcast an *order* message')
parser_order.add_argument('--source', required=True, help='the source address')
parser_order.add_argument('--get-quantity', required=True, help='the quantity of GET_ASSET that you would like to receive')
parser_order.add_argument('--get-asset', required=True, help='the asset that you would like to buy')
parser_order.add_argument('--give-quantity', required=True, help='the quantity of GIVE_ASSET that you are willing to give')
parser_order.add_argument('--give-asset', required=True, help='the asset that you would like to sell')
parser_order.add_argument('--expiration', type=int, required=True, help='the number of blocks for which the order should be valid')
parser_order.add_argument('--fee-fraction-required', default=config.DEFAULT_FEE_FRACTION_REQUIRED, help='the miners’ fee required for an order to match this one, as a fraction of the {} to be bought'.format(config.LTC))
parser_order_fees = parser_order.add_mutually_exclusive_group()
parser_order_fees.add_argument('--fee-fraction-provided', default=config.DEFAULT_FEE_FRACTION_PROVIDED, help='the miners’ fee provided, as a fraction of the {} to be sold'.format(config.LTC))
parser_order_fees.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_ltcpay= subparsers.add_parser('{}pay'.format(config.LTC).lower(), help='create and broadcast a *{}pay* message, to settle an Order Match for which you owe {}'.format(config.LTC, config.LTC))
parser_ltcpay.add_argument('--source', required=True, help='the source address')
parser_ltcpay.add_argument('--order-match-id', required=True, help='the concatenation of the hashes of the two transactions which compose the order match')
parser_ltcpay.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_issuance = subparsers.add_parser('issuance', help='issue a new asset, issue more of an existing asset or transfer the ownership of an asset')
parser_issuance.add_argument('--source', required=True, help='the source address')
parser_issuance.add_argument('--transfer-destination', help='for transfer of ownership of asset issuance rights')
parser_issuance.add_argument('--quantity', default=0, help='the quantity of ASSET to be issued')
parser_issuance.add_argument('--asset', required=True, help='the name of the asset to be issued (if it’s available)')
parser_issuance.add_argument('--divisible', action='store_true', help='whether or not the asset is divisible (must agree with previous issuances)')
parser_issuance.add_argument('--callable', dest='callable_', action='store_true', help='whether or not the asset is callable (must agree with previous issuances)')
parser_issuance.add_argument('--call-date', help='the date from which a callable asset may be called back (must agree with previous issuances)')
parser_issuance.add_argument('--call-price', help='the price, in XLT per whole unit, at which a callable asset may be called back (must agree with previous issuances)')
parser_issuance.add_argument('--description', type=str, required=True, help='a description of the asset (set to ‘LOCK’ to lock against further issuances with non‐zero quantitys)')
parser_issuance.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_broadcast = subparsers.add_parser('broadcast', help='broadcast textual and numerical information to the network')
parser_broadcast.add_argument('--source', required=True, help='the source address')
parser_broadcast.add_argument('--text', type=str, required=True, help='the textual part of the broadcast (set to ‘LOCK’ to lock feed)')
parser_broadcast.add_argument('--value', type=float, default=-1, help='numerical value of the broadcast')
parser_broadcast.add_argument('--fee-fraction', default=0, help='the fraction of bets on this feed that go to its operator')
parser_broadcast.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_bet = subparsers.add_parser('bet', help='offer to make a bet on the value of a feed')
parser_bet.add_argument('--source', required=True, help='the source address')
parser_bet.add_argument('--feed-address', required=True, help='the address which publishes the feed to bet on')
parser_bet.add_argument('--bet-type', choices=list(util.BET_TYPE_NAME.values()), required=True, help='choices: {}'.format(list(util.BET_TYPE_NAME.values())))
parser_bet.add_argument('--deadline', required=True, help='the date and time at which the bet should be decided/settled')
parser_bet.add_argument('--wager', required=True, help='the quantity of XLT to wager')
parser_bet.add_argument('--counterwager', required=True, help='the minimum quantity of XLT to be wagered by the user to bet against you, if he were to accept the whole thing')
parser_bet.add_argument('--target-value', default=0.0, help='target value for Equal/NotEqual bet')
parser_bet.add_argument('--leverage', type=int, default=5040, help='leverage, as a fraction of 5040')
parser_bet.add_argument('--expiration', type=int, required=True, help='the number of blocks for which the bet should be valid')
parser_bet.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_dividend = subparsers.add_parser('dividend', help='pay dividends to the holders of an asset (in proportion to their stake in it)')
parser_dividend.add_argument('--source', required=True, help='the source address')
parser_dividend.add_argument('--quantity-per-unit', required=True, help='the quantity of XLT to be paid per whole unit held of ASSET')
parser_dividend.add_argument('--asset', required=True, help='the asset to which pay dividends')
parser_dividend.add_argument('--dividend-asset', required=True, help='asset in which to pay the dividends')
parser_dividend.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_burn = subparsers.add_parser('burn', help='destroy {} to earn XLT, during an initial period of time')
parser_burn.add_argument('--source', required=True, help='the source address')
parser_burn.add_argument('--quantity', required=True, help='quantity of {} to be destroyed'.format(config.LTC))
parser_burn.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_cancel= subparsers.add_parser('cancel', help='cancel an open order or bet you created')
parser_cancel.add_argument('--source', required=True, help='the source address')
parser_cancel.add_argument('--offer-hash', required=True, help='the transaction hash of the order or bet')
parser_cancel.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_callback = subparsers.add_parser('callback', help='callback a fraction of an asset')
parser_callback.add_argument('--source', required=True, help='the source address')
parser_callback.add_argument('--fraction', required=True, help='the fraction of ASSET to call back')
parser_callback.add_argument('--asset', required=True, help='the asset to callback')
parser_callback.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_rps = subparsers.add_parser('rps', help='open a rock-paper-scissors like game')
parser_rps.add_argument('--source', required=True, help='the source address')
parser_rps.add_argument('--wager', required=True, help='the quantity of XLT to wager')
parser_rps.add_argument('--move', type=int, required=True, help='the selected move')
parser_rps.add_argument('--possible-moves', type=int, required=True, help='the number of possible moves (odd number greater or equal than 3)')
parser_rps.add_argument('--expiration', type=int, required=True, help='the number of blocks for which the bet should be valid')
parser_rps.add_argument('--fee', help='the exact LTC fee to be paid to miners')
parser_rpsresolve = subparsers.add_parser('rpsresolve', help='resolve a rock-paper-scissors like game')
parser_rpsresolve.add_argument('--source', required=True, help='the source address')
parser_rpsresolve.add_argument('--random', type=str, required=True, help='the random number used in the corresponding rps transaction')
parser_rpsresolve.add_argument('--move', type=int, required=True, help='the selected move in the corresponding rps transaction')
parser_rpsresolve.add_argument('--rps-match-id', required=True, help='the concatenation of the hashes of the two transactions which compose the rps match')
parser_rpsresolve.add_argument('--fee', help='the exact LTC fee to be paid to miners')
parser_publish = subparsers.add_parser('publish', help='publish arbitrary data in the blockchain')
parser_publish.add_argument('--source', required=True, help='the source address')
parser_publish.add_argument('--data-hex', required=True, help='the hex‐encoded data')
parser_publish.add_argument('--fee', help='the exact {} fee to be paid to miners'.format(config.LTC))
parser_address = subparsers.add_parser('balances', help='display the balances of a {} address'.format(config.XLT_NAME))
parser_address.add_argument('address', help='the address you are interested in')
parser_asset = subparsers.add_parser('asset', help='display the basic properties of a {} asset'.format(config.XLT_NAME))
parser_asset.add_argument('asset', help='the asset you are interested in')
parser_wallet = subparsers.add_parser('wallet', help='list the addresses in your backend wallet along with their balances in all {} assets'.format(config.XLT_NAME))
    parser_pending = subparsers.add_parser('pending', help='list pending order matches awaiting {} payment from you'.format(config.LTC))
parser_reparse = subparsers.add_parser('reparse', help='reparse all transactions in the database')
parser_reparse.add_argument('--force', action='store_true', help='skip backend check, version check, lockfile check')
parser_rollback = subparsers.add_parser('rollback', help='rollback database')
parser_rollback.add_argument('block_index', type=int, help='the index of the last known good block')
parser_rollback.add_argument('--force', action='store_true', help='skip backend check, version check, lockfile check')
    parser_market = subparsers.add_parser('market', help='fill the screen with an always up-to-date summary of the {} market'.format(config.XLT_NAME))
parser_market.add_argument('--give-asset', help='only show orders offering to sell GIVE_ASSET')
parser_market.add_argument('--get-asset', help='only show orders offering to buy GET_ASSET')
args = parser.parse_args()
# Convert.
args.fee_per_kb = int(args.fee_per_kb * config.UNIT)
args.regular_dust_size = int(args.regular_dust_size * config.UNIT)
args.multisig_dust_size = int(args.multisig_dust_size * config.UNIT)
    args.op_return_value = int(args.op_return_value * config.UNIT)
# Hack
try: args.force
except (NameError, AttributeError): args.force = None
# Configuration
set_options(data_dir=args.data_dir,
backend_rpc_connect=args.backend_rpc_connect,
backend_rpc_port=args.backend_rpc_port,
backend_rpc_user=args.backend_rpc_user,
backend_rpc_password=args.backend_rpc_password,
backend_rpc_ssl=args.backend_rpc_ssl,
backend_rpc_ssl_verify=args.backend_rpc_ssl_verify,
blockchain_service_name=args.blockchain_service_name,
blockchain_service_connect=args.blockchain_service_connect,
rpc_host=args.rpc_host, rpc_port=args.rpc_port, rpc_user=args.rpc_user,
rpc_password=args.rpc_password, rpc_allow_cors=args.rpc_allow_cors,
log_file=args.log_file, config_file=args.config_file,
database_file=args.database_file, testnet=args.testnet,
testcoin=args.testcoin, carefulness=args.carefulness,
force=args.force)
# Logging (to file and console).
logger = logging.getLogger() #get root logger
logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
#Console logging
console = logging.StreamHandler()
console.setLevel(logging.DEBUG if args.verbose else logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logger.addHandler(console)
#File logging (rotated)
max_log_size = 20 * 1024 * 1024 #max log size of 20 MB before rotation (make configurable later)
if os.name == 'nt':
fileh = util_windows.SanitizedRotatingFileHandler(config.LOG, maxBytes=max_log_size, backupCount=5)
else:
fileh = logging.handlers.RotatingFileHandler(config.LOG, maxBytes=max_log_size, backupCount=5)
fileh.setLevel(logging.DEBUG if args.verbose else logging.INFO)
formatter = logging.Formatter('%(asctime)s %(message)s', '%Y-%m-%d-T%H:%M:%S%z')
fileh.setFormatter(formatter)
logger.addHandler(fileh)
#API requests logging (don't show on console in normal operation)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.DEBUG if args.verbose else logging.WARNING)
requests_log.propagate = False
urllib3_log = logging.getLogger('urllib3')
urllib3_log.setLevel(logging.DEBUG if args.verbose else logging.WARNING)
urllib3_log.propagate = False
# Enforce locks?
if config.FORCE:
lock = threading.RLock() # This won’t lock!
else:
lock = LockFile(config.DATABASE) # This will!
# Database
logging.info('Status: Connecting to database.')
db = util.connect_to_db()
# Version
    logging.info('Status: Running v{} of {}.'.format(config.VERSION_STRING, config.XLT_CLIENT))
if not config.FORCE and args.action in ('server', 'reparse', 'rollback'):
logging.info('Status: Checking version.')
try:
util.version_check(db)
except exceptions.VersionUpdateRequiredError as e:
traceback.print_exc(file=sys.stdout)
sys.exit(config.EXITCODE_UPDATE_REQUIRED)
# MESSAGE CREATION
if args.action == 'send':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
quantity = util.devise(db, args.quantity, args.asset, 'input')
cli('create_send', {'source': args.source,
'destination': args.destination, 'asset':
args.asset, 'quantity': quantity, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'order':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
fee_required, fee_fraction_provided = D(args.fee_fraction_required), D(args.fee_fraction_provided)
give_quantity, get_quantity = D(args.give_quantity), D(args.get_quantity)
# Fee argument is either fee_required or fee_provided, as necessary.
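        # As implemented below: when selling LTC the order provides a fee proportional to the
        # give quantity; when buying LTC it requires the counterparty to provide a fee
        # proportional to the get quantity; otherwise both fee terms are zero.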
if args.give_asset == config.LTC:
fee_required = 0
fee_fraction_provided = util.devise(db, fee_fraction_provided, 'fraction', 'input')
fee_provided = round(D(fee_fraction_provided) * D(give_quantity) * D(config.UNIT))
print('Fee provided: {} {}'.format(util.devise(db, fee_provided, config.LTC, 'output'), config.LTC))
elif args.get_asset == config.LTC:
fee_provided = 0
fee_fraction_required = util.devise(db, args.fee_fraction_required, 'fraction', 'input')
fee_required = round(D(fee_fraction_required) * D(get_quantity) * D(config.UNIT))
print('Fee required: {} {}'.format(util.devise(db, fee_required, config.LTC, 'output'), config.LTC))
else:
fee_required = 0
fee_provided = 0
give_quantity = util.devise(db, give_quantity, args.give_asset, 'input')
get_quantity = util.devise(db, get_quantity, args.get_asset, 'input')
cli('create_order', {'source': args.source,
'give_asset': args.give_asset, 'give_quantity':
give_quantity, 'get_asset': args.get_asset,
'get_quantity': get_quantity, 'expiration':
args.expiration, 'fee_required': fee_required,
'fee_provided': fee_provided, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == '{}pay'.format(config.LTC).lower():
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
cli('create_ltcpay', {'source': args.source,
'order_match_id': args.order_match_id, 'fee':
args.fee, 'allow_unconfirmed_inputs':
args.unconfirmed, 'encoding': args.encoding,
'fee_per_kb': args.fee_per_kb,
'regular_dust_size': args.regular_dust_size,
'multisig_dust_size': args.multisig_dust_size,
'op_return_value': args.op_return_value},
args.unsigned)
elif args.action == 'issuance':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
quantity = util.devise(db, args.quantity, None, 'input',
divisible=args.divisible)
if args.callable_:
if not args.call_date:
                parser.error('must specify call date of callable asset')
if not args.call_price:
parser.error('must specify call price of callable asset')
call_date = calendar.timegm(dateutil.parser.parse(args.call_date).utctimetuple())
call_price = float(args.call_price)
else:
call_date, call_price = 0, 0
cli('create_issuance', {'source': args.source, 'asset': args.asset,
'quantity': quantity, 'divisible':
args.divisible, 'description':
args.description, 'callable_': args.callable_,
'call_date': call_date, 'call_price':
call_price, 'transfer_destination':
args.transfer_destination, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'broadcast':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
value = util.devise(db, args.value, 'value', 'input')
fee_fraction = util.devise(db, args.fee_fraction, 'fraction', 'input')
cli('create_broadcast', {'source': args.source,
'fee_fraction': fee_fraction, 'text':
args.text, 'timestamp': int(time.time()),
'value': value, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'bet':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
deadline = calendar.timegm(dateutil.parser.parse(args.deadline).utctimetuple())
wager = util.devise(db, args.wager, config.XLT, 'input')
counterwager = util.devise(db, args.counterwager, config.XLT, 'input')
target_value = util.devise(db, args.target_value, 'value', 'input')
leverage = util.devise(db, args.leverage, 'leverage', 'input')
cli('create_bet', {'source': args.source,
'feed_address': args.feed_address, 'bet_type':
                           util.BET_TYPE_ID[args.bet_type], 'deadline': deadline, 'wager_quantity': wager,
'counterwager_quantity': counterwager, 'expiration':
args.expiration, 'target_value': target_value,
'leverage': leverage, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'dividend':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
quantity_per_unit = util.devise(db, args.quantity_per_unit, config.XLT, 'input')
cli('create_dividend', {'source': args.source,
'quantity_per_unit': quantity_per_unit,
'asset': args.asset, 'dividend_asset':
args.dividend_asset, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'burn':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
quantity = util.devise(db, args.quantity, config.LTC, 'input')
cli('create_burn', {'source': args.source, 'quantity': quantity,
'fee': args.fee, 'allow_unconfirmed_inputs':
args.unconfirmed, 'encoding': args.encoding,
'fee_per_kb': args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'cancel':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
cli('create_cancel', {'source': args.source,
'offer_hash': args.offer_hash, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'callback':
if args.fee: args.fee = util.devise(db, args.fee, config.LTC, 'input')
cli('create_callback', {'source': args.source,
'fraction': util.devise(db, args.fraction, 'fraction', 'input'),
'asset': args.asset, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'rps':
if args.fee: args.fee = util.devise(db, args.fee, 'LTC', 'input')
wager = util.devise(db, args.wager, 'XLT', 'input')
random, move_random_hash = generate_move_random_hash(args.move)
print('random: {}'.format(random))
print('move_random_hash: {}'.format(move_random_hash))
cli('create_rps', {'source': args.source,
'possible_moves': args.possible_moves, 'wager': wager,
'move_random_hash': move_random_hash, 'expiration': args.expiration,
                           'fee': args.fee, 'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'rpsresolve':
if args.fee: args.fee = util.devise(db, args.fee, 'LTC', 'input')
cli('create_rpsresolve', {'source': args.source,
'random': args.random, 'move': args.move,
'rps_match_id': args.rps_match_id, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
elif args.action == 'publish':
if args.fee: args.fee = util.devise(db, args.fee, 'LTC', 'input')
cli('create_publish', {'source': args.source,
'data_hex': args.data_hex, 'fee': args.fee,
'allow_unconfirmed_inputs': args.unconfirmed,
'encoding': args.encoding, 'fee_per_kb':
args.fee_per_kb, 'regular_dust_size':
args.regular_dust_size, 'multisig_dust_size':
args.multisig_dust_size, 'op_return_value':
args.op_return_value},
args.unsigned)
# VIEWING (temporary)
elif args.action == 'balances':
balances(args.address)
elif args.action == 'asset':
results = util.api('get_asset_info', {'assets': [args.asset]})
if results:
results = results[0] # HACK
else:
print('Asset ‘{}’ not found.'.format(args.asset))
exit(0)
asset_id = util.asset_id(args.asset)
divisible = results['divisible']
locked = results['locked']
supply = util.devise(db, results['supply'], args.asset, dest='output')
call_date = util.isodt(results['call_date']) if results['call_date'] else results['call_date']
call_price = str(results['call_price']) + ' XLT' if results['call_price'] else results['call_price']
print('Asset Name:', args.asset)
print('Asset ID:', asset_id)
print('Divisible:', divisible)
print('Locked:', locked)
print('Supply:', supply)
print('Issuer:', results['issuer'])
print('Callable:', results['callable'])
print('Call Date:', call_date)
print('Call Price:', call_price)
print('Description:', '‘' + results['description'] + '’')
if args.asset != config.LTC:
print('Shareholders:')
balances = util.api('get_balances', {'filters': [('asset', '==', args.asset)]})
print('\taddress, quantity, escrow')
for holder in util.holders(db, args.asset):
quantity = holder['address_quantity']
if not quantity: continue
quantity = util.devise(db, quantity, args.asset, 'output')
if holder['escrow']: escrow = holder['escrow']
else: escrow = 'None'
print('\t' + str(holder['address']) + ',' + str(quantity) + ',' + escrow)
elif args.action == 'wallet':
total_table = PrettyTable(['Asset', 'Balance'])
totals = {}
print()
for bunch in litecoin.get_wallet():
address, ltc_balance = bunch[:2]
address_data = get_address(db, address=address)
balances = address_data['balances']
table = PrettyTable(['Asset', 'Balance'])
empty = True
if ltc_balance:
table.add_row([config.LTC, ltc_balance]) # LTC
if config.LTC in totals.keys(): totals[config.LTC] += ltc_balance
else: totals[config.LTC] = ltc_balance
empty = False
for balance in balances:
asset = balance['asset']
try:
balance = D(util.devise(db, balance['quantity'], balance['asset'], 'output'))
except:
balance = None
if balance:
if asset in totals.keys(): totals[asset] += balance
else: totals[asset] = balance
table.add_row([asset, balance])
empty = False
if not empty:
print(address)
print(table.get_string())
print()
for asset in totals.keys():
balance = totals[asset]
total_table.add_row([asset, round(balance, 8)])
print('TOTAL')
print(total_table.get_string())
print()
elif args.action == 'pending':
addresses = []
for bunch in litecoin.get_wallet():
addresses.append(bunch[:2][0])
filters = [
('tx0_address', 'IN', addresses),
('tx1_address', 'IN', addresses)
]
awaiting_ltcs = util.api('get_order_matches', {'filters': filters, 'filterop': 'OR', 'status': 'pending'})
table = PrettyTable(['Matched Order ID', 'Time Left'])
for order_match in awaiting_ltcs:
order_match = format_order_match(db, order_match)
table.add_row(order_match)
print(table)
elif args.action == 'market':
market(args.give_asset, args.get_asset)
# PARSING
elif args.action == 'reparse':
logging.info('Status: Acquiring process lock.')
with lock:
blocks.reparse(db)
elif args.action == 'rollback':
logging.info('Status: Acquiring process lock.')
with lock:
blocks.reparse(db, block_index=args.block_index)
elif args.action == 'server':
logging.info('Status: Acquiring process lock.')
with lock:
api_status_poller = api.APIStatusPoller()
api_status_poller.daemon = True
api_status_poller.start()
api_server = api.APIServer()
api_server.daemon = True
api_server.start()
# Check blockchain explorer.
if not config.FORCE:
time_wait = 10
num_tries = 10
for i in range(1, num_tries + 1):
try:
blockchain.check()
except: # TODO
logging.warn("Blockchain backend (%s) not yet initialized. Waiting %i seconds and trying again (try %i of %i)..." % (
config.BLOCKCHAIN_SERVICE_NAME, time_wait, i, num_tries))
time.sleep(time_wait)
else:
break
else:
raise Exception("Blockchain backend (%s) not initialized! Aborting startup after %i tries." % (
config.BLOCKCHAIN_SERVICE_NAME, num_tries))
blocks.follow(db)
else:
parser.print_help()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | -3,661,254,247,231,459,000 | 54.542581 | 289 | 0.619398 | false |
kosklain/CausalityCompetition | CausalityTrainer.py | 1 | 2257 | import data_io
import CausalityFeatureFunctions as f
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
class CausalityTrainer:
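    # Trains a random-forest regression pipeline on pairwise feature data to predict the
    # direction of causation; the sign convention of the target is selected by directionForward.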
def __init__(self, directionForward=True):
self.directionForward = directionForward
def getFeatureExtractor(self, features):
combined = f.FeatureMapper(features)
return combined
def getPipeline(self, feat):
features = self.getFeatureExtractor(feat)
steps = [("extract_features", features),
("classify", RandomForestRegressor(compute_importances=True, n_estimators=500,
verbose=2, n_jobs=1, min_samples_split=10,
random_state=0))]
return Pipeline(steps)
def getTrainingDataset(self):
print "Reading in the training data"
train = data_io.read_train_pairs()
print "Reading the information about the training data"
train2 = data_io.read_train_info()
train["A type"] = train2["A type"]
train["B type"] = train2["B type"]
return train
def run(self):
features = f.features
train = self.getTrainingDataset()
print "Reading preprocessed features"
if f.preprocessedFeatures != []:
intermediate = data_io.read_intermediate_train()
for i in f.preprocessedFeatures:
train[i] = intermediate[i]
for i in features:
if i[0] in f.preprocessedFeatures:
i[1] = i[0]
i[2] = f.SimpleTransform(transformer=f.ff.identity)
print "Reading targets"
target = data_io.read_train_target()
print "Extracting features and training model"
classifier = self.getPipeline(features)
if self.directionForward:
finalTarget = [ x*(x+1)/2 for x in target.Target]
else:
finalTarget = [ -x*(x-1)/2 for x in target.Target]
classifier.fit(train, finalTarget)
print classifier.steps[-1][1].feature_importances_
print "Saving the classifier"
data_io.save_model(classifier)
if __name__=="__main__":
ct = CausalityTrainer() | gpl-2.0 | 6,256,355,404,492,569,000 | 38.614035 | 96 | 0.594152 | false |
dgnorth/DriftUe4Plugin | Scripts/publish.py | 1 | 13837 | """Build and upload script to make UE4 client and server builds available on S3.
"""
import sys
import os
import threading
import time
import json
from datetime import datetime
import mimetypes
import argparse
import re
import getpass
import operator
from dateutil.parser import parse
from tabulate import tabulate
import boto3
from boto3.s3.transfer import S3Transfer, TransferConfig
def get_archives_in_folder(path):
ret = []
for filename in os.listdir(path):
if filename.endswith(".zip"):
full_filename = os.path.join(path, filename)
ret.append(full_filename)
return ret
def delete_archives(path):
archives = get_archives_in_folder(path)
for full_filename in archives:
print "Deleting old archive '%s'" % full_filename
os.remove(full_filename)
def get_script_path():
return os.path.abspath(os.path.split(__file__)[0])
def get_config():
config_filename = os.path.join(get_script_path(), "publish.cfg")
ret = {}
try:
with open(config_filename, 'r') as f:
ret = json.load(f)
except:
print "No config file. All configuration must come from command-line"
return ret
def get_project_file():
# assume the project file is one level above this script
path = os.path.abspath(os.path.join(get_script_path(), "..\\")).replace('\\', '/')
project_name = path.split('/')[-1]
ret = os.path.join(path, project_name) + ".uproject"
if not os.path.exists(ret):
raise RuntimeError("Project file '%s' not found" % ret)
return project_name, ret
config = get_config()
index_file = None
def get_index_path(s3path):
return "{path}/index.json".format(path=s3path)
def get_index(s3region, s3bucket, s3path):
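    # Fetch and cache the remote index.json once per process; if the key does not exist yet,
    # start a fresh index with an initial build number and an empty list of refs.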
global index_file
if index_file:
return index_file
key_name = get_index_path(s3path)
try:
response = boto3.client('s3', s3region).get_object(Bucket=s3bucket, Key=key_name)
except Exception as e:
if 'NoSuchKey' not in str(e):
raise
index_file = {
'repository': None,
'next_build_number': 10000,
'refs': [],
}
else:
index_file = json.load(response['Body'])
return index_file
def download_manifest(s3region, s3bucket, manifest_key_name):
s3 = boto3.client('s3', s3region)
resp = s3.get_object(Bucket=s3bucket, Key=manifest_key_name)
ret = json.load(resp['Body'])
return ret
def get_staging_directory(project_file, config):
project_root, _ = os.path.split(project_file)
project_root = os.path.abspath(project_root)
return os.path.join(project_root, 'Saved', 'StagedBuilds', config)
def create_build_manifest(build_number, repository, ref, project, target_platform, config, version_string=None, executable_path=None):
# Gather info for build manifest
build_manifest = {
'repository': repository,
'ref': ref,
'project': project,
'target_platform': target_platform,
'config': config,
'timestamp': datetime.utcnow().isoformat(),
'commit_id': None,
'build_number': build_number,
'built_by': getpass.getuser(),
'version_string': version_string,
'executable_path': executable_path,
}
# Generate a canonical name for the build archive (excluding extension)
canonical_ref = '{}/{}'.format(ref, build_number).replace('/', '.')
canonical_buildname = '{project}-{target_platform}-{config}-{canonical_ref}'.format(
canonical_ref=canonical_ref, **build_manifest
)
build_manifest['build'] = canonical_buildname
return build_manifest
def transfer_build_to_s3(archive_name, key_name):
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
self._start_time = time.time()
self._last_time = time.time()
self._megseg = 0.0
@property
def archive_info(self):
return {
"filename": self._filename,
"size": long(self._size),
"upload_time_sec": long(self._last_time - self._start_time)
}
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
if time.time() - self._last_time > 0.02:
self._last_time = time.time()
elapsed = time.time() - self._start_time
self._megseg = (self._seen_so_far / 1024.0 / 1024.0) / elapsed
sys.stdout.write("Upload progress: %s kb / %s kb (%.2f%%) %.1f mb/s\r" % (self._seen_so_far // 1024, self._size // 1024, percentage, self._megseg))
sys.stdout.flush()
transfer_config = TransferConfig(
multipart_threshold=4 * 1024 * 1024,
max_concurrency=30
)
client = boto3.client('s3', s3region)
transfer = S3Transfer(client, transfer_config)
mimetype, encoding = mimetypes.guess_type(archive_name)
if mimetype is None:
print "Can't figure out mimetype for:", archive_name
sys.exit(1)
print " Archive filename: ", archive_name
print " S3 Bucket: ", s3bucket
print " S3 Key Name: ", key_name
print " Key Mime Type: ", mimetype
cb = ProgressPercentage(archive_name)
transfer.upload_file(
archive_name, s3bucket, key_name,
extra_args={'ContentType': mimetype},
callback=cb,
)
return cb
def publish_build(zippathname, build_manifest, s3region, s3bucket, s3path):
client = boto3.client('s3', s3region)
# The archive and manifest must be stored in the correct subfolder, so we append
# the UE4 build folder root and repository name.
base_name = "{}/{}/{}".format(
s3path,
build_manifest['target_platform'],
build_manifest['build']
)
zipname, zipext = os.path.splitext(zippathname)
archive_key_name = base_name + zipext
manifest_key_name = base_name + '.json'
# Upload the build to S3
progress = transfer_build_to_s3(zippathname, archive_key_name)
# Update manifest information
build_manifest['archive_info'] = progress.archive_info
build_manifest['archive'] = archive_key_name
# Get a permalink to the build
build_manifest['archive_url'] = client.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': s3bucket,
'Key': archive_key_name,
},
ExpiresIn=60*60*24*365,
HttpMethod='GET'
)
# Upload build manifest. Use the same name as the archive, but .json.
response = client.put_object(
Bucket=s3bucket,
Key=manifest_key_name,
Body=json.dumps(build_manifest, indent=4),
ContentType='application/json'
)
print "build_manifest:", json.dumps(build_manifest, indent=4)
# Index file update
print "Updating index file"
index_file = get_index(s3region, s3bucket, s3path)
if index_file['next_build_number'] != build_manifest['build_number']:
print "ATTENTION! Build number counter and build number don't match!"
index_file['next_build_number'] += 1
ref = build_manifest['ref']
target_platform = build_manifest['target_platform']
for ref_item in index_file['refs']:
if ref_item['ref'] == ref and ref_item['target_platform'] == target_platform:
break
else:
ref_item = {
'ref': ref,
'target_platform': target_platform,
}
index_file['refs'].append(ref_item)
# Add a reference to the manifest file
ref_item['build_manifest'] = manifest_key_name
key_name = get_index_path(s3path)
response = client.put_object(
Bucket=s3bucket,
Key=key_name,
Body=json.dumps(index_file, indent=4),
ContentType='application/json'
)
print "Publishing build succeeded"
def list_builds(s3region, s3bucket, s3path):
sys.stdout.write('Fetching build information...')
index_file = get_index(s3region, s3bucket, s3path)
results = []
for entry in index_file['refs']:
sys.stdout.write('.')
manifest = download_manifest(s3region, s3bucket, entry['build_manifest'])
dt = manifest['timestamp']
dt = parse(dt).replace(tzinfo=None).strftime("%Y-%m-%d %H:%M")
sz = int(manifest['archive_info']['size'])/1024/1024
results.append([entry['ref'], dt, manifest['built_by'], manifest['config'], entry['target_platform'], manifest['build_number'], sz])
results.sort(key=operator.itemgetter(5), reverse=True)
print
print tabulate(results, headers=['ref', 'timestamp', 'built by', 'config', 'platform', 'build number', 'size [mb]'])
if __name__ == "__main__":
start_time = time.time()
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='sub-command help', dest="cmd")
parser_list = subparsers.add_parser('list', help='List published builds')
parser_list.add_argument('--s3bucket', default=config.get('bucket'), help="S3 Bucket name (default: %s)" % config.get('bucket'))
parser_list.add_argument('--s3region', default=config.get('region'), help="S3 Region name (default: %s)" % config.get('region'))
parser_list.add_argument('--s3path', default=config.get('path'), help="S3 Path (default: %s)" % config.get('path'))
parser_publish = subparsers.add_parser('publish', help='Publish a build to the cloud')
parser_publish.add_argument("-r", "--ref", required=True, help='Ref to publish this build under (required)')
parser_publish.add_argument("-c", "--config", default="Development", help='Build configuration that was built (default: Development)')
parser_publish.add_argument('-a', '--archive', help="Path to archive file to upload to S3. If not specified all .zip archives from staging folder will be published.")
parser_publish.add_argument('-v', '--version-string', help="A canonical version string of the build (optional).")
parser_publish.add_argument('-p', '--platform', default="Win64", help="Platform of the build (default: Win64)")
parser_publish.add_argument('--s3bucket', default=config.get('bucket'), help="S3 Bucket name (default: %s)" % config.get('bucket'))
parser_publish.add_argument('--s3region', default=config.get('region'), help="S3 Region name (default: %s)" % config.get('region'))
parser_publish.add_argument('--s3path', default=config.get('path'), help="S3 Path (default: %s)" % config.get('path'))
args = parser.parse_args()
tp_archives = {} # Key is target platform, value is archive folder, zip file name.
build_manifests = []
project_name, project_file = get_project_file()
s3region = args.s3region
s3bucket = args.s3bucket
s3path = args.s3path
if not all([s3region, s3bucket, s3path]):
print "Missing required parameters. Please run command with --help for details"
sys.exit(1)
if args.cmd == 'publish':
server_platform = args.platform
executable_path = "{project_name}\\Binaries\\{server_platform}\\{project_name}Server.exe".format(project_name=project_name,
server_platform=server_platform)
config_name = args.config
ref = args.ref
REF_MAX_LEN = 16
if len(ref) > REF_MAX_LEN:
print "ref can be at most %s characters" % REF_MAX_LEN
sys.exit(2)
re1 = re.compile(r"[\w.-]*$")
if not re1.match(ref):
print "ref cannot contain any special characters other than . and -"
sys.exit(2)
if args.archive:
archives = args.archive.split(",")
else:
staging_directory = get_staging_directory(project_file, config_name)
archives = get_archives_in_folder(staging_directory)
if len(archives) == 0:
print "No archives found in folder '%s'. Nothing to publish!" % staging_directory
sys.exit(2)
index_file = get_index(s3region, s3bucket, s3path)
for archive in archives:
if not os.path.exists(archive):
print "Archive '%s' not found. Cannot publish" % archive
sys.exit(1)
for archive in archives:
target_platform = archive.replace("\\", ".").split(".")[-2]
print "Publishing target platform '%s'" % target_platform
build_manifest = create_build_manifest(
build_number=index_file['next_build_number'],
repository=s3path,
ref=args.ref,
project=project_name,
target_platform=target_platform,
config=config_name,
version_string=args.version_string,
executable_path=executable_path,
)
publish_build(archive, build_manifest, s3region, s3bucket, s3path)
build_manifests.append(build_manifest)
if build_manifests:
print "Build manifests:"
for build_manifest in build_manifests:
print json.dumps(build_manifest, indent=4)
print
for build_manifest in build_manifests:
print "Archive URL: %s" % build_manifest['archive_url']
elif args.cmd == 'list':
list_builds(s3region, s3bucket, s3path)
| mit | 7,270,144,388,517,457,000 | 36.702997 | 170 | 0.610176 | false |
brianloveswords/django-badger | setup.py | 1 | 1111 | from setuptools import setup
setup(
name='django-badger',
version='0.0.1',
description='Django app for managing and awarding badgers',
long_description=open('README.rst').read(),
author='Leslie Michael Orchard',
author_email='[email protected]',
url='http://github.com/lmorchard/django-badger',
license='BSD',
packages=['badger', 'badger.templatetags', 'badger.management', 'badger.management.commands', 'badger.migrations'],
package_data={'badger': ['fixtures/*', 'templates/badger_playdoh/*.html', 'templates/badger_playdoh/includes/*.html', 'templates/badger_vanilla/*.html', 'templates/badger_vanilla/includes/*.html']},
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
# I don't know what exactly this means, but why not?
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| bsd-3-clause | 3,426,159,240,017,291,300 | 41.730769 | 202 | 0.656166 | false |
paulsbrookes/cqed_sims_qutip | spectroscopy/spec_anim.py | 1 | 6723 | import numpy as np
from qutip import *
from pylab import *
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import yaml
from scipy.interpolate import interp1d
class parameters:
def __init__(self, wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels):
self.wc = wc
self.wq = wq
self.eps = eps
self.g = g
self.chi = chi
self.gamma = gamma
self.kappa = kappa
self.t_levels = t_levels
self.c_levels = c_levels
def hamiltonian(params, wd):
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
H = - (params.wc - wd) * a.dag() * a - (params.wq - wd) * sm.dag() * sm \
+ params.chi * sm.dag() * sm * (sm.dag() * sm - 1) + params.g * (a.dag() * sm + a * sm.dag()) \
+ params.eps * (a + a.dag())
return H
def transmission_calc_array(params, wd_points):
transmissions = parallel_map(transmission_calc, wd_points, (params,), num_cpus = 10)
transmissions = np.array(transmissions)
return transmissions
def transmission_calc(wd, params):
a = tensor(destroy(params.c_levels), qeye(params.t_levels))
sm = tensor(qeye(params.c_levels), destroy(params.t_levels))
c_ops = []
c_ops.append(np.sqrt(params.kappa) * a)
c_ops.append(np.sqrt(params.gamma) * sm)
H = hamiltonian(params, wd)
rho_ss = steadystate(H, c_ops)
transmission = expect(a, rho_ss)
return transmission
def new_points(wd_points, transmissions, threshold):
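    # Score every interior frequency point with the curvature metric and return extra sample
    # points clustered around those whose score exceeds the given threshold.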
metric_vector = curvature_vector(wd_points, transmissions)
indices = np.array([index for index, metric in enumerate(metric_vector) if metric > threshold]) + 1
new_wd_points = generate_points(wd_points, indices)
return new_wd_points
def generate_points(wd_points, indices):
n_points = 6
new_wd_points = np.array([])
for index in indices:
multi_section = np.linspace(wd_points[index - 1], wd_points[index + 1], n_points)
new_wd_points = np.concatenate((new_wd_points, multi_section))
unique_set = set(new_wd_points) - set(wd_points)
new_wd_points_unique = np.array(list(unique_set))
return new_wd_points_unique
def curvature_vector(wd_points, transmissions):
is_ordered = all([wd_points[i] <= wd_points[i + 1] for i in xrange(len(wd_points) - 1)])
assert is_ordered, "Vector of wd_points is not ordered."
assert len(wd_points) == len(transmissions), "Vectors of wd_points and transmissions are not of equal length."
metric_vector = []
for index in range(len(wd_points) - 2):
metric = curvature(wd_points[index:index + 3], transmissions[index:index + 3])
metric_vector.append(metric)
return metric_vector
def curvature(wd_triplet, transmissions_triplet):
wd_are_floats = all([isinstance(wd_triplet[i], float) for i in xrange(len(wd_triplet) - 1)])
assert wd_are_floats, "The vector wd_triplet contains numbers which are not floats."
transmissions_are_floats = all([isinstance(transmissions_triplet[i], float) \
for i in xrange(len(transmissions_triplet) - 1)])
assert transmissions_are_floats, "The vector transmissions_triplet contains numbers which are not floats."
wd_delta_0 = wd_triplet[1] - wd_triplet[0]
wd_delta_1 = wd_triplet[2] - wd_triplet[1]
transmissions_delta_0 = transmissions_triplet[1] - transmissions_triplet[0]
transmissions_delta_1 = transmissions_triplet[2] - transmissions_triplet[1]
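    # Weighted difference of the neighbouring (step * change) products, scaled by the mean
    # step size; together with the normalisation by the middle |<a>| value below, this acts
    # as a rough curvature measure used to decide where the sweep needs denser sampling.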
metric = 2 * (wd_delta_1 * transmissions_delta_1 - wd_delta_0 * transmissions_delta_0) / (wd_delta_0 + wd_delta_1)
abs_normalised_metric = np.absolute(metric / transmissions_triplet[1])
return abs_normalised_metric
def y_lim_calc(y_points):
buffer_fraction = 0.1
y_max = np.amax(y_points)
y_min = np.amin(y_points)
range = y_max - y_min
y_lim_u = y_max + buffer_fraction * range
y_lim_l = y_min - buffer_fraction * range
return np.array([y_lim_l, y_lim_u])
def sweep(eps, wd_lower, wd_upper, params, fidelity):
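    # Adaptive sweep: start from a coarse grid of drive frequencies, then repeatedly add
    # points where |<a>| changes sharply, updating the live plot until no new points are needed.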
params.eps = eps
save = 1
wd_points = np.linspace(wd_lower, wd_upper, 10)
transmissions = transmission_calc_array(params, wd_points)
abs_transmissions = np.absolute(transmissions)
new_wd_points = new_points(wd_points, abs_transmissions, fidelity)
fig, ax = plt.subplots(1, 1)
ax.set_xlim(wd_lower, wd_upper)
y_limits = y_lim_calc(abs_transmissions)
ax.set_ylim(y_limits[0], y_limits[1])
ax.set_xlabel('Cavity drive frequency (GHz)')
ax.set_ylabel('|<a>|')
ax.hold(True)
plt.show(False)
plt.draw()
background = fig.canvas.copy_from_bbox(ax.bbox)
points = ax.plot(wd_points, abs_transmissions, 'o')[0]
while (len(new_wd_points) > 0):
new_transmissions = transmission_calc_array(params, new_wd_points)
new_abs_transmissions = np.absolute(new_transmissions)
wd_points = np.concatenate([wd_points, new_wd_points])
transmissions = concatenate([transmissions, new_transmissions])
abs_transmissions = concatenate([abs_transmissions, new_abs_transmissions])
sort_indices = np.argsort(wd_points)
wd_points = wd_points[sort_indices]
transmissions = transmissions[sort_indices]
abs_transmissions = abs_transmissions[sort_indices]
new_wd_points = new_points(wd_points, abs_transmissions, fidelity)
points.set_data(wd_points, abs_transmissions)
fig.canvas.restore_region(background)
ax.draw_artist(points)
fig.canvas.blit(ax.bbox)
y_limits = y_lim_calc(abs_transmissions)
ax.set_ylim(y_limits[0], y_limits[1])
if save == 1:
np.savetxt('results/abs_transmissions.csv', abs_transmissions, delimiter=',')
np.savetxt('results/drive_frequencies.csv', wd_points, delimiter=',')
params_dic = {'f_c': params.wc,
'f_q': params.wq,
'epsilon': params.eps,
'g': params.g,
'kappa': params.kappa,
'gamma': params.gamma,
'transmon_levels': params.t_levels,
'cavity_levels': params.c_levels}
with open('results/parameters.yml', 'w') as outfile: yaml.dump(params_dic, outfile, default_flow_style = True)
plt.scatter(wd_points, abs_transmissions)
plt.show()
if __name__ == '__main__':
#wc, wq, eps, g, chi, kappa, gamma, t_levels, c_levels
params = parameters(10.4262, 9.4914, 0.0001, 0.275, -0.097, 0.00146, 0.000833, 2, 10)
eps = 0.0001
fidelity = 0.05
wd_lower = 10.4
wd_upper = 10.55
sweep(eps, wd_lower, wd_upper, params, fidelity)
| apache-2.0 | 1,119,140,199,306,191,900 | 37.861272 | 118 | 0.638554 | false |
marpaia/chef-osx | cookbooks/python/files/default/ipython/profile_default/ipython_config.py | 1 | 16895 | # Configuration file for ipython.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.InteractiveShellApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# Whether to display a banner upon starting IPython.
c.TerminalIPythonApp.display_banner = False
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.TerminalIPythonApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py
# c.TerminalIPythonApp.force_interact = False
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.TerminalIPythonApp.pylab_import_all = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u'/Users/marpaia/.ipython'
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s] %(message)s'
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vim'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.5 (default, Jun 1 2013, 01:36:25) \nType "copyright", "credits" or "license" for more information.\n\nIPython 0.13.2 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
#
# c.HistoryManager.db_log_output = False
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
#
# c.HistoryManager.db_cache_size = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
| apache-2.0 | 5,318,918,276,306,134,000 | 35.333333 | 418 | 0.671974 | false |
sevazhidkov/leonard | jobs/return_messages.py | 1 | 3006 | import os
import random
import logging
import arrow
import telegram
from telegram.error import Unauthorized
from leonard import Leonard
from modules.menu import GREETING_PHRASES
from libs.timezone import local_time
from libs.utils import FakeMessage
telegram_client = telegram.Bot(os.environ['BOT_TOKEN'])
bot = Leonard(telegram_client)
bot.collect_plugins()
RETURN_MESSAGE_HOURS = list(range(11, 20))
RETURN_MESSAGE = '{} {}\n{}'
HOUR_MESSAGES = [(range(11, 17), 'Have a nice day ❤️'),
(range(17, 20), 'Good evening!')]
ASSIST_MESSAGES = ['By the way, if you have problems with me, you can write my developer @sevazhidkov',
'You can unsubscribe from such messages using Subscriptions 📬']
def main():
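    # NOTE: the unconditional return below disables this job; the return-message logic that
    # follows is kept in place but never executed until the return is removed.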
return
for key in bot.redis.scan_iter(match='user:*:registered'):
if bot.redis.get(key).decode('utf-8') != '1':
# TODO: Add reminder about registration
continue
_, u_id, _ = key.decode('utf-8').split(':')
status = bot.user_get(u_id, 'notifications:returns:messages')
if status == '0':
continue
time = local_time(bot, int(u_id))
if time.hour not in RETURN_MESSAGE_HOURS:
continue
if bot.user_get(u_id, 'return_sent'):
continue
return_hour = bot.user_get(u_id, 'return_hour')
if return_hour and time.hour != int(return_hour):
continue
elif not return_hour:
# Choose hour for return message
hour = random.choice(RETURN_MESSAGE_HOURS)
bot.user_set(u_id, 'return_hour', hour, ex=len(RETURN_MESSAGE_HOURS) * 60 * 60)
if hour != time.hour:
continue
last_interaction = arrow.get(bot.user_get(u_id, 'last_interaction') or time)
interaction_delta = time - last_interaction
if interaction_delta and last_interaction.replace(hours=+1) > time:
continue
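        # The random draw below is weighted by absence: roughly one extra zero is added for
        # every two days since the last interaction, so long-absent users are pinged less often.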
bot.logger.info('Checking return message to: {}, where list: {}'.format(
u_id, ([0] * round(interaction_delta.days / 2) + [0]) + [1, 1]
))
result = random.choice(([0] * round(interaction_delta.days / 2) + [0]) + [1, 1])
bot.user_set(u_id, 'return_sent', time.timestamp, ex=len(RETURN_MESSAGE_HOURS) * 60 * 60)
if result != 1:
continue
m = FakeMessage()
m.u_id = u_id
for interval, message in HOUR_MESSAGES:
if time.hour in interval:
hour_message = message
try:
bot.call_handler(m, 'main-menu', phrase=RETURN_MESSAGE.format(
hour_message, random.choice(GREETING_PHRASES), random.choice(ASSIST_MESSAGES)
))
except Unauthorized:
bot.logger.warning('Unauthorized for {}'.format(u_id))
except Exception as error:
bot.logger.error(error)
if __name__ == '__main__':
try:
main()
except Exception as e:
bot.logger.error(e)
| mit | 8,447,620,987,240,022,000 | 31.956044 | 103 | 0.592197 | false |
libvirt/autotest | client/common_lib/software_manager.py | 1 | 23931 | #!/usr/bin/python
"""
Software package management library.
This is an abstraction layer on top of the existing distributions high level
package managers. It supports package operations useful for testing purposes,
and multiple high level package managers (here called backends). If you want
this library to support your particular package manager/distro, please
implement the corresponding backend class.
@author: Higor Vieira Alves ([email protected])
@author: Lucas Meneghel Rodrigues ([email protected])
@author: Ramon de Carvalho Valle ([email protected])
@copyright: IBM 2008-2009
@copyright: Red Hat 2009-2010
"""
import os, re, logging, ConfigParser, optparse, random, string
try:
import yum
except Exception:
pass
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.client.bin import os_dep, utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import logging_config, logging_manager
def generate_random_string(length):
"""
Return a random string using alphanumeric characters.
@length: Length of the string that will be generated.
"""
r = random.SystemRandom()
    result = ""
    chars = string.letters + string.digits
    while length > 0:
        result += r.choice(chars)
        length -= 1
    return result
class SoftwareManagerLoggingConfig(logging_config.LoggingConfig):
"""
Used with the sole purpose of providing convenient logging setup
for the KVM test auxiliary programs.
"""
def configure_logging(self, results_dir=None, verbose=False):
super(SoftwareManagerLoggingConfig, self).configure_logging(
use_console=True,
verbose=verbose)
class SystemInspector(object):
"""
System inspector class.
This may grow up to include more complete reports of operating system and
machine properties.
"""
def __init__(self):
"""
Probe system, and save information for future reference.
"""
self.distro = utils.get_os_vendor()
self.high_level_pms = ['apt-get', 'yum', 'zypper']
def get_package_management(self):
"""
Determine the supported package management systems present on the
system. If more than one package management system installed, try
to find the best supported system.
"""
list_supported = []
for high_level_pm in self.high_level_pms:
try:
os_dep.command(high_level_pm)
list_supported.append(high_level_pm)
except Exception:
pass
pm_supported = None
if len(list_supported) == 0:
pm_supported = None
if len(list_supported) == 1:
pm_supported = list_supported[0]
elif len(list_supported) > 1:
if 'apt-get' in list_supported and self.distro in ['Debian', 'Ubuntu']:
pm_supported = 'apt-get'
elif 'yum' in list_supported and self.distro == 'Fedora':
pm_supported = 'yum'
else:
pm_supported = list_supported[0]
logging.debug('Package Manager backend: %s' % pm_supported)
return pm_supported
class SoftwareManager(object):
"""
Package management abstraction layer.
It supports a set of common package operations for testing purposes, and it
uses the concept of a backend, a helper class that implements the set of
operations of a given package management tool.
"""
def __init__(self):
"""
Class constructor.
Determines the best supported package management system for the given
operating system running and initializes the appropriate backend.
"""
inspector = SystemInspector()
backend_type = inspector.get_package_management()
if backend_type == 'yum':
self.backend = YumBackend()
elif backend_type == 'zypper':
self.backend = ZypperBackend()
elif backend_type == 'apt-get':
self.backend = AptBackend()
else:
raise NotImplementedError('Unimplemented package management '
'system: %s.' % backend_type)
def check_installed(self, name, version=None, arch=None):
"""
Check whether a package is installed on this system.
@param name: Package name.
@param version: Package version.
@param arch: Package architecture.
"""
return self.backend.check_installed(name, version, arch)
def list_all(self):
"""
List all installed packages.
"""
return self.backend.list_all()
def list_files(self, name):
"""
Get a list of all files installed by package [name].
@param name: Package name.
"""
return self.backend.list_files(name)
def install(self, name):
"""
Install package [name].
@param name: Package name.
"""
return self.backend.install(name)
def remove(self, name):
"""
Remove package [name].
@param name: Package name.
"""
return self.backend.remove(name)
def add_repo(self, url):
"""
Add package repo described by [url].
@param name: URL of the package repo.
"""
return self.backend.add_repo(url)
def remove_repo(self, url):
"""
Remove package repo described by [url].
@param url: URL of the package repo.
"""
return self.backend.remove_repo(url)
def upgrade(self):
"""
Upgrade all packages available.
"""
return self.backend.upgrade()
def provides(self, file):
"""
Returns a list of packages that provides a given capability to the
system (be it a binary, a library).
@param file: Path to the file.
"""
return self.backend.provides(file)
def install_what_provides(self, file):
"""
Installs package that provides [file].
@param file: Path to file.
"""
provides = self.provides(file)
if provides is not None:
self.install(provides)
else:
logging.warning('No package seems to provide %s', file)
class RpmBackend(object):
"""
This class implements operations executed with the rpm package manager.
rpm is a lower level package manager, used by higher level managers such
as yum and zypper.
"""
def __init__(self):
self.lowlevel_base_cmd = os_dep.command('rpm')
def _check_installed_version(self, name, version):
"""
Helper for the check_installed public method.
@param name: Package name.
@param version: Package version.
"""
cmd = (self.lowlevel_base_cmd + ' -q --qf %{VERSION} ' + name +
' 2> /dev/null')
inst_version = utils.system_output(cmd)
if inst_version >= version:
return True
else:
return False
def check_installed(self, name, version=None, arch=None):
"""
Check if package [name] is installed.
@param name: Package name.
@param version: Package version.
@param arch: Package architecture.
"""
if arch:
cmd = (self.lowlevel_base_cmd + ' -q --qf %{ARCH} ' + name +
' 2> /dev/null')
inst_archs = utils.system_output(cmd)
inst_archs = inst_archs.split('\n')
for inst_arch in inst_archs:
if inst_arch == arch:
return self._check_installed_version(name, version)
return False
elif version:
return self._check_installed_version(name, version)
else:
cmd = 'rpm -q ' + name + ' 2> /dev/null'
return (os.system(cmd) == 0)
def list_all(self):
"""
List all installed packages.
"""
installed_packages = utils.system_output('rpm -qa').splitlines()
return installed_packages
def list_files(self, name):
"""
List files installed on the system by package [name].
@param name: Package name.
"""
path = os.path.abspath(name)
if os.path.isfile(path):
option = '-qlp'
name = path
else:
option = '-ql'
l_cmd = 'rpm' + ' ' + option + ' ' + name + ' 2> /dev/null'
try:
result = utils.system_output(l_cmd)
list_files = result.split('\n')
return list_files
except error.CmdError:
return []
class DpkgBackend(object):
"""
This class implements operations executed with the dpkg package manager.
dpkg is a lower level package manager, used by higher level managers such
as apt and aptitude.
"""
def __init__(self):
self.lowlevel_base_cmd = os_dep.command('dpkg')
def check_installed(self, name):
if os.path.isfile(name):
n_cmd = (self.lowlevel_base_cmd + ' -f ' + name +
' Package 2>/dev/null')
name = utils.system_output(n_cmd)
i_cmd = self.lowlevel_base_cmd + ' -s ' + name + ' 2>/dev/null'
# Checking if package is installed
package_status = utils.system_output(i_cmd, ignore_status=True)
not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
dpkg_not_installed = re.search(not_inst_pattern, package_status)
if dpkg_not_installed:
return False
return True
def list_all(self):
"""
List all packages available in the system.
"""
installed_packages = []
raw_list = utils.system_output('dpkg -l').splitlines()[5:]
for line in raw_list:
parts = line.split()
if parts[0] == "ii": # only grab "installed" packages
                installed_packages.append("%s-%s" % (parts[1], parts[2]))
        return installed_packages
def list_files(self, package):
"""
List files installed by package [package].
@param package: Package name.
@return: List of paths installed by package.
"""
if os.path.isfile(package):
l_cmd = self.lowlevel_base_cmd + ' -c ' + package
else:
l_cmd = self.lowlevel_base_cmd + ' -l ' + package
return utils.system_output(l_cmd).split('\n')
class YumBackend(RpmBackend):
"""
Implements the yum backend for software manager.
Set of operations for the yum package manager, commonly found on Yellow Dog
Linux and Red Hat based distributions, such as Fedora and Red Hat
Enterprise Linux.
"""
def __init__(self):
"""
Initializes the base command and the yum package repository.
"""
super(YumBackend, self).__init__()
executable = os_dep.command('yum')
base_arguments = '-y'
self.base_command = executable + ' ' + base_arguments
self.repo_file_path = '/etc/yum.repos.d/autotest.repo'
self.cfgparser = ConfigParser.ConfigParser()
self.cfgparser.read(self.repo_file_path)
y_cmd = executable + ' --version | head -1'
self.yum_version = utils.system_output(y_cmd, ignore_status=True)
logging.debug('Yum backend initialized')
logging.debug('Yum version: %s' % self.yum_version)
self.yum_base = yum.YumBase()
def _cleanup(self):
"""
Clean up the yum cache so new package information can be downloaded.
"""
utils.system("yum clean all")
def install(self, name):
"""
Installs package [name]. Handles local installs.
"""
if os.path.isfile(name):
name = os.path.abspath(name)
command = 'localinstall'
else:
command = 'install'
i_cmd = self.base_command + ' ' + command + ' ' + name
try:
utils.system(i_cmd)
return True
except Exception:
return False
def remove(self, name):
"""
Removes package [name].
@param name: Package name (eg. 'ipython').
"""
r_cmd = self.base_command + ' ' + 'erase' + ' ' + name
try:
utils.system(r_cmd)
return True
except Exception:
return False
def add_repo(self, url):
"""
Adds package repository located on [url].
@param url: Universal Resource Locator of the repository.
"""
        # Check if the URL is already set
for section in self.cfgparser.sections():
for option, value in self.cfgparser.items(section):
if option == 'url' and value == url:
return True
# Didn't find it, let's set it up
while True:
section_name = 'software_manager' + '_' + generate_random_string(4)
if not self.cfgparser.has_section(section_name):
break
self.cfgparser.add_section(section_name)
self.cfgparser.set(section_name, 'name',
'Repository added by the autotest software manager.')
self.cfgparser.set(section_name, 'url', url)
self.cfgparser.set(section_name, 'enabled', 1)
self.cfgparser.set(section_name, 'gpgcheck', 0)
        with open(self.repo_file_path, 'w') as repo_file:
            self.cfgparser.write(repo_file)
def remove_repo(self, url):
"""
Removes package repository located on [url].
@param url: Universal Resource Locator of the repository.
"""
for section in self.cfgparser.sections():
for option, value in self.cfgparser.items(section):
if option == 'url' and value == url:
self.cfgparser.remove_section(section)
                    with open(self.repo_file_path, 'w') as repo_file:
                        self.cfgparser.write(repo_file)
def upgrade(self):
"""
Upgrade all available packages.
"""
r_cmd = self.base_command + ' ' + 'update'
try:
utils.system(r_cmd)
return True
except Exception:
return False
def provides(self, name):
"""
Returns a list of packages that provides a given capability.
@param name: Capability name (eg, 'foo').
"""
d_provides = self.yum_base.searchPackageProvides(args=[name])
provides_list = [key for key in d_provides]
if provides_list:
logging.info("Package %s provides %s", provides_list[0], name)
return str(provides_list[0])
else:
return None
class ZypperBackend(RpmBackend):
"""
Implements the zypper backend for software manager.
Set of operations for the zypper package manager, found on SUSE Linux.
"""
def __init__(self):
"""
Initializes the base command and the yum package repository.
"""
super(ZypperBackend, self).__init__()
self.base_command = os_dep.command('zypper') + ' -n'
z_cmd = self.base_command + ' --version'
self.zypper_version = utils.system_output(z_cmd, ignore_status=True)
logging.debug('Zypper backend initialized')
logging.debug('Zypper version: %s' % self.zypper_version)
def install(self, name):
"""
Installs package [name]. Handles local installs.
@param name: Package Name.
"""
path = os.path.abspath(name)
i_cmd = self.base_command + ' install -l ' + name
try:
utils.system(i_cmd)
return True
except Exception:
return False
def add_repo(self, url):
"""
Adds repository [url].
@param url: URL for the package repository.
"""
ar_cmd = self.base_command + ' addrepo ' + url
try:
utils.system(ar_cmd)
return True
except Exception:
return False
def remove_repo(self, url):
"""
Removes repository [url].
@param url: URL for the package repository.
"""
rr_cmd = self.base_command + ' removerepo ' + url
try:
utils.system(rr_cmd)
return True
except Exception:
return False
def remove(self, name):
"""
Removes package [name].
"""
r_cmd = self.base_command + ' ' + 'erase' + ' ' + name
try:
utils.system(r_cmd)
return True
except Exception:
return False
def upgrade(self):
"""
Upgrades all packages of the system.
"""
u_cmd = self.base_command + ' update -l'
try:
utils.system(u_cmd)
return True
except Exception:
return False
def provides(self, name):
"""
Searches for what provides a given file.
@param name: File path.
"""
p_cmd = self.base_command + ' what-provides ' + name
list_provides = []
try:
p_output = utils.system_output(p_cmd).split('\n')[4:]
for line in p_output:
line = [a.strip() for a in line.split('|')]
try:
state, pname, type, version, arch, repository = line
if pname not in list_provides:
list_provides.append(pname)
except IndexError:
pass
if len(list_provides) > 1:
logging.warning('More than one package found, '
'opting by the first queue result')
if list_provides:
logging.info("Package %s provides %s", list_provides[0], name)
return list_provides[0]
return None
except Exception:
return None
class AptBackend(DpkgBackend):
"""
Implements the apt backend for software manager.
Set of operations for the apt package manager, commonly found on Debian and
Debian based distributions, such as Ubuntu Linux.
"""
def __init__(self):
"""
Initializes the base command and the debian package repository.
"""
super(AptBackend, self).__init__()
executable = os_dep.command('apt-get')
self.base_command = executable + ' -y'
self.repo_file_path = '/etc/apt/sources.list.d/autotest'
self.apt_version = utils.system_output('apt-get -v | head -1',
ignore_status=True)
logging.debug('Apt backend initialized')
logging.debug('apt version: %s' % self.apt_version)
def install(self, name):
"""
Installs package [name].
@param name: Package name.
"""
command = 'install'
i_cmd = self.base_command + ' ' + command + ' ' + name
try:
utils.system(i_cmd)
return True
except Exception:
return False
def remove(self, name):
"""
Remove package [name].
@param name: Package name.
"""
command = 'remove'
flag = '--purge'
r_cmd = self.base_command + ' ' + command + ' ' + flag + ' ' + name
try:
utils.system(r_cmd)
return True
except Exception:
return False
def add_repo(self, repo):
"""
Add an apt repository.
@param repo: Repository string. Example:
'deb http://archive.ubuntu.com/ubuntu/ maverick universe'
"""
        repo_file = open(self.repo_file_path, 'a+')
        repo_file.seek(0)
        repo_file_contents = repo_file.read()
        if repo not in repo_file_contents:
            repo_file.write(repo)
        repo_file.close()
def remove_repo(self, repo):
"""
Remove an apt repository.
@param repo: Repository string. Example:
'deb http://archive.ubuntu.com/ubuntu/ maverick universe'
"""
repo_file = open(self.repo_file_path, 'r')
new_file_contents = []
        for line in repo_file.readlines():
if not line == repo:
new_file_contents.append(line)
repo_file.close()
new_file_contents = "\n".join(new_file_contents)
        repo_file = open(self.repo_file_path, 'w')
repo_file.write(new_file_contents)
repo_file.close()
def upgrade(self):
"""
Upgrade all packages of the system with eventual new versions.
"""
ud_command = 'update'
ud_cmd = self.base_command + ' ' + ud_command
try:
utils.system(ud_cmd)
except Exception:
logging.error("Apt package update failed")
up_command = 'upgrade'
up_cmd = self.base_command + ' ' + up_command
try:
utils.system(up_cmd)
return True
except Exception:
return False
def provides(self, file):
"""
Return a list of packages that provide [file].
@param file: File path.
"""
if not self.check_installed('apt-file'):
self.install('apt-file')
command = os_dep.command('apt-file')
cache_update_cmd = command + ' update'
try:
utils.system(cache_update_cmd, ignore_status=True)
except Exception:
logging.error("Apt file cache update failed")
fu_cmd = command + ' search ' + file
try:
provides = utils.system_output(fu_cmd).split('\n')
list_provides = []
for line in provides:
if line:
try:
line = line.split(':')
package = line[0].strip()
path = line[1].strip()
if path == file and package not in list_provides:
list_provides.append(package)
except IndexError:
pass
if len(list_provides) > 1:
logging.warning('More than one package found, '
'opting by the first queue result')
if list_provides:
logging.info("Package %s provides %s", list_provides[0], file)
return list_provides[0]
return None
except Exception:
return None
if __name__ == '__main__':
parser = optparse.OptionParser(
"usage: %prog [install|remove|list-all|list-files|add-repo|remove-repo|"
"upgrade|what-provides|install-what-provides] arguments")
parser.add_option('--verbose', dest="debug", action='store_true',
help='include debug messages in console output')
options, args = parser.parse_args()
debug = options.debug
logging_manager.configure_logging(SoftwareManagerLoggingConfig(),
verbose=debug)
software_manager = SoftwareManager()
if args:
action = args[0]
args = " ".join(args[1:])
else:
action = 'show-help'
if action == 'install':
software_manager.install(args)
elif action == 'remove':
software_manager.remove(args)
if action == 'list-all':
software_manager.list_all()
elif action == 'list-files':
software_manager.list_files(args)
elif action == 'add-repo':
software_manager.add_repo(args)
elif action == 'remove-repo':
software_manager.remove_repo(args)
elif action == 'upgrade':
software_manager.upgrade()
elif action == 'what-provides':
software_manager.provides(args)
elif action == 'install-what-provides':
software_manager.install_what_provides(args)
elif action == 'show-help':
parser.print_help()
| gpl-2.0 | -5,629,466,862,281,476,000 | 29.254109 | 83 | 0.557018 | false |
espdev/readthedocs.org | readthedocs/projects/signals.py | 1 | 1499 | """Project signals"""
import logging
import django.dispatch
from django.contrib import messages
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy as _
from readthedocs.oauth.services import registry
before_vcs = django.dispatch.Signal(providing_args=["version"])
after_vcs = django.dispatch.Signal(providing_args=["version"])
before_build = django.dispatch.Signal(providing_args=["version"])
after_build = django.dispatch.Signal(providing_args=["version"])
project_import = django.dispatch.Signal(providing_args=["project"])
log = logging.getLogger(__name__)
@receiver(project_import)
def handle_project_import(sender, **kwargs):
"""Add post-commit hook on project import"""
project = sender
request = kwargs.get('request')
_set = False
_service = None
for service_cls in registry:
if service_cls.is_project_service(project):
for service in service_cls.for_user(request.user):
_service = service
if service.setup_webhook(project):
messages.success(request, _('Webhook activated'))
_set = True
else:
messages.error(request, _('Webhook configuration failed'))
if not _set and _service:
messages.error(
request,
_('No accounts available to set webhook on. '
'Please connect your %s account.' % _service.get_adapter()().get_provider().name)
)
| mit | 8,835,816,446,431,787,000 | 30.893617 | 95 | 0.651768 | false |
apple/swift-lldb | packages/Python/lldbsuite/test/lang/swift/expression/access_control/TestExpressionAccessControl.py | 1 | 1897 | # TestExpressionAccessControl.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Make sure expressions ignore access control
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftExpressionAccessControl(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
def check_expression(self, expression, expected_result, use_summary=True):
value = self.frame().EvaluateExpression(expression)
self.assertTrue(value.IsValid(), expression + "returned a valid value")
if self.TraceOn():
print(value.GetSummary())
print(value.GetValue())
if use_summary:
answer = value.GetSummary()
else:
answer = value.GetValue()
report_str = "%s expected: %s got: %s" % (
expression, expected_result, answer)
self.assertTrue(answer == expected_result, report_str)
@swiftTest
def test_swift_expression_access_control(self):
"""Make sure expressions ignore access control"""
self.build()
lldbutil.run_to_source_breakpoint(
self, 'Set breakpoint here', lldb.SBFileSpec('main.swift'))
self.check_expression("foo.m_a", "3", use_summary=False)
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
| apache-2.0 | -7,535,670,957,241,162,000 | 32.875 | 80 | 0.650501 | false |
alexis-roche/niseg | examples/partial_volume_estimation.py | 1 | 1753 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Script example of partial volume estimation
"""
from argparse import ArgumentParser
import numpy as np
import nibabel as nb
from niseg import BrainT1PVE
# Parse command line
description = 'Estimate brain tissue concentrations of CSF, GM and WM from a skull-stripped \
T1 image. If no mask image is provided, the mask is defined by thresholding the input image \
above zero (strictly).'
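# Example invocation (hypothetical file names):
#   python partial_volume_estimation.py t1_brain.nii.gz --mask brain_mask.nii.gz --niters 30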
parser = ArgumentParser(description=description)
parser.add_argument('img', metavar='img', nargs='+', help='input image')
parser.add_argument('--mask', dest='mask', help='mask image')
parser.add_argument('--niters', dest='niters', type=int,
                    help='number of iterations (default=%d)' % 25)
parser.add_argument('--beta', dest='beta', type=float,
                    help='Spatial smoothness beta parameter (default=%f)' % 0.5)
parser.add_argument('--ngb_size', dest='ngb_size', type=int,
                    help='Grid neighborhood system (default=%d)' % 6)
args = parser.parse_args()
def get_argument(dest, default):
    val = getattr(args, dest)
    if val is None:
return default
else:
return val
# Input image
img = nb.load(args.img[0])
# Input mask image
mask_img = get_argument('mask', None)
if mask_img is None:
mask_img = img
else:
mask_img = nb.load(mask_img)
mask = mask_img.get_data() > 0
# Other optional arguments
niters = get_argument('niters', 25)
beta = get_argument('beta', None)
ngb_size = get_argument('ngb_size', 6)
# Perform tissue classification
PV = BrainT1PVE(img, mask=mask, beta=beta, ngb_size=ngb_size)
PV.run(niters=niters, print_parameters=True)
# Save tissue concentration maps
PV.save('temp')
| bsd-3-clause | 9,000,726,666,906,467,000 | 28.711864 | 88 | 0.697661 | false |
leigh2/gchq_puzzle | puzzle.py | 1 | 3170 | #!/usr/bin/env python
import numpy as np
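# Nonogram clues for the 25x25 GCHQ Christmas-card puzzle: run lengths of shaded cells per column and per row.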
cols = {0: [7,2,1,1,7],
1: [1,1,2,2,1,1],
2: [1,3,1,3,1,3,1,3,1],
3: [1,3,1,1,5,1,3,1],
4: [1,3,1,1,4,1,3,1],
5: [1,1,1,2,1,1],
6: [7,1,1,1,1,1,7],
7: [1,1,3],
8: [2,1,2,1,8,2,1],
9: [2,2,1,2,1,1,1,2],
10: [1,7,3,2,1],
11: [1,2,3,1,1,1,1,1],
12: [4,1,1,2,6],
13: [3,3,1,1,1,3,1],
14: [1,2,5,2,2],
15: [2,2,1,1,1,1,1,2,1],
16: [1,3,3,2,1,8,1],
17: [6,2,1],
18: [7,1,4,1,1,3],
19: [1,1,1,1,4],
20: [1,3,1,3,7,1],
21: [1,3,1,1,1,2,1,1,4],
22: [1,3,1,4,3,3],
23: [1,1,2,2,2,6,1],
24: [7,1,3,2,1,1]}
rows = {0: [7,3,1,1,7],
1: [1,1,2,2,1,1],
2: [1,3,1,3,1,1,3,1],
3: [1,3,1,1,6,1,3,1],
4: [1,3,1,5,2,1,3,1],
5: [1,1,2,1,1],
6: [7,1,1,1,1,1,7],
7: [3,3],
8: [1,2,3,1,1,3,1,1,2],
9: [1,1,3,2,1,1],
10: [4,1,4,2,1,2],
11: [1,1,1,1,1,4,1,3],
12: [2,1,1,1,2,5],
13: [3,2,2,6,3,1],
14: [1,9,1,1,2,1],
15: [2,1,2,2,3,1],
16: [3,1,1,1,1,5,1],
17: [1,2,2,5],
18: [7,1,2,1,1,1,3],
19: [1,1,2,1,2,2,1],
20: [1,3,1,4,5,1],
21: [1,3,1,3,10,2],
22: [1,3,1,1,6,6],
23: [1,1,2,1,1,2],
24: [7,2,1,2,5]}
grid = np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,1,1,0,0,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,1,1,0,0,0,0,1,1,0,0,0,0,1,0,0,0,0,1,1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype=np.int)
length = grid.shape[0]
| gpl-2.0 | -3,621,785,363,306,098,700 | 36.738095 | 84 | 0.346372 | false |
ShrimpingIt/tableaux | depictions/umbrella/main.py | 1 | 2106 | from os import urandom
from time import sleep
from neopixel import NeoPixel
from machine import Pin
from math import floor
dataPin = Pin(13)
ledCount = 27
np = NeoPixel(dataPin, ledCount)
def blank():
for pos in range(ledCount):
np[pos]=(0,0,0)
np.write();
def visitAll():
for pos in range(ledCount):
blank()
np[pos]=(0,0,255)
np.write()
print(pos)
input('...')
def log2_approx(val):
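    # Effectively the bit length of int(val): the smallest n such that int(val) < 2**n.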
val = floor(val)
approx = 0
while val != 0:
val &= ~ (1<<approx)
approx = approx + 1
return approx
def rand_int(bound):
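    # Random integer in [0, bound) assembled from os.urandom bytes (the small modulo bias is acceptable here).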
byteCount = (log2_approx(bound) // 8) + 1 # each byte is 8 powers of two
val = 0
for idx, entry in enumerate(bytearray(urandom(byteCount))):
val |= entry << (idx * 8)
return val % bound
blue = (0,0,255)
sequences = [
[0],
[17,1],
[18,2,16],
[19,3,15],
[20,4,14,12],
[21,5,13,11],
[23,6,10],
[24,7,9],
[25,8],
[26],
]
positions = [-1 for entry in sequences]
under = [
3, 15,
4, 14, 12,
5, 13, 11,
6, 10
]
#sheltered = under
sheltered = []
d0 = Pin(16, Pin.OUT)
d1 = Pin(5, Pin.OUT)
d0.high()
d1.high()
'''
d1 = Pin(5, Pin.OUT)
pwm1 = PWM(d1)
pwm1.freq(1000)
pwm1.duty(256)
ramp = range(0,1024, 8)
while True:
for duty in ramp:
pwm1.duty(duty)
sleep(0.05)
for duty in reversed(ramp):
pwm1.duty(duty)
sleep(0.05)
'''
def run():
while True:
blank()
for index, sequence in enumerate(sequences):
# retrieve activity for this drop
position = positions[index]
if position == -1:
# inactive drops sometimes become active (at 0)
if rand_int(2) == 0:
position = 0
else:
position = position + 1 # previously active drops fall one more step
if position == len(sequence): # drops falling off the bottom become inactive
position = -1
elif sequence[position] in sheltered: # drops going into sheltered area become inactive
position = -1
# light any active lights
if position != -1:
pixel = sequence[position]
np[pixel] = blue
# store activity for this drop for next time round loop
positions[index] = position
np.write()
sleep(0.05)
run()
| agpl-3.0 | 2,675,218,914,456,920,600 | 16.697479 | 91 | 0.625356 | false |
fullphat/redsquare | support/blink1.py | 1 | 8924 | #!/usr/bin/env python
import sys
import time
import re
import sys
import uuid
debugimport=False
use_pyusb=False
try:
print "[blink1]: trying blink1_pyusb..."
from blink1_pyusb import Blink1 as Blink1_pyusb
print "[blink1]: using blink1_pyusb"
use_pyusb = True
#sys.modules['Blink1'] = blink1_pyusb
except ImportError:
try:
print "[blink1]: couldn't load blink1_pyusb, trying blink1_ctypes..."
from blink1_ctypes import Blink1 as Blink1_ctypes
#sys.modules['Blink1'] = blink1_ctypes
print "[blink1]: using blink1_ctypes"
except ImportError:
print "[blink1]: Failed to load blink1_pyusb or blink1_ctypes"
print "[blink1]: Try installing pyusb using 'sudo pip install pyusb'"
sys.exit(1)
hostid = uuid.uuid4().hex[:8]
class Blink1:
'''
Object wrapper class.
    This is a wrapper for objects. It is initialised with the object to wrap
    and then proxies unhandled attribute lookups to it.
Other classes are to inherit from it.
'''
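    # Usage sketch (values are illustrative):
    #   b1 = Blink1()
    #   b1.fade_to_rgbn(300, 255, 0, 0, 0)   # fade LED 0 to red over 300 ms
    #   print b1.get_version()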
def __init__(self, unit=0):
'''
Wrapper constructor.
'''
# wrap the object
if use_pyusb :
blink1 = Blink1_pyusb(unit)
else :
blink1 = Blink1_ctypes()
self._wrapped_obj = blink1
def __getattr__(self, attr):
# see if this object has attr
# NOTE do not use hasattr, it goes into
# infinite recurrsion
if attr in self.__dict__:
# this object has it
return getattr(self, attr)
# proxy to the wrapped object
try :
return getattr(self._wrapped_obj, attr)
except Exception:
print "****** error!"
return None
# FIXME: can't overload methods?
# def fade_to_rgb(self, millis, colorstr):
# rgb = Blink1.parse_color_string(colorstr)
# self._wrapped_obj.fade_to_rgb(millis, rgb[0], rgb[1], rgb[2])
def get_hostid(self): # FIXME
return hostid
def get_blink1id(self):
return self.get_hostid() + self.get_serialnumber()
@classmethod
def parse_color_string(cls,rgbstr):
        '''
        Parse a color string such as "#ff0000" or "255,0,0" and return an
        (r, g, b) tuple, or None if the string cannot be parsed.
        '''
rgbstr = rgbstr.lower()
rgb = None
# match hex color code "#FFcc00"
m = re.search(r"#([0-9a-f]{6})", rgbstr)
if m:
rgb = tuple(ord(c) for c in m.group(1).decode('hex'))
else:
# match color triplets like "255,0,0" and "0xff,0xcc,0x33"
m = re.search(r"(0x[\da-f]+|\d+),(0x[\da-f]+|\d+),(0x[\da-f]+|\d+)",
rgbstr)
if m:
rgb = tuple(int(c,0) for c in m.groups())
return rgb
class Blink1Pattern:
def __init__(self):
self.repeats = 0
self.colors = []
self.times = []
def __repr__(self):
return "{ 'repeats': "+ repr(self.repeats) +", 'colors': "+repr(self.colors)+", 'times':"+repr(self.times) +" }"
def __str__(self):
#strtimes = "".join( str(n) for n in self.times )
return "{ 'repeats': "+ str(self.repeats) +", 'colors': "+str(self.colors)+", 'times': [" + ", ".join( str(n) for n in self.times ) +"] }"
@classmethod
def parse_pattern_string(cls, pattstr):
'''
parse color patterns in the format: '3, #ff00ff, 1.5 ,#000000, 1.0'
'''
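        # e.g. '2, #ff0000, 0.5, #000000, 0.5' parses to repeats=2,
        # colors=[(255, 0, 0), (0, 0, 0)], times=[0.5, 0.5]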
print "parse_pattern_string:"+pattstr
vals = pattstr.split(',')
if( len(vals) % 2 == 0 ) : # even is bad, must be odd
print "bad patternstr: "+pattstr
else:
patt = Blink1Pattern()
patt.repeats = int(vals[0]) #
            # colors: every other element starting at index 1
            # times: every other element starting at index 2
patt.colors = map( Blink1.parse_color_string, vals[1::2])
patt.times = [float(m) for m in vals[2::2]]
return patt
def demo(blink1):
    '''
    Cycle the attached blink(1) through a short sequence of demo colors.
    '''
print "blink1 version: "+ blink1.get_version()
democolors = [ (255, 0, 0), # red
( 0,255, 0), # grn
( 0, 0,255), # blu
(255,255, 0), # yellow
( 0,255,255), # cyan
(255, 0,255), # magenta
( 0, 0, 0), # off
]
demo_millis = 200
for rgb in democolors:
(r,g,b) = rgb
print "fading to %3i,%3i,%3i" % (r,g,b)
blink1.fade_to_rgbn( demo_millis/2, r,g,b, 0 )
time.sleep( demo_millis/1000.0 )
blink1.fade_to_rgbn( demo_millis/2, 0,0,0, 0 )
time.sleep( demo_millis/1000.0 )
def main():
    '''
    Parse command-line options and run the requested blink(1) action.
    '''
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--demo',
action='store_const', dest='cmd',const='demo',
help='run simple demo')
parser.add_option('--version',
action='store_const', dest='cmd',const='version',
help='return firmware version')
parser.add_option('--hostid',
action='store_const', dest='cmd',const='hostid',
help='return hostid')
parser.add_option('--blink',
dest='blink',default=0, type='int',
help='blink specified number of times')
parser.add_option('--play',
dest='play',default=0, type='string',
help='play built-in light sequence')
parser.add_option('--patt',
dest='patt',default=0, type='string',
help='play specified color pattern')
parser.add_option('--rgb', default='',
dest='rgb',
help="the RGB color to use")
parser.add_option('-l', '--led', default=0, type='int',
dest='ledn',
help="which LED to use (default=both)")
parser.add_option('-m', '--millis', default=300, type='int',
dest='fade_millis',
help="fade millis for color commands")
parser.add_option('-t', '--delay', default=500, type='int',
dest='delay_millis',
help="millis between commands like blink, random, etc.")
parser.add_option('--debug',action="store_true", dest='debug' )
parser.add_option('--on', action="store_const",dest='rgb',const="#FFFFFF")
parser.add_option('--off', action="store_const",dest='rgb',const="#000000")
parser.add_option('--red', action="store_const",dest='rgb',const="#FF0000")
parser.add_option('--green',action="store_const",dest='rgb',const="#00FF00")
parser.add_option('--blue', action="store_const",dest='rgb',const="#0000FF")
(options, args) = parser.parse_args()
rgbstr = options.rgb
fade_millis = options.fade_millis
ledn = options.ledn
rgb = Blink1.parse_color_string( rgbstr )
debug_rw = options.debug
#print "rgbval:%s millis:%i ledn:%i " % (repr(rgb),fade_millis,ledn)
#
blink1 = Blink1()
if blink1.dev == None :
print("no blink1 found")
# blink command (takes an argument of number of blinks)
if options.blink :
if not rgb : rgb = (255,255,255)
for i in range(0,options.blink):
blink1.fade_to_rgbn( fade_millis, rgb[0],rgb[1],rgb[2], ledn)
time.sleep( options.delay_millis / 1000.0 )
blink1.fade_to_rgbn( fade_millis, 0,0,0, ledn)
time.sleep( options.delay_millis / 1000.0 )
elif options.play :
play = map(int, options.play.split(',')) # convert str list to int list
#print "play: "+repr(options.play) + ','+repr(play)
play.extend( [0] * (4 - len(play)) ) # make list fixed size, seems dumb
blink1.playloop( play[0], play[1], play[2], play[3] )
elif options.patt :
blink1patt = Blink1Pattern.parse_pattern_string(options.patt)
print "playing pattern: "+ str(blink1patt)
for i in range(blink1patt.repeats):
for j in range(len(blink1patt.colors)):
color = blink1patt.colors[j]
millis = int( blink1patt.times[j] * 1000 )
print "color: "+str(color) +", millis: "+ str(millis)
blink1.fade_to_rgb( millis/2, color[0], color[1], color[2])
time.sleep( millis / 1000.0 )
elif options.cmd == 'version':
print "version: "+ blink1.get_version()
elif options.cmd == 'hostid':
print "hostid: "+ blink1.get_hostid()
elif options.cmd == 'demo' :
demo(blink1)
elif options.cmd == None and rgb :
print "fading to #%02x%02x%02x" % (rgb) + " in %d msec" % fade_millis
blink1.fade_to_rgbn( fade_millis, rgb[0],rgb[1],rgb[2], ledn)
else:
parser.print_help()
if __name__ == "__main__":
sys.exit(main())
| mit | -756,769,589,801,679,400 | 32.051852 | 146 | 0.532721 | false |
AddonScriptorDE/plugin.video.trailerseite_de | default.py | 1 | 11540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import socket
import re
import sys
import xbmcplugin
import xbmcaddon
import xbmcgui
socket.setdefaulttimeout(30)
pluginhandle = int(sys.argv[1])
addonId = 'plugin.video.trailerseite_de'
addon = xbmcaddon.Addon(id=addonId)
translation = addon.getLocalizedString
xbox = xbmc.getCondVisibility("System.Platform.xbox")
maxVideoQuality = str(addon.getSetting("maxVideoQuality"))
showAllTrailers = addon.getSetting("showAllTrailers") == "true"
forceViewMode = addon.getSetting("forceViewMode") == "true"
viewMode = str(addon.getSetting("viewMode"))
baseUrl = "http://www.trailerseite.de"
def index():
addDir(translation(30001), baseUrl+"/kino/neustarts-film-trailer.html", 'listMoviesMain', "")
addDir(translation(30002), baseUrl+"/kino/film-trailer-vorschau.html", 'listMoviesMain', "")
addDir(translation(30003), baseUrl+"/kino/highlights-film-trailer.html", 'listMoviesMain', "")
addDir(translation(30004), baseUrl+"/kino/arthouse-film-trailer.html", 'listMoviesMain', "")
addDir(translation(30005), baseUrl+"/kino/charts/deutsche-kino-top-10.html", 'listMoviesMain', "")
addDir(translation(30006), baseUrl+"/kino/charts/us-kino-top-10.html", 'listMoviesMain', "")
addDir(translation(30007), baseUrl+"/kino/charts/arthouse-kino-top-10.html", 'listMoviesMain', "")
addDir(translation(30015), "http://feeds.feedburner.com/updates?format=xml", 'listLastTrailer', "")
addDir(translation(30016), "http://feeds.feedburner.com/updates?format=xml", 'listLastVideos', "")
addDir(translation(30014), baseUrl+"/kino/starttermine-kinofilme-24075.html", 'listMoviesDate', "")
addDir(translation(30008), baseUrl+"/kino/film-trailer-a-z.html", 'listMoviesAZ', "")
addDir(translation(30009), baseUrl+"/trailer-dvd/neustarts/", 'listMoviesMain', "")
addDir(translation(30010), baseUrl+"/trailer-dvd/dvd-vorschau.html", 'listMoviesMain', "")
addDir(translation(30011), baseUrl+"/trailer-dvd/dvd-top-10.html", 'listMoviesMain', "")
addDir(translation(30012), baseUrl+"/filmkritiken/16007-film-specials.html", 'listMoviesMain', "")
addDir("Der ehrliche Dennis", baseUrl+"/der-ehrliche-dennis/index.html", 'listMoviesMain', "")
xbmcplugin.endOfDirectory(pluginhandle)
def listMoviesMain(url):
content = getUrl(url)
spl = content.split('<div class="expoteaser">')
listMovies(url, spl)
spl = content.split('<div class="teasermultiple">')
listMovies(url, spl)
spl = content.split('<div class="rightteaser">')
listMovies(url, spl)
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode:
xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listMovies(mainUrl, spl):
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
url = baseUrl+match[0]
match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
title = cleanTitle(title)
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumb = baseUrl+"/"+match[0]
thumbNew = thumb.replace("-expo.jpg", ".jpg").replace("-right.jpg", ".jpg").replace(".jpg", "-right.jpg")
req = urllib2.Request(thumbNew)
try:
urllib2.urlopen(req)
thumb = thumbNew
except:
thumbNew = thumb.replace("-expo.jpg", ".jpg").replace("-right.jpg", ".jpg").replace(".jpg", "-expo.jpg")
req = urllib2.Request(thumbNew)
try:
urllib2.urlopen(req)
thumb = thumbNew
except:
pass
if showAllTrailers and mainUrl not in [baseUrl+"/der-ehrliche-dennis/index.html", baseUrl+"/filmkritiken/16007-film-specials.html"]:
addDir(title, url, 'listTrailers', thumb)
else:
addLink(title, url, 'playVideo', thumb, "")
def listTrailers(url, name, thumb):
content = getUrl(url)
spl = content.split('<div class="extraplayer">')
addLink(name+" Trailer", url, 'playVideo', thumb, "")
for i in range(1, len(spl), 1):
entry = spl[i]
if 'class="aFLVPlayer"' not in entry:
entry = entry[entry.find("<a href=")+1:]
match = re.compile('<a href="(.+?)">(.+?)</a>', re.DOTALL).findall(entry)
url = match[0][0]
title = match[0][1]
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumb = match[0]
addLink(title, url, 'playVideo', thumb, "")
xbmcplugin.endOfDirectory(pluginhandle)
if forceViewMode:
xbmc.executebuiltin('Container.SetViewMode('+viewMode+')')
def listLastTrailer(url):
content = getUrl(url)
spl = content.split('<item>')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('<link>(.+?)</link>', re.DOTALL).findall(entry)
url = match[0]
match = re.compile('<title>(.+?)</title>', re.DOTALL).findall(entry)
title = match[0]
match = re.compile('<date>(.+?)-(.+?)-(.+?) ', re.DOTALL).findall(entry)
month = match[0][1]
day = match[0][2]
title = day+"."+month+" - "+title
if '/film/' in url and "Trailer" in title:
addLink(title, url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def listLastVideos(url):
content = getUrl(url)
spl = content.split('<item>')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('<link>(.+?)</link>', re.DOTALL).findall(entry)
url = match[0]
match = re.compile('<title>(.+?)</title>', re.DOTALL).findall(entry)
title = match[0]
match = re.compile('<date>(.+?)-(.+?)-(.+?) ', re.DOTALL).findall(entry)
month = match[0][1]
day = match[0][2]
title = day+"."+month+" - "+title
if '/film/' in url and "Trailer" not in title:
addLink(title, url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def listMoviesAZ(url):
xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
content = getUrl(url)
content = content[content.find('<div class="abhaken">'):]
content = content[:content.find('</table>'):]
match = re.compile('<a href="(.+?)" title=".+?" >(.+?)</a>', re.DOTALL).findall(content)
for url, title in match:
match2 = re.compile('<a href=".+?" title="(.+?)"', re.DOTALL).findall(title)
if match2:
title = cleanTitle(match2[0][0])
else:
title = cleanTitle(title)
if showAllTrailers:
addDir(title, baseUrl+url, 'listTrailers', "")
else:
addLink(title, baseUrl+url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def listMoviesDate(url):
content = getUrl(url)
spl = content.split('<div class="textbox-white">')
for i in range(1, len(spl), 1):
entry = spl[i].replace('">', '" title="TEST" >')
entry = entry[:entry.find("</tr>")]
match = re.compile('<h3>Ab (.+?).20', re.DOTALL).findall(entry)
date = match[0]
match = re.compile('<a href="(.+?)" title=".+?" >(.+?)</a>', re.DOTALL).findall(entry)
for url, title in match:
title = date+" - "+cleanTitle(title)
if showAllTrailers:
addDir(title, baseUrl+url, 'listTrailers', "")
else:
addLink(title, baseUrl+url, 'playVideo', "", "")
xbmcplugin.endOfDirectory(pluginhandle)
def playVideo(url):
content = getUrl(url)
matchDM = re.compile('src="http://www.dailymotion.com/embed/video/(.+?)\\?', re.DOTALL).findall(content)
content = content[content.find('<div class="flashplayer">'):]
matchSD = re.compile('href="(.+?)"', re.DOTALL).findall(content)
matchHD = re.compile('<a class="aFLVPlayer" href="(.+?)"></a>', re.DOTALL).findall(content)
streamUrl = ""
if matchHD and maxVideoQuality == "1":
streamUrl = matchHD[0]
elif matchSD:
streamUrl = matchSD[0]
elif matchDM:
streamUrl = getDailyMotionUrl(matchDM[0])
listitem = xbmcgui.ListItem(path=streamUrl)
xbmcplugin.setResolvedUrl(pluginhandle, True, listitem)
def queueVideo(url, name):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
listitem = xbmcgui.ListItem(name)
playlist.add(url, listitem)
def getDailyMotionUrl(id):
if xbox:
url = "plugin://video/DailyMotion.com/?url="+id+"&mode=playVideo"
else:
url = "plugin://plugin.video.dailymotion_com/?url="+id+"&mode=playVideo"
return url
def cleanTitle(title):
title = title.replace("<", "<").replace(">", ">").replace("&", "&").replace("&", "&").replace("'", "'")
title = title.replace("'", "'").replace("–", "-").replace("“", "-").replace("”", "-").replace("’", "'")
title = title.replace(""", "\"").replace("ü", "ü").replace("ä", "ä").replace("ö", "ö")
title = title.replace("Trailer", "").strip()
return title
def getUrl(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:19.0) Gecko/20100101 Firefox/19.0')
response = urllib2.urlopen(req)
link = response.read()
response.close()
return link
def parameters_string_to_dict(parameters):
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
def addLink(name, url, mode, iconimage, desc):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+urllib.quote_plus(mode)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": desc})
liz.setProperty('IsPlayable', 'true')
liz.addContextMenuItems([(translation(30013), 'RunPlugin(plugin://'+addonId+'/?mode=queueVideo&url='+urllib.quote_plus(u)+'&name='+urllib.quote_plus(name)+')',)])
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz)
return ok
def addDir(name, url, mode, iconimage):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+urllib.quote_plus(mode)+"&name="+urllib.quote_plus(name)+"&thumb="+urllib.quote_plus(iconimage)
ok = True
liz = xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo(type="Video", infoLabels={"Title": name})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
name = urllib.unquote_plus(params.get('name', ''))
thumb = urllib.unquote_plus(params.get('thumb', ''))
if mode == 'listMoviesMain':
listMoviesMain(url)
elif mode == 'listLastTrailer':
listLastTrailer(url)
elif mode == 'listLastVideos':
listLastVideos(url)
elif mode == 'listVideosCharts':
listVideosCharts(url)
elif mode == 'listMoviesAZ':
listMoviesAZ(url)
elif mode == 'listMoviesDate':
listMoviesDate(url)
elif mode == 'listTrailers':
listTrailers(url, name, thumb)
elif mode == 'playVideo':
playVideo(url)
elif mode == 'queueVideo':
queueVideo(url, name)
else:
index()
| gpl-2.0 | -1,058,085,944,360,981,600 | 39.911348 | 166 | 0.623039 | false |
bmmalone/as-auto-sklearn | as_asl/train_oasc_models.py | 1 | 2559 | #! /usr/bin/env python3
import argparse
import misc.automl_utils as automl_utils
import misc.parallel as parallel
import as_asl.as_asl_command_line_utils as clu
import as_asl.as_asl_filenames as filenames
import as_asl.as_asl_utils as as_asl_utils
from as_asl.as_asl_ensemble import ASaslPipeline
import logging
import misc.logging_utils as logging_utils
logger = logging.getLogger(__name__)
def _log_info(msg, scenario_name, fold):
msg = "[{}, fold {}]: {}".format(scenario_name, fold, msg)
logger.info(msg)
def _outer_cv(fold, args, config):
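    # Fit an ASaslPipeline on the training split of one outer cross-validation fold and dump it to disk.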
msg = "loading the scenario"
_log_info(msg, args.scenario, fold)
scenario_name, scenario = automl_utils.load_scenario(args.scenario)
msg = "extracting fold training data"
_log_info(msg, scenario_name, fold)
testing, training = scenario.get_split(fold)
msg = "constructing and fitting the pipeline"
_log_info(msg, scenario_name, fold)
pipeline = ASaslPipeline(args)
pipeline_fit = pipeline.fit(scenario=training)
msg = "writing pipeline to disk"
_log_info(msg, scenario_name, fold)
model_type = scenario.scenario
model_filename = filenames.get_model_filename(
config['base_path'],
model_type,
fold=fold,
note=config.get('note')
)
pipeline_fit.dump(model_filename)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Run the Bayesian optimization-based approach for "
"training models for algorithm selection.")
clu.add_config(parser)
clu.add_scenario(parser)
clu.add_simple_presolver_options(parser)
clu.add_num_cpus(parser)
clu.add_cv_options(parser)
automl_utils.add_automl_options(parser, default_total_training_time=20)
automl_utils.add_blas_options(parser)
logging_utils.add_logging_options(parser)
args = parser.parse_args()
logging_utils.update_logging(args)
# see which folds to run
if len(args.folds) == 0:
args.folds = [f for f in range(args.num_folds)]
clu.validate_folds_options(args)
required_keys = ['base_path']
config = as_asl_utils.load_config(args.config, required_keys)
# check if we need to spawn a new process for blas
if automl_utils.spawn_for_blas(args):
return
pipeline = parallel.apply_parallel_iter(
args.folds,
args.num_cpus,
_outer_cv,
args,
config,
progress_bar=True
)
if __name__ == '__main__':
main()
| mit | -4,272,369,325,987,425,300 | 27.433333 | 92 | 0.66823 | false |
rndusr/stig | stig/helpmgr.py | 1 | 22776 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
# http://www.gnu.org/licenses/gpl-3.0.txt
"""Anything related to the help system that is common between interfaces"""
import re
import string
from collections import abc
from . import __appname__, __version__, objects
from .cliopts import DESCRIPTIONS as CLI_DESCRIPTIONS
from .utils import expandtabs
from .utils.string import striplines
from .utils.usertypes import Float, Int
from .logging import make_logger # isort:skip
log = make_logger(__name__)
class ForgivingFormatter(string.Formatter):
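    """string.Formatter that leaves unknown '{placeholder}' fields in place instead of raising a KeyError."""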
def get_value(self, key, args, kwargs):
if isinstance(key, str):
try:
return kwargs[key]
except KeyError:
return '{%s}' % key
else:
return super().get_value(key, args, kwargs)
def __call__(self, lines):
return tuple(
striplines(self.format(line, __appname__=__appname__)
for line in expandtabs.expand(lines, indent=2))
)
finalize_lines = ForgivingFormatter()
class HelpManager():
"""Provide help texts for CommandManager, Settings and KeyMap objects"""
MAIN_TOPICS = {
'commandsmanual' : 'Describes how to call and chain commands',
'commands' : 'Lists commands',
'filtersmanual' : 'Describes how to define and combine filters',
'filters' : 'Lists filters for torrents, files, etc',
'settingsmanual' : 'Describes how to change settings',
'settings' : 'Lists configuration settings',
'keybindings' : 'Lists TUI keybindings',
}
ALIASES = {
'cmds' : 'commands',
'cmdsman' : 'commandsmanual',
'filtersman' : 'filtersmanual',
'config' : 'settings',
'cfg' : 'settings',
'configman' : 'settingsmanual',
'cfgman' : 'settingsmanual',
'keymap' : 'keybindings',
'keys' : 'keybindings',
}
def find(self, topic=None):
"""Find help topic and return lines"""
if topic in self.ALIASES:
topic = self.ALIASES[topic]
if topic is None:
return self.topic_overview
if hasattr(self, 'topic_' + topic):
return getattr(self, 'topic_' + topic)
elif topic in objects.cmdmgr:
return self.command(topic)
elif topic in objects.cfg:
return self.setting(topic)
raise ValueError('Unknown help topic: %r' % topic)
@property
def topic_overview(self):
lines = [
'{} {}'.format(__appname__, __version__),
'',
'SYNTAX',
'\t{__appname__} [<OPTIONS>] [<COMMANDS>]',
'',
]
for section,opts in CLI_DESCRIPTIONS.items():
lines.append('%s' % section.upper())
for opts,desc in opts.items():
lines.append('\t%s \t%s' % (opts, desc))
lines.append('')
def topic_line(topic):
names = (topic,) + tuple(alias for alias,topic_ in self.ALIASES.items()
if topic_ == topic)
return '\t\t%s \t- \t%s' % (', '.join(names), self.MAIN_TOPICS[topic])
lines += ['HELP TOPICS',
('\tAll commands and settings are valid help topics. Read '
"them with '{__appname__} help <TOPIC>' or '{__appname__} -h <TOPIC>'. "
'Additionally, the following topics are available:')]
lines.extend(topic_line(topic) for topic in self.MAIN_TOPICS)
return finalize_lines(lines)
@property
def topic_settingsmanual(self):
lines = [
'SETTINGS',
("\tSettings can be changed with the commands 'set' and 'reset' "
"(see 'help set' and 'help reset')."),
'',
('\tLocal settings change the behaviour of {__appname__} while '
'remote settings change the behaviour of the connected daemon. '
'The names of remote settings start with "srv.".'),
'',
('\tChanges made to local settings are not permanent. All '
'values are set back to their defaults once {__appname__} is '
'restarted (see RC FILES).'),
'',
('\tChanges made to remote settings, on the other hand, are '
'permanent as the daemon has its own configuration system.'),
'',
('\tIn the TUI, the "set" command with no arguments (bound to <alt-s> '
'by default) lists all settings and lets you edit them with <enter>. '
'The "dump" command (bound to <alt-S> by default) makes your '
'current settings, keybindings and tabs permanent.'),
'',
'RC FILES',
('\tAn rc file contains a list of arbitrary commands. '
r'Commands can span multiple lines by escaping line breaks with "\". '
'Lines starting with "#" (optionally preceded by spaces) are ignored.'),
'',
('\tCommands in an rc file are called during startup before the '
'commands given on the command line.'),
'',
('\tThe default rc file path is "$XDG_CONFIG_HOME/{__appname__}/rc", '
'where $XDG_CONFIG_HOME defaults to "~/.config" if it is not set.'),
'',
('\tA different path can be provided with the "--rcfile" option. '
'An existing rc file at the default path can be ignored with the '
'"--norcfile" option.'),
'',
'\tTo permanently change the default config file, create an alias:',
'',
'\t\t$ alias {__appname__}="command {__appname__} --rcfile ~/.{__appname__}rc"',
'',
('\tTo load any additional rc files after the default one use the '
'"rc" command. (Note that this will prevent the TUI from being '
'loaded unless you provide the "--tui" option. See the GUESSING '
'THE USER INTERFACE section in the "commandsmanual" help for '
'more information).'),
'',
('\tTUI commands (e.g. "tab" or "bind") in an rc file are ignored '
'in CLI mode.'),
]
return finalize_lines(lines)
@property
def topic_settings(self):
"""Return help text for all settings"""
localcfg = objects.localcfg
remotecfg = objects.remotecfg
lines = []
lines.append('LOCAL SETTINGS')
for name in sorted(localcfg):
lines.append('\t' + name + ' \t' + localcfg.description(name))
lines += ['']
lines.append('REMOTE SETTINGS')
for name in sorted(remotecfg):
lines.append('\t' + name + ' \t' + remotecfg.description(name))
return finalize_lines(lines)
def setting(self, name):
"""Return help text for setting"""
cfg = objects.cfg
if name not in objects.cfg:
raise ValueError('Unknown help topic: %r' % name)
value = cfg[name]
def pretty_string(value):
if isinstance(value, str) and re.match(r'^\s+$', value):
return repr(value)
elif isinstance(value, (Float, Int)):
return value.with_unit
else:
return str(value)
lines = ['%s - \t%s' % (name, cfg.description(name)),
'\tValue: \t' + pretty_string(cfg[name]),
'\tDefault: \t' + pretty_string(cfg.default(name))]
if hasattr(value, 'options'):
opt_strs = []
for opt in sorted(value.options):
opt_strs.append(str(opt))
aliases = [alias for alias,option in value.aliases.items()
if option == opt]
if aliases:
opt_strs[-1] += ' (%s)' % (', '.join(aliases))
lines.append('\tOptions: \t' + ', '.join(opt_strs))
lines.append('\tSyntax: \t' + cfg.syntax(name))
return finalize_lines(lines)
@property
def topic_commandsmanual(self):
from .commands import (OPS_AND, OPS_OR, OPS_SEQ)
lines = [
'COMMANDS',
'\tCommands can be called:',
'\t\t- \tby providing them as command line arguments,',
"\t\t- \tvia the command line in the TUI (press ':' to open it),",
"\t\t- \tby binding them to keys (see 'help bind'),",
("\t\t- \tby listing them in an rc file (see 'help cfgman') "
"and loading it with the '--rcfile' option or the 'rc' command."),
'',
'CHAINING COMMANDS',
("\tCombining commands with operators makes it possible to run "
"a command based on the previous command's success."),
"",
"\tAvailable command operators are: ",
"\t\t%s \t- \tRun the next command if the previous command succeeded." % '/'.join(OPS_AND),
"\t\t%s \t- \tRun the next command if the previous command failed." % '/'.join(OPS_OR),
"\t\t%s \t- \tRun the next command in any case." % '/'.join(OPS_SEQ),
"",
"\tCommand operators must be enclosed by spaces.",
"",
("\tFor example, 'ls foo & ls bar' would list all 'foo' torrents and, "
"if any where found, continue to list all 'bar' torrents. "
"However, 'ls foo | ls bar' would list 'bar' torrents only if there "
"are no 'foo' torrents."),
'',
'GUESSING THE USER INTERFACE (CLI/TUI)',
("\tIf commands are given as command line arguments and neither "
"'--tui' nor '--notui' are provided, {__appname__} tries to guess "
"whether it makes sense to start the TUI or just run the commands "
"and exit. For example, if you run '{__appname__} stop foo', "
"it is reasonable to assume that you want to run 'stop foo' and "
"get your shell prompt back. But if you run "
"'{__appname__} set connect.host foo.bar', "
"you probably expect the TUI to pop up."),
'',
"\tThis is how this works basically:",
("\t\t- \tWithout CLI commands, the TUI is loaded and vice versa."),
("\t\t- \tCommands in the torrent category (see 'help commands') prevent the TUI."),
("\t\t- \tChanging TUI settings ('(re)set tui.*') enforces the TUI."),
("\t\t- \tChanging remote settings ('set srv.*') prevents the TUI."),
("\t\t- \tCommands that are exclusive to TUI or CLI (e.g. 'tab') enforce their "
"interface. Providing both TUI- and CLI-only commands produces an error. "
"Provide --tui or --notui in that case."),
]
return finalize_lines(lines)
@property
def topic_commands(self):
"""Must be set to a CommandManager object; provides a help text"""
lines = []
for category in objects.cmdmgr.categories:
lines.append('{} COMMANDS'.format(category.upper()))
# To deduplicate commands with the same name that provide
# different interfaces (but should have the same docs), map
# command names to commands.
cmds = {}
for cmd in objects.cmdmgr.all_commands:
if category == cmd.category:
cmds[cmd.name] = cmd
for cmdname,cmd in sorted(cmds.items()):
lines.append('\t{} \t{}'.format(', '.join((cmd.name,) + cmd.aliases),
cmd.description))
lines.append('')
return finalize_lines(lines)
def command(self, name):
"""Return help text for command"""
cmd = objects.cmdmgr[name]
def takes_value(argspec):
if argspec.get('action') in ('store_true', 'store_false', 'store_const'):
return False # Boolean option
if 'nargs' not in argspec:
return True
nargs = argspec['nargs']
return not isinstance(nargs, int) or nargs > 0
def arg_dest(argspec):
if 'metavar' in argspec:
dest = argspec['metavar'].upper()
elif 'dest' in argspec:
dest = argspec['dest'].upper()
elif argspec['names'][0][0] == '-':
dest = argspec['names'][0].lstrip('-').upper()
else:
return None
if 'nargs' in argspec and argspec['nargs'] in ('*', '?'):
return '[<%s>]' % dest
else:
return '<%s>' % dest
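        # Illustrative trace of the helper above (hypothetical argspec):
        #   {'names': ('--columns', '-c'), 'nargs': '+'} -> '<COLUMNS>'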
lines = [cmd.name.upper()]
log.debug('Generating help text for %s', cmd.name)
names = ', '.join((cmd.name,) + cmd.aliases)
lines = [names + ' - \t' + cmd.description]
lines.append('')
if cmd.usage:
lines.append('USAGE')
lines += ['\t' + u for u in cmd.usage]
lines.append('')
if cmd.argspecs:
lines.append('ARGUMENTS')
lines_args = []
for argspec in cmd.argspecs:
if 'description' not in argspec:
# Argument has no description
continue
arglines = ['\t' + ', '.join(argspec['names'])]
if takes_value(argspec):
dest = arg_dest(argspec)
if dest is not None:
arglines[0] += ' %s' % dest
if isinstance(argspec['description'], str):
arglines[0] += ' \t' + argspec['description']
else: # Assume description is a sequence
arglines[0] += ' \t' + argspec['description'][0]
for paragraph in argspec['description'][1:]:
arglines.append('\t \t' + paragraph)
if 'document_default' not in argspec or argspec['document_default']:
# Argument takes a value that may default to another value
                    # if omitted and we want to document that default value
def stringify_default(default):
dflt = default() if callable(default) else default
if not isinstance(dflt, str) and isinstance(dflt, abc.Sequence):
return ' '.join(dflt)
else:
return str(dflt)
if 'default_description' in argspec:
arglines.append('\t \tDefault: %s' % stringify_default(argspec['default_description']))
elif 'default' in argspec:
arglines.append('\t \tDefault: %s' % stringify_default(argspec['default']))
lines_args.extend(arglines)
lines += lines_args
lines.append('')
if cmd.examples:
lines.append('EXAMPLES')
lines += ['\t' + e for e in cmd.examples]
lines.append('')
for name,text in sorted(cmd.more_sections.items()):
lines.append(name.upper())
if callable(text):
text = text()
lines += ['\t' + line for line in text]
lines.append('')
return finalize_lines(lines)
@property
def topic_keybindings(self):
"""Must be set to a KeyMap object; provides a help text"""
from .tui.tuiobjects import keymap
def stringify(s):
return ' '.join(s) if not isinstance(s, str) else s
lines = []
for context in sorted(keymap.contexts, key=lambda c: '' if c is None else c):
if context is None:
lines.append('GENERAL KEYBINDINGS')
else:
lines.append('{} KEYBINDINGS'.format(context.upper()))
km = ((key, stringify(action)) for key,action in keymap.map(context))
# Sort by command
from natsort import natsorted, ns
for key,action in natsorted(km, key=lambda pair: pair[1], alg=ns.IGNORECASE):
if len(action) < 40:
lines.append('\t%s \t%s \t%s' % (key, action, keymap.get_description(key, context)))
else:
lines.append('\t%s \t%s' % (key, action))
lines.append('\t \t%s' % (keymap.get_description(key, context),))
lines.append('')
return finalize_lines(lines)
@property
def topic_filtersmanual(self):
lines = [
'FILTERING TORRENTS, FILES, PEERS, ETC',
('\tCommands that accept FILTER arguments are applied to items '
'that match these filters.'),
'',
'\tThere are two kinds of filters:',
'\t\t- \tBoolean filters stand on their own (e.g. "downloading")',
'\t\t- \tComparative filters need a value (e.g. "seeds>20")',
'',
'\tThe syntax of comparative filters is: [[<FILTER NAME>]<OPERATOR>]<VALUE>',
'',
('\tBesides the usual operators (=, !=, >, <, >=, <=), "~" matches if the '
'item\'s value contains the literal string VALUE and "=~" matches if the '
'item\'s value matches against the Perl-style regular expression VALUE.'),
'\tExample: "name~foo" matches all torrents with "foo" in their name.',
'',
('\tIf FILTER NAME is omitted, it defaults to a comparative filter that '
"makes sense, e.g. \"name\" for torrents (see 'help filters'). "
'If OPERATOR is omitted, it defaults to "~".'),
'\tExample: "foo" is the same as "~foo" is the same as "name~foo".',
'',
('\tSpaces at the start and the end of VALUE are always removed. '
'If the result starts and ends with matching single or double quotes, the '
'quotes are removed. Any other quotes are not interpreted, i.e. they must '
'not be escaped.'),
'\tExample: "name = foo " matches "foo"; "name = \' foo \' " matches " foo "',
'',
'\tAll filters can be inverted by prepending "!" to the filter name.',
('\tExample: "name!=foo" is the same as "!name=foo"; '
'"!name!=foo" is the same as "name=foo".'),
'',
('\tMatching strings is case-insensitive if VALUE does not contain any '
'upper-case characters, otherwise it is case-sensitive.'),
'',
('\tWhen matching numbers, the unit prefixes "k", "M", "G", "T" and '
'their binary counterparts "Ki", "Mi", "Gi", "Ti" are supported. '
'The case of unit prefixes is ignored.'),
('\tExample: "size>1mi" is the same as "size>1048576" (1 Mebibyte); '
'"size>1m" is the same as "size>1000000" (1 Megabyte)'),
'',
('\tFor time-based filters, VALUE is either an absolute time stamp '
'or a relative time delta based on the current time.'),
'',
('\tTime stamps support a date in the format [[YYYY-]MM-]DD or YYYY[-MM] '
'and a time in the format HH:MM[:SS]. Date and time can be combined by '
'separating them with a space.'),
'\tExamples: \t"added=2015-05" \tmatches torrents that were added in May 2015.',
('\t\t"completed>=01" \tmatches torrents that finished downloading earlier this month '
'("01" being the first day of the current month).'),
('\t\t"activity<10-17 18:45" \tmatches torrents that were last active before '
'18:45 (6:45 p.m.) on the 17th of October of this year.'),
'',
('\tTime deltas use the format [in |+|-]AMOUNT[s|m|h|d|w|M|y][ ago]. '
'The words "in" and "ago" are aliases for "+" and "-". Negative time '
'deltas match time stamps in the past and positive time deltas '
'match time stamps in the future. Filters have individual defaults for '
'the sign; e.g. "eta > 1h" is the same as "eta > in 1h" while '
'"completed > 1h" is the same as "completed > 1h ago".'),
'',
('\tFilters can be combined with the operators "&" (logical AND) '
'and "|" (logical OR). Multiple FILTER arguments are implicitly '
'combined with "|".'),
'\tExample: "name=foo paused" is the same as "name=foo|paused".',
'',
('\tOperators can be escaped with a preceding "\\" to remove their meaning.'),
'\tExample: "name=foo\\&bar" matches torrents with the name "foo&bar".',
]
return finalize_lines(lines)
@property
def topic_filters(self):
"""Provide help text for arguments to TorrentFilter"""
from .client import (TorrentFilter, FileFilter,
PeerFilter, TrackerFilter,
SettingFilter)
lines = []
for caption,filt in (('TORRENT FILTERS', TorrentFilter),
('FILE FILTERS', FileFilter),
('PEER FILTERS', PeerFilter),
('TRACKER FILTERS', TrackerFilter),
('SETTING FILTERS', SettingFilter)):
lines += ['',
'%s' % caption,
'\tDEFAULT FILTER: %s' % filt.DEFAULT_FILTER,
'']
lines.append('\tBOOLEAN FILTERS')
for fname,f in sorted(filt.BOOLEAN_FILTERS.items()):
lines.append('\t\t{} \t{}'.format(', '.join((fname,) + f.aliases), f.description))
lines += ['', '\tCOMPARATIVE FILTERS']
for fname,f in sorted(filt.COMPARATIVE_FILTERS.items()):
if fname == filt.DEFAULT_FILTER:
lines.append('\t\t{} \t{} (default)'.format(', '.join((fname,) + f.aliases), f.description))
else:
lines.append('\t\t{} \t{}'.format(', '.join((fname,) + f.aliases), f.description))
return finalize_lines(lines)
| gpl-3.0 | -7,333,468,026,851,191,000 | 43.311284 | 112 | 0.530207 | false |
sage-code/level | core/levDriver.py | 1 | 10035 | import core.levScanner as levScanner
import core.levParser as levParser
import core.levLexer as levLexer
import core.levExecute as executor
import core.levSystem as system
from core.levConsole import *
from core.levConfig import *
# Execute a level program
def run_console(p_file):
if p_file:
initConfig(p_file)
# display console header"
printHeader()
# local states
config.debug = False
program = None
# start the run_command welcome screen
# start driver main loop
while True:
# wait for a new command
if config.debug:
warning("debuging...")
command = prompt()
# support a list of commands separated by space
if " " in command:
commandList = command.split(" ", 1)
command = commandList[0]
param = commandList[1]
else:
param = ""
# check state and set implicit command to "next"
if config.debug:
if command in ("clean","scan","lex","parse","run","debug"):
warning("program "+config.program +" in debug mode!")
warning("use one of: [next,stop,resume] to continue")
continue
elif command=="":
command="next"
elif command in ("stop","next","resume"):
warning("program is not in debug mode!")
continue
elif command=="":
continue
# execute a command
if command == "help":
consoleHelp(param)
elif command == "clear":
clearScreen()
elif command == "reset":
resetConfig()
elif command in ("status","state","config"):
printConfig()
elif command == "eval":
if param == "":
echo("enter math expression...")
param = prompt("")
if param !="":
try:
executor.evalPostfix(param)
except Exception as e:
fail("evaluation failed!", e)
elif command == "home":
if param =="":
echo("home:"+config.home)
param = prompt("home:")
if param != "":
config.home=param
elif command == "mode":
if param == "":
echo("debug mode=" + config.mode)
echo("options:{" + ",".join(setMode) + "}")
param = prompt("new mode:")
if param != "":
if param in setMode:
config.mode = param
else:
error("debug mode unknown:" + param)
elif command == "scan":
if param == "":
param = prompt("program file:")
# scan a program
if param != "":
try:
run_scanner(param)
except Exception as e:
fail("scanner failed!", e)
else:
warning("\nfile not specified!")
elif command == "lex":
if param == "":
param = prompt("program file:")
# lexer for a program
if param != "":
try:
run_lexer(param)
except Exception as e:
fail("lexer failed!", e)
else:
warning("\nfile not specified!")
elif command == "program":
if param == "":
if config.pfile:
echo("current program file")
echo("pfile="+config.pfile)
else:
echo("set a program file")
param = prompt("program/file:")
if param != "":
initConfig(param)
echo("pfile="+config.pfile)
else:
warning("\nfile not speficied!")
elif command == "parse":
if param == "":
if config.pfile:
param = config.pfile
else:
param = prompt("program file:")
# parse a program
if param != "":
try:
run_parser(param)
config.state="parsed"
except Exception as e:
fail("parser failed!",e)
else:
warning("\nprogram file not specified.")
elif command == "run":
if param == "":
if config.pfile:
param = config.pfile
else:
param = prompt("program file:")
# parse then run program
if param != "":
try:
run_program(param)
except Exception as e:
fail("program failed!",e)
else:
warning("\nprogram file not specified.")
elif command == "debug":
if param == "":
if config.pfile:
param = config.pfile
else:
param = prompt("program file:")
# debug program
if param != "":
try:
program = debug_program(param)
config.debug = True
except Exception as e:
fail("program failed!", e)
else:
warning("\nprogram file not specified!")
elif command in ("next","resume","stop"):
if command == "next":
try:
go = next(program)
if not go:
warning("program end!")
config.debug = False
except Exception as e:
fail("program failed!", e)
config.debug = False
elif command == "resume":
try:
while True:
go=next(program)
if not go: break
warning("program end!")
except Exception as e:
fail("program failed!", e)
config.debug = False
elif command == "stop":
program=None
warning("debug stop!")
config.debug = False
elif command == "print":
if param=="":
printProgram(config.program)
else:
printProgram(param)
elif command in ("report"):
if param=="" and config.program:
printSymbolTable(config.program)
printSyntaxTree(config.program)
elif param in config.symbolTable:
printSymbolTable(param)
printSyntaxTree(param)
else:
printConfig()
elif command in ("exit","quit"):
break
elif command == "init":
# load default library
system.loadDefault()
else:
step()
warning("unknown command:"+command)
warning("type:help for a list of commands")
step()
# end if
# end while (menu loop)
# end run_console
# ------------------------------------------------------------------
# running the scanner
# ------------------------------------------------------------------
def run_scanner(p_file):
initConfig(p_file)
program = open(config.pfile, 'r', buffering=-1, encoding='utf8')
sourceText = program.readlines()
program.close()
# prepare 2 enclosed functions for scan
reader = levScanner.prepare(sourceText)
for char in reader():
print(char)
return
# ------------------------------------------------------------------
# running the lexer
# ------------------------------------------------------------------
def run_lexer(p_file):
initConfig(p_file)
program = open(config.pfile, 'r', buffering=-1, encoding='utf8')
sourceText = program.readlines()
program.close()
# get enclosed function out from closure
reader = levLexer.prepare(sourceText)
# read and print all tokens
for token in reader():
print(token.show())
# ------------------------------------------------------------------
# running the parser. This will read the entire file in memory
# then use a pipeline with generators feeding in cascade:
# ------------------------------------------------------------------
# parse_module: parser <-- lexer <-- scanner <-- file
# ------------------------------------------------------------------
def run_parser(p_file):
initConfig(p_file)
# print a message to announce start parsing
if config.mode != "silent":
print("=" * con_with)
print("Program: " + config.program + ".lev" + "...")
print("-" * con_with)
# run the parser for main module
scount = levParser.parse_module(config.pfile, config.program, start_count=0)
# print a message to prepare for exit
if config.mode != "silent":
print()
print("-" * con_with)
print("Succesfully parsed:%d statements!" % scount)
print("=" * con_with)
# end run_parser
# run program
def run_program(p_file):
if not programParsed(p_file):
run_parser(p_file)
if config.mode != "silent":
step()
root = config.syntaxTree[config.program]
result = executor.run(root,)
if config.mode != "silent":
print()
step()
if result == 0:
print("Program succesfully run.")
else:
print("Program error: exit = "+result)
# debug program
def debug_program(p_file):
if not programParsed(p_file):
run_parser(p_file)
elif config.mode != 'silent':
echo("program:" + config.program)
root = config.syntaxTree[config.program]
for node in root.children:
print(node.cargo)
yield node.cargo
if config.debug:
yield 0
if config.mode != 'silent':
echo("end program!")
# end levDriver.py
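# Usage sketch (hypothetical paths, assuming the package is importable):
#
#     from core import levDriver
#     levDriver.run_console("examples/hello.lev")   # interactive console
#     levDriver.run_program("examples/hello.lev")   # parse and execute directly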
| mit | 636,423,577,740,252,400 | 32.33887 | 80 | 0.461286 | false |
rbraley/django-tastypie | tests/core/tests/authentication.py | 1 | 10334 | import base64
import time
import warnings
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpRequest
from django.test import TestCase
from tastypie.authentication import Authentication, BasicAuthentication, ApiKeyAuthentication, DigestAuthentication, OAuthAuthentication
from tastypie.http import HttpUnauthorized
from tastypie.models import ApiKey, create_api_key
# Be tricky.
from tastypie.authentication import python_digest, oauth2, oauth_provider
if python_digest is None:
warnings.warn("Running tests without python_digest! Bad news!")
if oauth2 is None:
warnings.warn("Running tests without oauth2! Bad news!")
if oauth_provider is None:
warnings.warn("Running tests without oauth_provider! Bad news!")
class AuthenticationTestCase(TestCase):
def test_is_authenticated(self):
auth = Authentication()
request = HttpRequest()
# Doesn't matter. Always true.
self.assertTrue(auth.is_authenticated(None))
self.assertTrue(auth.is_authenticated(request))
def test_get_identifier(self):
auth = Authentication()
request = HttpRequest()
self.assertEqual(auth.get_identifier(request), 'noaddr_nohost')
request = HttpRequest()
request.META['REMOTE_ADDR'] = '127.0.0.1'
request.META['REMOTE_HOST'] = 'nebula.local'
self.assertEqual(auth.get_identifier(request), '127.0.0.1_nebula.local')
class BasicAuthenticationTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_is_authenticated(self):
auth = BasicAuthentication()
request = HttpRequest()
# No HTTP Basic auth details should fail.
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# HttpUnauthorized with auth type and realm
self.assertEqual(auth.is_authenticated(request)['WWW-Authenticate'], 'Basic Realm="django-tastypie"')
# Wrong basic auth details.
request.META['HTTP_AUTHORIZATION'] = 'abcdefg'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# No password.
request.META['HTTP_AUTHORIZATION'] = base64.b64encode('daniel')
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Wrong user/password.
request.META['HTTP_AUTHORIZATION'] = base64.b64encode('daniel:pass')
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Correct user/password.
john_doe = User.objects.get(username='johndoe')
john_doe.set_password('pass')
john_doe.save()
request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % base64.b64encode('johndoe:pass')
self.assertEqual(auth.is_authenticated(request), True)
# Regression: Password with colon.
john_doe = User.objects.get(username='johndoe')
john_doe.set_password('pass:word')
john_doe.save()
request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % base64.b64encode('johndoe:pass:word')
self.assertEqual(auth.is_authenticated(request), True)
# Capitalization shouldn't matter.
john_doe = User.objects.get(username='johndoe')
john_doe.set_password('pass:word')
john_doe.save()
request.META['HTTP_AUTHORIZATION'] = 'bAsIc %s' % base64.b64encode('johndoe:pass:word')
self.assertEqual(auth.is_authenticated(request), True)
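    # For reference, a client sends the same credential as a single header,
    # e.g. base64 of "johndoe:pass":
    #   Authorization: Basic am9obmRvZTpwYXNz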
class ApiKeyAuthenticationTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(ApiKeyAuthenticationTestCase, self).setUp()
ApiKey.objects.all().delete()
def test_is_authenticated_get_params(self):
auth = ApiKeyAuthentication()
request = HttpRequest()
# Simulate sending the signal.
john_doe = User.objects.get(username='johndoe')
create_api_key(User, instance=john_doe, created=True)
# No username/api_key details should fail.
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Wrong username details.
request.GET['username'] = 'foo'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# No api_key.
request.GET['username'] = 'daniel'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Wrong user/api_key.
request.GET['username'] = 'daniel'
request.GET['api_key'] = 'foo'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Correct user/api_key.
john_doe = User.objects.get(username='johndoe')
request.GET['username'] = 'johndoe'
request.GET['api_key'] = john_doe.api_key.key
self.assertEqual(auth.is_authenticated(request), True)
def test_is_authenticated_header(self):
auth = ApiKeyAuthentication()
request = HttpRequest()
# Simulate sending the signal.
john_doe = User.objects.get(username='johndoe')
create_api_key(User, instance=john_doe, created=True)
# No username/api_key details should fail.
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Wrong username details.
request.META['HTTP_AUTHORIZATION'] = 'foo'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# No api_key.
request.META['HTTP_AUTHORIZATION'] = 'ApiKey daniel'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Wrong user/api_key.
request.META['HTTP_AUTHORIZATION'] = 'ApiKey daniel:pass'
self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
# Correct user/api_key.
john_doe = User.objects.get(username='johndoe')
request.META['HTTP_AUTHORIZATION'] = 'ApiKey johndoe:%s' % john_doe.api_key.key
self.assertEqual(auth.is_authenticated(request), True)
# Capitalization shouldn't matter.
john_doe = User.objects.get(username='johndoe')
request.META['HTTP_AUTHORIZATION'] = 'aPiKeY johndoe:%s' % john_doe.api_key.key
self.assertEqual(auth.is_authenticated(request), True)
class DigestAuthenticationTestCase(TestCase):
fixtures = ['note_testdata.json']
def setUp(self):
super(DigestAuthenticationTestCase, self).setUp()
ApiKey.objects.all().delete()
def test_is_authenticated(self):
auth = DigestAuthentication()
request = HttpRequest()
# Simulate sending the signal.
john_doe = User.objects.get(username='johndoe')
create_api_key(User, instance=john_doe, created=True)
# No HTTP Basic auth details should fail.
auth_request = auth.is_authenticated(request)
self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
# HttpUnauthorized with auth type and realm
self.assertEqual(auth_request['WWW-Authenticate'].find('Digest'), 0)
self.assertEqual(auth_request['WWW-Authenticate'].find(' realm="django-tastypie"') > 0, True)
self.assertEqual(auth_request['WWW-Authenticate'].find(' opaque=') > 0, True)
self.assertEqual(auth_request['WWW-Authenticate'].find('nonce=') > 0, True)
# Wrong basic auth details.
request.META['HTTP_AUTHORIZATION'] = 'abcdefg'
auth_request = auth.is_authenticated(request)
self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
# No password.
request.META['HTTP_AUTHORIZATION'] = base64.b64encode('daniel')
auth_request = auth.is_authenticated(request)
self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
# Wrong user/password.
request.META['HTTP_AUTHORIZATION'] = base64.b64encode('daniel:pass')
auth_request = auth.is_authenticated(request)
self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
# Correct user/password.
john_doe = User.objects.get(username='johndoe')
request.META['HTTP_AUTHORIZATION'] = python_digest.build_authorization_request(
john_doe.username,
request.method,
'/', # uri
1, # nonce_count
digest_challenge=auth_request['WWW-Authenticate'],
password=john_doe.api_key.key
)
auth_request = auth.is_authenticated(request)
self.assertEqual(auth_request, True)
class OAuthAuthenticationTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_is_authenticated(self):
from oauth_provider.models import Consumer, Token, Resource
auth = OAuthAuthentication()
request = HttpRequest()
request.META['SERVER_NAME'] = 'testsuite'
request.META['SERVER_PORT'] = '8080'
request.REQUEST = request.GET = {}
request.method = "GET"
# Invalid request.
resp = auth.is_authenticated(request)
self.assertEqual(resp.status_code, 401)
# No username/api_key details should fail.
request.REQUEST = request.GET = {
'oauth_consumer_key': '123',
'oauth_nonce': 'abc',
'oauth_signature': '&',
'oauth_signature_method': 'PLAINTEXT',
'oauth_timestamp': str(int(time.time())),
'oauth_token': 'foo',
}
user = User.objects.create_user('daniel', '[email protected]', 'password')
request.META['Authorization'] = 'OAuth ' + ','.join([key+'='+value for key, value in request.REQUEST.items()])
resource, _ = Resource.objects.get_or_create(url='test', defaults={
'name': 'Test Resource'
})
consumer, _ = Consumer.objects.get_or_create(key='123', defaults={
'name': 'Test',
'description': 'Testing...'
})
token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={
'consumer': consumer,
'resource': resource,
'secret': '',
'user': user,
})
resp = auth.is_authenticated(request)
self.assertEqual(resp, True)
self.assertEqual(request.user.pk, user.pk)
| bsd-3-clause | 2,878,950,592,333,090,000 | 39.367188 | 136 | 0.655603 | false |
moshthepitt/afya360 | health_facilities/sitemaps.py | 1 | 1083 | from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.core.paginator import Paginator
from .models import HealthFacility
class HealthFacilitySitemap(Sitemap):
changefreq = "monthly"
priority = 0.6
def items(self):
return HealthFacility.objects.all()
def lastmod(self, obj):
return obj.updated_on
def health_facility_sitemaps(chunk=1000):
"""
    next we'll attempt to generate a number of sitemaps in chunks using Paginator and GenericSitemap
"""
health_facility_sitemap = {}
health_facilities = HealthFacility.objects.all()
paginated_health_facilities = Paginator(health_facilities, chunk)
for this_page in paginated_health_facilities.page_range:
health_facility_dict = {
'queryset': paginated_health_facilities.page(this_page).object_list,
'date_field': 'updated_on',
}
health_facility_sitemap['health_facilitys_%s' % this_page] = GenericSitemap(
health_facility_dict, priority=0.6, changefreq='monthly')
return health_facility_sitemap
| mit | -7,888,297,912,088,862,000 | 32.84375 | 100 | 0.698984 | false |
JavierAntonioGonzalezTrejo/SCAZAC | administracionScazac/views.py | 1 | 41167 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Modification 20171022: Requirement F3-3 finished.
# Modification 20171025: Requirement F3 finished.
import datetime
from django.db.models import Max
import json
import pandas
import dateutil.parser
from django import forms
from decimal import Decimal
from administracionScazac.forms import MonitoringMapForm
from investigacion.models import MonitoringStation, MonitoringData
from calidadAire.models import MonitoringReports, MonitoringMap, ImecaDataMonth, ImecaDataDay, ImecaDataHour
from django.http import HttpResponseRedirect
from django.core.exceptions import PermissionDenied
from django.views.generic import TemplateView
from django.shortcuts import render
# Create your views here.
class MonitoringMapAdminView(TemplateView): # Added 20171016
"""Requirment F3-2"""
def get(self, request):
"""Preview of the current map settings"""
if not(request.user.is_superuser):
raise PermissionDenied
try:
mapSettings = MonitoringMap.objects.get(pk=1) # Get the initial map Settings, only the first register is used on the System
formInitialValues = {'centerLatitude':mapSettings.centerLatitude, 'centerLength':mapSettings.centerLength, 'zoom':mapSettings.zoom, 'googleAPIKey':mapSettings.googleAPIKey}
mapSettingsForm = MonitoringMapForm(formInitialValues)
return render(request, 'administracionScazac/homepage.djhtml',
{'mapSettingsForm':mapSettingsForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom})
except:
            mapSettingsForm = MonitoringMapForm()
return render(request, 'administracionScazac/homepage.djhtml',
{'mapSettingsForm':mapSettingsForm})
def post(self, request):
"""Modifie the current map settings"""
if not(request.user.is_superuser):
raise PermissionDenied
mapSettingsForm = MonitoringMapForm(request.POST)
if mapSettingsForm.is_valid():
try:
                mapSettings = MonitoringMap.objects.get(pk=1) # Get the initial map Settings, only the first register is used on the System
except:
mapSettings = MonitoringMap()
mapSettings.idMap = 1
mapSettings.centerLatitude = mapSettingsForm.cleaned_data['centerLatitude']
mapSettings.centerLength = mapSettingsForm.cleaned_data['centerLength']
mapSettings.zoom = mapSettingsForm.cleaned_data['zoom']
mapSettings.googleAPIKey = mapSettingsForm.cleaned_data['googleAPIKey']
mapSettings.save()
alertModified = "alert(\"Se ah modificado la configuración con exito!!!\");"
return render(request, 'administracionScazac/homepage.djhtml',
{'mapSettingsForm':mapSettingsForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'alertModified':alertModified})
else:
return render(request, 'administracionScazac/homepage.djhtml',
{'mapSettingsForm':mapSettingsForm,
'keyGoogleMap':mapSettingsForm.cleaned_data['googleAPIKey'],
'centerLatitude':mapSettingsForm.cleaned_data['centerLatitude'],
'centerLength':mapSettingsForm.cleaned_data['centerLength'],
'zoom': mapSettingsForm.cleaned_data['zoom']})
class MonitoringStationViewAdd(TemplateView):
"""Requerimento F3-?"""
title = "Agregar Estación de Monitoreo"
def get(self, request):
"""Principal view"""
if not(request.user.is_superuser):
raise PermissionDenied
monitoringStationForm = generateMonitoringStationFormAddOrModify(True)()
try:
mapSettings = MonitoringMap.objects.get(pk=1)
except:
return HttpResponseRedirect("/admin/adminmapa")
return render(request, 'administracionScazac/monitoringStation.djhtml',
{'monitoringStationForm':monitoringStationForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'titulo':MonitoringStationViewAdd.title,
'isAdd':True})
def post(self, request):
"""Save the Monitoring Station"""
if not(request.user.is_superuser):
raise PermissionDenied
monitoringStationForm = generateMonitoringStationFormAddOrModify(True)(request.POST)
try:
mapSettings = MonitoringMap.objects.get(pk=1) # Get the initial map Settings, only the first register is used on the System
except:
return HttpResponseRedirect("/admin/adminmapa")
if monitoringStationForm.is_valid():
savedMonitoringStation = MonitoringStation()
            lastSerialNumber = MonitoringStation.objects.aggregate(Max('serialNumber'))['serialNumber__max']
            if not lastSerialNumber:
savedMonitoringStation.serialNumber = 1
else:
savedMonitoringStation.serialNumber = lastSerialNumber + 1
savedMonitoringStation.nameMonitoringPlace = monitoringStationForm.cleaned_data['nameMonitoringPlace']
savedMonitoringStation.latitude = monitoringStationForm.cleaned_data['latitude']
savedMonitoringStation.length = monitoringStationForm.cleaned_data['length']
savedMonitoringStation.monitoringRadius = monitoringStationForm.cleaned_data['monitoringRadius']
datePlaceHolder = datetime.datetime.now().date() # The newest and oldest date will be calculated when the data is summited
savedMonitoringStation.dateNewestRegister = datePlaceHolder
savedMonitoringStation.dateOldestRegister = datePlaceHolder
savedMonitoringStation.save()
alertSaved = "alert(\"Se ah guardado la estación con exito!!!\");"
return render(request, 'administracionScazac/monitoringStation.djhtml',
{'monitoringStationForm':monitoringStationForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'alertSaved':alertSaved,
'titulo':MonitoringStationViewAdd.title,
'isAdd':True})
else:
return render(request, 'administracionScazac/monitoringStation.djhtml',
{'monitoringStationForm':monitoringStationForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'titulo':MonitoringStationViewAdd.title,
'isAdd':True})
class MonitoringStationViewModify(TemplateView):
"""Requerimento F3-?"""
title = "Modificar Estación de Monitoreo"
def get(self, request):
"""Principal view"""
if not(request.user.is_superuser):
raise PermissionDenied
monitoringStationForm = generateMonitoringStationFormAddOrModify(False)()
try:
mapSettings = MonitoringMap.objects.get(pk=1)
except:
return HttpResponseRedirect("/admin/adminmapa")
jsonStation = generateJsonStationModifyView()
alertForErase = "alert(\"Se borrara la estación especificada, si desea no borrarla porfavor presione atras en su navegador.!!!\");"
return render(request, 'administracionScazac/monitoringStation.djhtml',
{'monitoringStationForm':monitoringStationForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'titulo': MonitoringStationViewModify.title,
'jsonStation':json.dumps(jsonStation),
'stationID':1,
'alertForErase': alertForErase,
'isAdd':False}) # Because allways the first station will be showed.
def post(self, request):
"""Save the Monitoring Station"""
if not(request.user.is_superuser):
raise PermissionDenied
monitoringStationForm = generateMonitoringStationFormAddOrModify(False)(request.POST)
try:
mapSettings = MonitoringMap.objects.get(pk=1) # Get the initial map Settings, only the first register is used on the System
except:
return HttpResponseRedirect("/admin/adminmapa")
if monitoringStationForm.is_valid():
alertForErase = "alert(\"Se borrara la estación especificada, si desea no borrarla porfavor presione atras en su navegador.!!!\");"
if request.POST["actionPerform"] == "1": # Save the data on the Monitoring Station exept the dates because they are calculed only one time when the data is inserted.
savedMonitoringStation = MonitoringStation.objects.get(pk=request.POST["nameMonitoringPlace"])
savedMonitoringStation.latitude = monitoringStationForm.cleaned_data['latitude']
savedMonitoringStation.length = monitoringStationForm.cleaned_data['length']
savedMonitoringStation.monitoringRadius = monitoringStationForm.cleaned_data['monitoringRadius']
savedMonitoringStation.save()
alertSaved = "alert(\"Se ah modificado la estación con exito!!!\");"
stationID = request.POST["nameMonitoringPlace"]
else:
savedMonitoringStation = MonitoringStation.objects.get(pk=request.POST["nameMonitoringPlace"]).delete()
monitoringStationForm = generateMonitoringStationFormAddOrModify(False)()
alertSaved = "alert(\"Se ah borrado la estación con exito!!!\");"
stationID = 1
jsonStation = generateJsonStationModifyView()
return render(request, 'administracionScazac/monitoringStation.djhtml',
{'monitoringStationForm':monitoringStationForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'titulo': MonitoringStationViewModify.title,
'stationID':stationID,
'alertSaved':alertSaved,
'jsonStation':json.dumps(jsonStation),
'alertForErase': alertForErase,
'isAdd':False}) # Because allways the first station will be showed.
else:
return render(request, 'administracionScazac/monitoringStation.djhtml',
{'monitoringStationForm':monitoringStationForm,
'keyGoogleMap':mapSettings.googleAPIKey,
'centerLatitude':mapSettings.centerLatitude,
'centerLength':mapSettings.centerLength,
'zoom':mapSettings.zoom,
'titulo': MonitoringStationViewModify.title,
'stationID':request.POST["nameMonitoringPlace"],
'alertForErase': alertForErase,
'isAdd':False})
class MonitoringDataViewAdd(TemplateView):
"""View for the requirment F3-4"""
def get(self, request):
"""Initial view"""
if not(request.user.is_superuser):
raise PermissionDenied
dataForm = generateMonitoringDataForm()
return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
'title':"Subir datos de calidad del aire"})
def post(self, request):
"""Where the file is uploaded"""
if not(request.user.is_superuser):
raise PermissionDenied
dataForm = generateMonitoringDataForm()(request.POST, request.FILES)
if dataForm.is_valid():
# Test the type
name = request.FILES['archivoCSV'].name
if not(name.endswith('.csv')):
return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
'title':"El archivo no es CSV."})
# Test if pandas can open the file
#try:
data = pandas.read_csv(request.FILES['archivoCSV'], low_memory=False)
#except:
# return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
# 'title':"El archivo no es CSV."})
# Test if the file contains all the columns
try:
test = data["Temp"][0]
test = data["O3"][0]
test = data["CO"][0]
test = data["NO"][0]
test = data["NO2"][0]
test = data["NOX"][0]
test = data["SO2"][0]
test = data["TempAmbiente"][0]
test = data["RH"][0]
test = data["WS"][0]
test = data["WD"][0]
test = data["PresionBaro"][0]
test = data["RadSolar"][0]
test = data["Precipitacion"][0]
test = data["PM10"][0]
test = data["PM2.5"][0]
except:
return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
'title':"El archivo no contiene las columnas solicitadas"})
monitoringStation = MonitoringStation.objects.get(pk=dataForm.cleaned_data['nameMonitoringPlace'])
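            # Ingestion pipeline (flow of the calls below): raw rows are stored
            # first, IMECA means are then rolled up hour -> day -> month, and
            # finally the station's covered date range is refreshed from the
            # stored raw data.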
try:
oldDate, newDate = saveMonitoringData(data, monitoringStation)
saveImecaDataHour(monitoringStation, oldDate, newDate)
saveImecaDataDay(monitoringStation, oldDate, newDate)
saveImecaDataMonth(monitoringStation, oldDate, newDate)
monitoringStation.dateOldestRegister = MonitoringData.objects.filter(idStation__pk=dataForm.cleaned_data['nameMonitoringPlace']).order_by('fecha')[0].fecha.date()
monitoringStation.dateNewestRegister = MonitoringData.objects.filter(idStation__pk=dataForm.cleaned_data['nameMonitoringPlace']).order_by('-fecha')[0].fecha.date()
monitoringStation.save()
except:
return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
'title':"Datos ya existentes. Elimine los datos que desea reemplazar."})
return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
'title':"Subir datos de calidad del aire",
'alertSaved': "alert(\"Se guardaron los datos en la estación de forma exitosa!!!\");"})
else:
return render(request, 'administracionScazac/monitoringData.djhtml',{'dataForm': dataForm,
'title':"Subir datos de calidad del aire"})
class MonitoringDataViewDelete(TemplateView):
"""Requirment F3-4 *Delate*"""
def get(self, request):
"""Initial view"""
if not(request.user.is_superuser):
raise PermissionDenied
dataForm = generateMonitoringDataFormDelete()()
return render(request, 'administracionScazac/monitoringDataDelete.djhtml',{'dataForm': dataForm,
'title':"Eliminar datos de calidad del aire"})
def post(self, request):
"""Procesing function"""
if not(request.user.is_superuser):
raise PermissionDenied
dataForm = generateMonitoringDataFormDelete()(request.POST)
if dataForm.is_valid():
if dataForm.cleaned_data['initialDate'] > dataForm.cleaned_data['finalDate']:
return render(request, 'administracionScazac/monitoringDataDelete.djhtml',{'dataForm': dataForm,
'title':"La fecha inicial debe de ser menor que la final"})
elif dataForm.cleaned_data['initialDate'] == dataForm.cleaned_data['finalDate']:
                MonitoringData.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__icontains=dataForm.cleaned_data['initialDate']).delete()
                ImecaDataHour.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__icontains=dataForm.cleaned_data['initialDate']).delete()
                ImecaDataDay.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__icontains=dataForm.cleaned_data['initialDate']).delete()
                ImecaDataMonth.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__icontains=dataForm.cleaned_data['initialDate']).delete()
                station = MonitoringStation.objects.get(pk=request.POST['nameMonitoringPlace'])
                saveImecaDataMonth(station, dataForm.cleaned_data['initialDate'], dataForm.cleaned_data['initialDate'])
return render(request, 'administracionScazac/monitoringDataDelete.djhtml',{'dataForm': dataForm,
'title':"Eliminar datos de calidad del aire",
                                                                            'alertDelete':"alert(\"Se han borrado los datos con éxito!!!\");"})
            MonitoringData.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__gte=dataForm.cleaned_data['initialDate']).filter(fecha__lte=dataForm.cleaned_data['finalDate']).delete()
            ImecaDataHour.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__gte=dataForm.cleaned_data['initialDate']).filter(fecha__lte=dataForm.cleaned_data['finalDate']).delete()
            ImecaDataDay.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__gte=dataForm.cleaned_data['initialDate']).filter(fecha__lte=dataForm.cleaned_data['finalDate']).delete()
            ImecaDataMonth.objects.filter(idStation__pk=request.POST['nameMonitoringPlace']).filter(fecha__gte=dataForm.cleaned_data['initialDate']).filter(fecha__lte=dataForm.cleaned_data['finalDate']).delete()
            station = MonitoringStation.objects.get(pk=request.POST['nameMonitoringPlace'])
            saveImecaDataMonth(station, dataForm.cleaned_data['initialDate'], dataForm.cleaned_data['finalDate'])
return render(request, 'administracionScazac/monitoringDataDelete.djhtml',{'dataForm': dataForm,
'title':"Eliminar datos de calidad del aire",
                                                                            'alertDelete':"alert(\"Se han borrado los datos con éxito!!!\");"})
else:
return render(request, 'administracionScazac/monitoringDataDelete.djhtml',{'dataForm': dataForm,
'title':"Eliminar datos de calidad del aire"})
def generateMonitoringDataForm():
"""If is on the form file, it does not load all the new geneerated monitoring stations."""
class MonitoringDataFormAdd(forms.Form):
"""Form for the requierment F3-4"""
nameMonitoringPlace = forms.ChoiceField(label="Estación de monitoreo.", help_text="Estación de monitoreo a la cual se le asignaran los datos.", choices=tuple((station.serialNumber, station.nameMonitoringPlace) for station in MonitoringStation.objects.all()))
archivoCSV = forms.FileField(label="Subir archivo con datos.", help_text="Subir archivos a la estación de monitoreo correspondiente. Archivo máximo de 70 MB. Se deben de tener las siguientes columnas: Temp, O3, CO, NO, NO2, NOX, SO2, TempAmbiente, RH, WS, WD, PresionBaro, RadSolar, Precipitacion, PM10, PM2.5. Si la estación de monitoreo no incluyo datos de una columna en especifico (excepto Fecha), favor de incluir como a la columna con un unico dato con el contenido de None." , max_length=70)
return MonitoringDataFormAdd
def generateMonitoringDataFormDelete():
"""If is on the form file, it does not load all the new geneerated monitoring stations."""
class MonitoringDataFormDelete(forms.Form):
"""Form for the requierment F3-4"""
nameMonitoringPlace = forms.ChoiceField(label="Estación de monitoreo.", help_text="Estación de monitoreo a la cual se le borraran los datos.", choices=tuple((station.serialNumber, station.nameMonitoringPlace) for station in MonitoringStation.objects.all()))
initialDate = forms.DateField(label="Fecha del primer registro",help_text="Ingrese una fecha apartir de donde se eliminaran datos. (2006-10-25)", error_messages={'required':"Se necesita que ingrese una fecha de inicio!" , 'invalid':"Ingrese una fecha valida!"}, input_formats=['%Y-%m-%d'] ) # Modified 20171001: Only on type of date will be accepted to reduce complexity converting to type date
finalDate = forms.DateField(label="Fecha del ultimo registro",help_text="Ingrese una fecha de donde se termine de eliminar datos.(2006-10-25)", error_messages={'required':"Se necesita que ingrese una fecha de inicio!" , 'invalid':"Ingrese una fecha valida!"}, input_formats=['%Y-%m-%d'])
return MonitoringDataFormDelete
def generateMonitoringStationFormAddOrModify(isAdd):
"""Be able to have call only once the definition of the form."""
if isAdd:
class MonitoringStationForm(forms.Form):
"""Form for the requirment F3-3"""
nameMonitoringPlace = forms.CharField(label="Nombre del lugar que se monitorea.", help_text="El lugar donde se localiza la estación de monitoreo.", max_length=125)
latitude = forms.DecimalField(label="Latitud", help_text="Latitud con la que se posiciona la estación de monitoreo.", max_digits=10, decimal_places=7, max_value=Decimal(90), min_value=Decimal(-90))
length = forms.DecimalField(label="Longitud", help_text="Longitud con la que se posiciona la estación de monitoreo.", max_digits=10, decimal_places=7, max_value=Decimal(180), min_value=Decimal(-180))
monitoringRadius = forms.DecimalField(label="Radio de monitoreo", help_text="Que tanto terreno la estación de monitoreo puede monitorear. (En metros)", max_digits=10, decimal_places=2)
else:
class MonitoringStationForm(forms.Form):
"""Form for the requirment F3-3"""
nameMonitoringPlace = forms.ChoiceField(label="Nombre del lugar que se monitorea.", help_text="El lugar donde se localiza la estación de monitoreo.", choices=tuple((station.serialNumber, station.nameMonitoringPlace) for station in MonitoringStation.objects.all()))
latitude = forms.DecimalField(label="Latitud", help_text="Latitud con la que se posiciona la estación de monitoreo.", max_digits=10, decimal_places=7, max_value=Decimal(90), min_value=Decimal(-90))
length = forms.DecimalField(label="Longitud", help_text="Longitud con la que se posiciona la estación de monitoreo.", max_digits=10, decimal_places=7, max_value=Decimal(180), min_value=Decimal(-180))
monitoringRadius = forms.DecimalField(label="Radio de monitoreo", help_text="Que tanto terreno la estación de monitoreo puede monitorear. (En metros)", max_digits=10, decimal_places=2)
return MonitoringStationForm
def generateJsonStationModifyView():
"""Generate Json data to be parsed on the Modify view.
    Will be used on both the post and get functions."""
jsonStation = {}
allStations = MonitoringStation.objects.all()
for station in allStations:
jsonStation[station.serialNumber] = {'nameMonitoringPlace':station.nameMonitoringPlace, 'latitude':float(station.latitude), 'length':float(station.length), 'monitoringRadius':float(station.monitoringRadius)}
return jsonStation
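# Example of the structure returned above (values are illustrative only):
#   {1: {'nameMonitoringPlace': 'Centro', 'latitude': 22.7709,
#        'length': -102.5832, 'monitoringRadius': 2500.0}}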
def imecaO3(o3):
"""Formula to calculate the imeca of the Ozone"""
if 0 <= o3 and o3 <= 0.220:
return o3 * 50/0.055
elif 0.220 < o3:
        return o3 * 200/0.22
else:
return 0
def imecaNO(no):
"""Imeca for the NO, NO2 and NOX"""
if 0.000 <= no and no <= 0.420:
return no * 50/0.105
elif 0.420 < no:
return no * 200/0.42
return 0
def imecaSO2(so2):
"""Imeca for the SO2"""
if 0.000 <= so2 and so2 <= 0.260:
return so2 * 50 / 0.065
elif 0.260 < so2:
return so2 * 200 / 0.26
else:
return 0
def imecaCO(co):
"""Imeca for the CO"""
if 0.00 <= co and co <= 22.00:
return co * 50 / 5.5
elif 22.00 < co:
return co * 200 / 22
else:
return 0
def imecaPM10(pm10):
"""Imeca for the PM10"""
if 0 <= pm10 and pm10 <= 120:
return pm10 * 50 / 60
elif 120 < pm10 and pm10 <= 320:
return 40 + pm10 * 50 / 100
elif 320 < pm10:
return pm10 * 200 / 320
else:
return 0
def imecaPM25(pm25):
"""Imeca for the PM25"""
if 0 <= pm25 and pm25 <= 15.4:
return pm25 * 50 / 15.4
elif 15.4 < pm25 and pm25 <= 40.4:
return 20.50 + pm25 * 48 / 24.9
elif 40.4 < pm25 and pm25 <= 65.4:
return 21.30 + pm25 * 49 / 24.9
elif 65.4 < pm25 and pm25 <= 150.4:
return 113.20 + pm25 * 49 / 84.9
elif 150.4 < pm25:
return pm25 * 201 / 150.5
else:
return 0
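# The breakpoints hard-coded above follow the linear per-pollutant bands of the
# IMECA air quality index (concentrations assumed in ppm for gases and ug/m3
# for particulates). A quick sanity check of the helpers, for illustration:
#
#     imecaO3(0.055)   # -> 50.0, upper edge of the first ozone band
#     imecaCO(5.5)     # -> 50.0
#     imecaPM10(60)    # -> 50.0
#
# All helpers return 0 for negative or missing readings instead of raising.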
def newstOldestDate(data):
"""Calculate both the Newest and Oldest Date on the data specified"""
new = dateutil.parser.parse(data[0])
old = new
for dateStr in data:
date = dateutil.parser.parse(dateStr)
if date < old:
old = date
elif date > new:
new = date
return old, new
def saveMonitoringData(data, monitoringStation):
"""First save the raw data, calculate raw IMECA data, then the avarage IMECA for hours, days and month. Returns the oldest an newest date respectible."""
sizeData = len(data["Fecha"])
if "nulo" == data["Temp"][0]:
temp = None
else:
temp = 1
if data["O3"][0] == "nulo":
o3 = None
else:
o3 = 1
if "nulo" == data["CO"][0]:
co = None
else:
co = 1
if "nulo" == data["NO"][0]:
no = None
else:
no = 1
if "nulo" == data["NO2"][0]:
no2 = None
else:
no2 = 1
if "nulo" == data["NOX"][0]:
nox = None
else:
nox = 1
if "nulo" == data["SO2"][0]:
so2 = None
else:
so2 = 1
if "nulo" == data["TempAmbiente"][0]:
tempAmb = None
else:
tempAmb = 1
if "nulo" == data["RH"][0]:
rh = None
if "nulo" == data["WS"][0]:
ws = None
else:
ws = 1
if "nulo" == data["WD"][0]:
wd = None
else:
wd = 1
if "nulo" == data["PresionBaro"][0]:
presBaro = None
else:
presBaro = 1
if "nulo" == data["RadSolar"][0]:
radSolar = None
else:
radSolar = 1
if "nulo" == data["Precipitacion"][0]:
precip = None
else:
precip = 1
if "nulo" == data["PM10"][0]:
pm10 = None
else:
pm10 = 1
if "nulo" == data["PM2.5"][0]:
pm25 = None
else:
pm25 = 1
for i in range(0, sizeData):
pollant = MonitoringData()
pollant.idStation = monitoringStation
pollant.fecha = dateutil.parser.parse(data["Fecha"][i])
try:
temp = temp * 1
pollant.temperatura = data["Temp"][i]
except:
pollant.temperatura = None
try:
o3 = o3 * 1
pollant.o3 = data["O3"][i]
pollant.imecaO3 = imecaO3(data["O3"][i])
except:
pollant.o3 = None
pollant.imecaO3 = None
try:
co = co * 1
pollant.co = data["CO"][i]
pollant.imecaCO = imecaCO(data["CO"][i])
except:
pollant.co = None
pollant.imecaCO = None
try:
no = no * 1
pollant.no = data["NO"][i]
pollant.imecaNO = imecaNO(data["NO"][i])
except:
pollant.no = None
pollant.imecaNO = None
try:
no2 = no2 * 1
pollant.no2 = data["NO2"][i]
pollant.imecaNO2 = imecaNO(data["NO2"][i])
except:
pollant.no2 = None
pollant.imecaNO2 = None
try:
nox = nox * 1
pollant.nox = data["NOX"][i]
pollant.imecaNOX = imecaNO(data["NOX"][i])
except:
pollant.nox = None
pollant.imecaNOX = None
try:
so2 = so2 * 1
pollant.so2 = data["SO2"][i]
pollant.imecaSO2 = imecaSO2(data["SO2"][i])
except:
pollant.so2 = None
pollant.imecaSO2 = None
try:
tempAmb = tempAmb * 1
pollant.temperaturaAmbiente = data["TempAmbiente"][i]
except:
pollant.temperaturaAmbiente = None
try:
rh = rh * 1
pollant.humedadRelativa = data["RH"][i]
except:
pollant.humedadRelativa = None
try:
ws = ws * 1
pollant.ws = data["WS"][i]
except:
pollant.ws = None
try:
wd = wd * 1
pollant.wd = data["WD"][i]
except:
pollant.wd = None
try:
presBaro = presBaro * 1
pollant.presionBarometrica = data["PresionBaro"][i]
except:
pollant.presionBarometrica = None
try:
radSolar = radSolar * 1
pollant.radiacionSolar = data["RadSolar"][i]
except:
pollant.radiacionSolar = None
try:
precip = precip * 1
pollant.precipitacion = data["Precipitacion"][i]
except:
pollant.precipitacion = None
try:
pm10 = pm10 * 1
pollant.pm10 = data["PM10"][i]
pollant.imecaPM10 = imecaPM10(data["PM10"][i])
except:
pollant.pm10 = None
pollant.imecaPM10 = None
try:
pm25 = pm25 * 1
pollant.pm25 = data["PM2.5"][i]
pollant.imecaPM25 = imecaPM25(data["PM2.5"][i])
except:
pollant.pm25 = None
pollant.imecaPM25 = None
pollant.save()
print pollant.imecaO3
return newstOldestDate(data["Fecha"])
def saveImecaDataHour(station, oldDate, newDate):
""""""
pollantHour = dateutil.parser.parse(str(oldDate.year) + "-" + number2ZeroBeforeDigit(oldDate.month) + "-" + number2ZeroBeforeDigit(oldDate.day) + " 00" )
# Making sure that the query will compare only the year and month
pollantHourString = str(pollantHour.year) + "-" + number2ZeroBeforeDigit(pollantHour.month) + "-" + number2ZeroBeforeDigit(pollantHour.day) + " " + number2ZeroBeforeDigit(pollantHour.hour)
pollantHourData = MonitoringData.objects.filter(fecha__icontains=pollantHourString).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
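    # Matching the "YYYY-MM-DD HH" prefix with icontains pulls every raw record
    # stamped within that hour; the day and month roll-ups reuse the same
    # string-prefix trick on their respective tables.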
# Convert newDate to a DateTime type
stationDateTimeNewestRegister = dateutil.parser.parse(str(newDate.year) + "-" + number2ZeroBeforeDigit(newDate.month) + "-" + number2ZeroBeforeDigit(newDate.day) + " 23" )
#
while pollantHour <= stationDateTimeNewestRegister:
monthImecas = ImecaDataHour()
pollantHourDataSize = len(pollantHourData)
if not(pollantHourDataSize == 0):
# Save the data on the system
print pollantHourString
arrayIMECA = imecaMean(pollantHourData, pollantHourDataSize)
monthImecas.setFecha(str(pollantHour.year), number2ZeroBeforeDigit(pollantHour.month),number2ZeroBeforeDigit(pollantHour.day), number2ZeroBeforeDigit(pollantHour.hour) )
monthImecas.idStation = station
monthImecas.imecaO3 = arrayIMECA[0]
monthImecas.imecaNO = arrayIMECA[1]
monthImecas.imecaNO2 = arrayIMECA[2]
monthImecas.imecaNOX = arrayIMECA[3]
monthImecas.imecaSO2 = arrayIMECA[4]
monthImecas.imecaCO = arrayIMECA[5]
monthImecas.imecaPM10 = arrayIMECA[6]
monthImecas.imecaPM25 = arrayIMECA[7]
monthImecas.save()
pollantHour += datetime.timedelta(hours=1)
pollantHourString = str(pollantHour.year) + "-" + number2ZeroBeforeDigit(pollantHour.month) + "-" + number2ZeroBeforeDigit(pollantHour.day) + " " + number2ZeroBeforeDigit(pollantHour.hour)
pollantHourData = MonitoringData.objects.filter(fecha__icontains=pollantHourString).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
else:
pollantHour += datetime.timedelta(hours=1)
pollantHourString = str(pollantHour.year) + "-" + number2ZeroBeforeDigit(pollantHour.month) + "-" + number2ZeroBeforeDigit(pollantHour.day) + " " + number2ZeroBeforeDigit(pollantHour.hour)
pollantHourData = MonitoringData.objects.filter(fecha__icontains=pollantHourString).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
def saveImecaDataDay(station, oldDate, newDate):
""""""
pollantDay = oldDate
pollantDayData = ImecaDataHour.objects.filter(fecha__icontains=pollantDay).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
while pollantDay <= newDate:
dayImecas = ImecaDataDay()
pollantDayDataSize = len(pollantDayData)
        if not(pollantDayDataSize == 0):
# Save the data on the system
arrayIMECA = imecaMean(pollantDayData, pollantDayDataSize)
dayImecas.fecha = pollantDay
dayImecas.idStation = station
dayImecas.imecaO3 = arrayIMECA[0]
dayImecas.imecaNO = arrayIMECA[1]
dayImecas.imecaNO2 = arrayIMECA[2]
dayImecas.imecaNOX = arrayIMECA[3]
dayImecas.imecaSO2 = arrayIMECA[4]
dayImecas.imecaCO = arrayIMECA[5]
dayImecas.imecaPM10 = arrayIMECA[6]
dayImecas.imecaPM25 = arrayIMECA[7]
dayImecas.save()
print str(dayImecas.fecha)
# Continue whit the next day
pollantDay += datetime.timedelta(days=1)
pollantDayData = ImecaDataHour.objects.filter(fecha__icontains=pollantDay).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
else:
# Just continue whit the next day
pollantDay += datetime.timedelta(days=1)
pollantDayData = ImecaDataHour.objects.filter(fecha__icontains=pollantDay).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
def saveImecaDataMonth(station, oldDate, newDate):
""""""
# Put the oldest month of monitoring
pollantMonth = dateutil.parser.parse(str(oldDate.year) + "-" + number2ZeroBeforeDigit(oldDate.month) + "-" + "01").date()
# Making sure that the query will compare only the year and month
pollantMonthString = str(pollantMonth.year) + "-" + number2ZeroBeforeDigit(pollantMonth.month)
pollantMonthData = ImecaDataDay.objects.filter(fecha__icontains=pollantMonthString).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
#
while pollantMonth <= newDate.date():
monthImecas = ImecaDataMonth()
pollantMonthDataSize = len(pollantMonthData)
if not(pollantMonthDataSize == 0):
# Save the data on the system
arrayIMECA = imecaMean(pollantMonthData, pollantMonthDataSize)
monthImecas.setFecha(str(pollantMonth.year), number2ZeroBeforeDigit(pollantMonth.month))
monthImecas.idStation = station
monthImecas.imecaO3 = arrayIMECA[0]
monthImecas.imecaNO = arrayIMECA[1]
monthImecas.imecaNO2 = arrayIMECA[2]
monthImecas.imecaNOX = arrayIMECA[3]
monthImecas.imecaSO2 = arrayIMECA[4]
monthImecas.imecaCO = arrayIMECA[5]
monthImecas.imecaPM10 = arrayIMECA[6]
monthImecas.imecaPM25 = arrayIMECA[7]
monthImecas.save()
# Continue whit the next month, if month == 12, continue whit the next month of the next year
if pollantMonth.month < 12:
pollantMonth = pollantMonth.replace(month=pollantMonth.month + 1)
else:
pollantMonth = pollantMonth.replace(month=1, year=pollantMonth.year + 1)
pollantMonthString = str(pollantMonth.year) + "-" + number2ZeroBeforeDigit(pollantMonth.month)
pollantMonthData = ImecaDataDay.objects.filter(fecha__icontains=pollantMonthString).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
else:
            # Just continue with the next month
if pollantMonth.month < 12:
pollantMonth = pollantMonth.replace(month=pollantMonth.month + 1)
else:
pollantMonth = pollantMonth.replace(month=1, year=pollantMonth.year + 1)
pollantMonthString = str(pollantMonth.year) + "-" + number2ZeroBeforeDigit(pollantMonth.month)
pollantMonthData = ImecaDataDay.objects.filter(fecha__icontains=pollantMonthString).filter(idStation__pk=station.serialNumber).values_list('imecaO3', 'imecaNO', 'imecaNO2', 'imecaNOX', 'imecaSO2', 'imecaCO', 'imecaPM10', 'imecaPM25')
def imecaMean(monitoringStation, sizeData):
"""Determines the mean of each IMECA. If the IMECA does contains information. Pollant data is on this orden as folows: o3, no, no2, nox, so2, co, pm10, pm25"""
imecas = [0 for i in range(0, 8)]
    for imeca in monitoringStation:
        for j in range(0, 8):
            try:
                imecas[j] += imeca[j]
            except:
                imecas[j] = None
    # Calculate the mean of each IMECA and convert it to float
for i in range(0, 8):
try:
            imecas[i] = float(imecas[i]) / sizeData
except:
imecas[i] = None
return imecas
def number2ZeroBeforeDigit(number):
"""Returns a the digit whit a zero before if the number it is below 10"""
if number < 10:
return "0" + str(number)
else:
return str(number)
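# Illustrative usage: number2ZeroBeforeDigit(7) -> "07", number2ZeroBeforeDigit(12) -> "12"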
| gpl-3.0 | 6,718,961,637,690,790,000 | 50.749686 | 507 | 0.605066 | false |
datapythonista/pandas | scripts/sync_flake8_versions.py | 3 | 5144 | """
Check that the flake8 (and pandas-dev-flaker) pins are the same in:
- environment.yml
- .pre-commit-config.yaml, in the flake8 hook
- .pre-commit-config.yaml, in the additional dependencies of the yesqa hook
The flake8 hook revision in .pre-commit-config.yaml is taken as the reference revision.
Usage: either
- ``python scripts/sync_flake8_versions.py``, or
- ``pre-commit run sync-flake8-versions --all-files``.
"""
from __future__ import annotations
from dataclasses import (
dataclass,
replace,
)
import sys
from typing import (
Any,
Mapping,
Sequence,
TypeVar,
)
import yaml
@dataclass
class Revision:
name: str
compare: str
version: str
@dataclass
class Revisions:
name: str
pre_commit: Revision | None = None
yesqa: Revision | None = None
environment: Revision | None = None
YamlMapping = Mapping[str, Any]
Repo = TypeVar("Repo", bound=YamlMapping)
COMPARE = ("<=", "==", ">=", "<", ">", "=")
def _get_repo_hook(repos: Sequence[Repo], hook_name: str) -> tuple[Repo, YamlMapping]:
for repo in repos:
for hook in repo["hooks"]:
if hook["id"] == hook_name:
return repo, hook
else: # pragma: no cover
raise RuntimeError(f"Repo with hook {hook_name} not found")
def _conda_to_pip_compat(dep):
if dep.compare == "=":
return replace(dep, compare="==")
else:
return dep
def _validate_additional_dependencies(
flake8_additional_dependencies,
yesqa_additional_dependencies,
environment_additional_dependencies,
) -> None:
for dep in flake8_additional_dependencies:
if dep not in yesqa_additional_dependencies:
sys.stdout.write(
f"Mismatch of '{dep.name}' version between 'flake8' "
"and 'yesqa' in '.pre-commit-config.yaml'\n"
)
sys.exit(1)
if dep not in environment_additional_dependencies:
sys.stdout.write(
f"Mismatch of '{dep.name}' version between 'enviroment.yml' "
"and additional dependencies of 'flake8' in '.pre-commit-config.yaml'\n"
)
sys.exit(1)
def _validate_revisions(revisions):
if revisions.environment != revisions.pre_commit:
sys.stdout.write(
f"{revisions.name} in 'environment.yml' does not "
"match in 'flake8' from 'pre-commit'\n"
)
sys.exit(1)
if revisions.yesqa != revisions.pre_commit:
sys.stdout.write(
f"{revisions.name} in 'yesqa' does not match "
"in 'flake8' from 'pre-commit'\n"
)
sys.exit(1)
def _process_dependencies(deps):
for dep in deps:
if isinstance(dep, str):
for compare in COMPARE:
if compare in dep:
pkg, rev = dep.split(compare, maxsplit=1)
yield _conda_to_pip_compat(Revision(pkg, compare, rev))
break
else:
yield from _process_dependencies(dep["pip"])
def get_revisions(
precommit_config: YamlMapping, environment: YamlMapping
) -> tuple[Revisions, Revisions]:
flake8_revisions = Revisions(name="flake8")
pandas_dev_flaker_revisions = Revisions(name="pandas-dev-flaker")
repos = precommit_config["repos"]
flake8_repo, flake8_hook = _get_repo_hook(repos, "flake8")
flake8_revisions.pre_commit = Revision("flake8", "==", flake8_repo["rev"])
flake8_additional_dependencies = []
for dep in _process_dependencies(flake8_hook.get("additional_dependencies", [])):
if dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.pre_commit = dep
else:
flake8_additional_dependencies.append(dep)
_, yesqa_hook = _get_repo_hook(repos, "yesqa")
yesqa_additional_dependencies = []
for dep in _process_dependencies(yesqa_hook.get("additional_dependencies", [])):
if dep.name == "flake8":
flake8_revisions.yesqa = dep
elif dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.yesqa = dep
else:
yesqa_additional_dependencies.append(dep)
environment_dependencies = environment["dependencies"]
environment_additional_dependencies = []
for dep in _process_dependencies(environment_dependencies):
if dep.name == "flake8":
flake8_revisions.environment = dep
elif dep.name == "pandas-dev-flaker":
pandas_dev_flaker_revisions.environment = dep
else:
environment_additional_dependencies.append(dep)
_validate_additional_dependencies(
flake8_additional_dependencies,
yesqa_additional_dependencies,
environment_additional_dependencies,
)
for revisions in flake8_revisions, pandas_dev_flaker_revisions:
_validate_revisions(revisions)
if __name__ == "__main__":
with open(".pre-commit-config.yaml") as fd:
precommit_config = yaml.safe_load(fd)
with open("environment.yml") as fd:
environment = yaml.safe_load(fd)
get_revisions(precommit_config, environment)
sys.exit(0)
| bsd-3-clause | -1,023,367,306,878,247,000 | 29.43787 | 88 | 0.625 | false |
atakan/Fractal-Trails | trail_analyze.py | 1 | 6139 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Mehmet Atakan Gürkan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (probably in a file named COPYING).
# If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import sys
import argparse
from trail_length_calc import trail_length_1d, trail_length_3d
parser = argparse.ArgumentParser(description='Analyzes a given trail')
parser.add_argument('-i', '--input-file',
metavar='<input file>',
type=argparse.FileType('r'), dest='infile',
default=None,
help='name(s) of the input file(s) (use \'-\' for stdin)')
# XXX accepting multiple file names is not implemented yet.
# (will use nargs?)
parser.add_argument('-t',
type=float, metavar='<float>', default=1.0,
help='duration of motion (default: 1.0)')
parser.add_argument('--first-column-time', dest='firstcol',
action='store_true',
help='first column in data file is time (overrides \'-t\'; default: time interval is determined by subdividing duration uniformly)')
parser.add_argument('--numpy', dest='inputformat', action='store_const',
const='numpy', default='undecided',
help='input in NumPy format (default: NumPy)')
parser.add_argument('--ascii', dest='inputformat', action='store_const',
                    const='ascii', default='undecided',
                    help='input in ASCII format (default: NumPy)')
parser.add_argument('--jump-check', dest='jumpcheck',
action='store_true',
help='check if there are discontinuities in the data, ie., points with same time but different coordinates')
args = parser.parse_args()
def br_pl(a1, a2, m1, m2, m3, b):
'''A function that returns a function that makes a broken powerlaw.
a1, a2 : x coordinates of the break points.
b : y intersect of the first power law (x<=a1)
m1, m2, m3: slopes for x<a1, a1<x<a2 and a2<x .'''
def ifelse(x, y, z) :
if x: return y
else: return z
k1 = a1*(m1-m2) + b
k2 = a2*(m2-m3) + k1
return lambda x: ifelse(x<a1, m1*x +b,
ifelse(x<a2, m2*x+k1,
m3*x+k2))
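# Illustrative example: f = br_pl(1.0, 2.0, 0.5, 1.0, 2.0, 0.0) gives slope 0.5 below
# x=1, slope 1.0 between x=1 and x=2, and slope 2.0 above x=2; k1 and k2 are chosen so
# the segments join continuously at the break points.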
def set_ruler_lengths(rl_min, rl_max, tend) :
'''A function that creates an array of ruler lengths/sampling intervals.
All values returned are in the closed interval of [rl_min, rl_max]. They
are exact divisors of tend, which is the absolute maximum.'''
dummy_rl = [tend/1.0, tend/2.0, tend/3.0, tend/4.0,
tend/5.0, tend/6.0, tend/7.0, tend/8.0,
tend/10.0, tend/12.0, tend/14.0, tend/17.0,
tend/20.0, tend/24.0, tend/28.0, tend/33.0,
tend/40.0, tend/48.0, tend/56.0, tend/67.0]
for i in range(100) :
dummy_rl.append(dummy_rl[-4]/2.0)
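    # The loop above extends the candidate list downward by halving the entry four
    # places back, which keeps the roughly geometric spacing of the initial divisors;
    # values outside [rl_min, rl_max] are discarded below.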
rl = []
for drl in dummy_rl :
if drl <= rl_max and drl >= rl_min :
rl.append(drl)
rl.reverse()
return np.array(rl)
dn = np.loadtxt(args.infile)
if args.firstcol==True :
if np.size(np.shape(dn))==2 and np.shape(dn)[1]==4 :
tt = dn[:,0]
dd = dn[:,1:]
elif np.size(np.shape(dn))==2 and np.shape(dn)[1]==2 :
tt = dn[:,0]
dd = dn[:,1]
else :
print('input file is not 1D or 3D')
print(np.shape(dn))
sys.exit()
else :
tt = np.linspace(0, args.t, np.shape(dn)[0])
dd = dn
if args.jumpcheck == True :
same_ts = []
told = tt[0]
found_duplicate = False
duplicates = []
length_dup = 1
for i in range(1,len(tt)) :
tnow = tt[i]
if tnow == told :
found_duplicate = True
length_dup += 1
else :
if found_duplicate == True : # duplicate string ended
duplicates.append([i-length_dup, length_dup])
length_dup = 1
found_duplicate = False
told = tnow
if found_duplicate == True : # no more data
duplicates.append([i-length_dup+1, length_dup])
# print(tt)
# print(duplicates)
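    # Each entry in duplicates is [start_index, run_length]. The loop below nudges the
    # repeated timestamps by a tiny fraction (1e-4) of the neighbouring time gap so the
    # time series becomes strictly increasing without visibly moving the samples.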
for i, k in duplicates :
if i == 0 : # special case 1, starting w/ dups
tprev = tt[0]
tnext = tt[i+k+1]
tdel = tnext-tprev
for j in range(k) :
tt[i+j] += tdel * 1e-4 * float(j)/k
elif i+k == len(tt) : # special case 2, ending w/ dups
tprev = tt[i-1]
tnext = tt[-1]
tdel = tnext-tprev
for j in range(k) :
tt[i+j] -= tdel * 1e-4 * float(k-j-1)/k
else :
tprev = tt[i-1]
tnext = tt[i+k+1]
for j in range(k) :
tdup = tt[i]
if j<k/2 :
tdel = tdup-tprev
else :
tdel = tnext-tdup
tt[i+j] += tdel * 1e-4 * float(j - k/2.0)/k
# print(tt)
# sys.exit(0)
tend = tt[-1]
period = 2.3e-4
rl_min = period/5.0
rl_max = tend/2.0
ruler_lengths = set_ruler_lengths(rl_min, rl_max, tend)
if np.size(np.shape(dd))==2 and np.shape(dd)[1]==3 :
# print('3d')
trail_lengths = trail_length_3d(ruler_lengths, tt, dd)
elif np.size(np.shape(dd))==1 :
# print('1d')
trail_lengths = trail_length_1d(ruler_lengths, tt, dd)
else :
print('input file is not 1D or 3D')
print(np.shape(dd))
sys.exit()
for i, rl in enumerate(ruler_lengths) :
print(rl, trail_lengths[i])
| gpl-3.0 | -4,184,026,960,048,453,000 | 34.894737 | 152 | 0.558977 | false |
fw1121/bcbio-nextgen | bcbio/pipeline/qcsummary.py | 1 | 48654 | """Quality control and summary metrics for next-gen alignments and analysis.
"""
import collections
import contextlib
import csv
import os
import shutil
import subprocess
import pandas as pd
import lxml.html
import yaml
from datetime import datetime
# allow graceful degradation during upgrades
try:
import matplotlib
matplotlib.use('Agg', force=True)
import matplotlib.pyplot as plt
plt.ioff()
except ImportError:
plt = None
try:
from fadapa import Fadapa
except ImportError:
Fadapa = None
import pybedtools
import pysam
import toolz as tz
import toolz.dicttoolz as dtz
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.log import logger
from bcbio.pipeline import config_utils, run_info
from bcbio.install import _get_data_dir
from bcbio.provenance import do
import bcbio.rnaseq.qc
from bcbio.rnaseq.coverage import plot_gene_coverage
import bcbio.pipeline.datadict as dd
from bcbio.variation import bedutils
from bcbio import broad
from bcbio.variation import coverage_experimental as cov
from bcbio.variation.coverage import decorate_problem_regions
# ## High level functions to generate summary
def generate_parallel(samples, run_parallel):
"""Provide parallel preparation of summary information for alignment and variant calling.
"""
sum_samples = run_parallel("pipeline_summary", samples)
qsign_info = run_parallel("qsignature_summary", [sum_samples])
summary_file = write_project_summary(sum_samples, qsign_info)
samples = []
for data in sum_samples:
if "summary" not in data[0]:
data[0]["summary"] = {}
data[0]["summary"]["project"] = summary_file
if qsign_info:
data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
samples.append(data)
samples = _add_researcher_summary(samples, summary_file)
return samples
def pipeline_summary(data):
"""Provide summary information on processing sample.
"""
work_bam = data.get("work_bam")
if data["sam_ref"] is not None and work_bam and work_bam.endswith(".bam"):
logger.info("Generating summary files: %s" % str(data["name"]))
data["summary"] = _run_qc_tools(work_bam, data)
elif data["analysis"].lower().startswith("smallrna-seq"):
work_bam = data["clean_fastq"]
data["summary"] = _run_qc_tools(work_bam, data)
return [[data]]
def prep_pdf(qc_dir, config):
"""Create PDF from HTML summary outputs in QC directory.
Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1
Thanks to: https://www.biostars.org/p/16991/
Works around issues with CSS conversion on CentOS by adjusting CSS.
"""
html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
try:
topdf = config_utils.get_program("wkhtmltopdf", config)
except config_utils.CmdNotFound:
topdf = None
if topdf and utils.file_exists(html_file):
out_file = "%s.pdf" % os.path.splitext(html_file)[0]
if not utils.file_exists(out_file):
cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
% (html_file, html_fixed))
do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
cmd = [topdf, html_fixed, out_file]
do.run(cmd, "Convert QC HTML to PDF")
return out_file
def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
metrics = {}
to_run = []
if "fastqc" not in tz.get_in(("config", "algorithm", "tools_off"), data, []):
to_run.append(("fastqc", _run_fastqc))
if data["analysis"].lower().startswith("rna-seq"):
# to_run.append(("rnaseqc", bcbio.rnaseq.qc.sample_summary))
# to_run.append(("coverage", _run_gene_coverage))
# to_run.append(("complexity", _run_complexity))
to_run.append(("qualimap", _rnaseq_qualimap))
elif data["analysis"].lower().startswith("chip-seq"):
to_run.append(["bamtools", _run_bamtools_stats])
elif not data["analysis"].lower().startswith("smallrna-seq"):
to_run += [("bamtools", _run_bamtools_stats), ("gemini", _run_gemini_stats)]
if data["analysis"].lower().startswith(("standard", "variant2")):
to_run.append(["qsignature", _run_qsignature_generator])
if "qualimap" in tz.get_in(("config", "algorithm", "tools_on"), data, []):
to_run.append(("qualimap", _run_qualimap))
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
for program_name, qc_fn in to_run:
cur_qc_dir = os.path.join(qc_dir, program_name)
cur_metrics = qc_fn(bam_file, data, cur_qc_dir)
metrics.update(cur_metrics)
# if (ratio < 0.60 and data['config']["algorithm"].get("kraken", None) and
# (data["analysis"].lower().startswith("rna-seq") or
# data["analysis"].lower().startswith("standard"))):
if data['config']["algorithm"].get("kraken", None):
ratio = bam.get_aligned_reads(bam_file, data)
cur_metrics = _run_kraken(data, ratio)
metrics.update(cur_metrics)
bam.remove("%s-downsample%s" % os.path.splitext(bam_file))
metrics["Name"] = data["name"][-1]
metrics["Quality format"] = utils.get_in(data,
("config", "algorithm",
"quality_format"),
"standard").lower()
return {"qc": qc_dir, "metrics": metrics}
# ## Generate project level QC summary for quickly assessing large projects
def write_project_summary(samples, qsign_info=None):
"""Write project summary information on the provided samples.
write out dirs, genome resources,
"""
work_dir = samples[0][0]["dirs"]["work"]
out_file = os.path.join(work_dir, "project-summary.yaml")
upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
if "dir" in samples[0][0]["upload"] else "")
test_run = samples[0][0].get("test_run", False)
date = str(datetime.now())
prev_samples = _other_pipeline_samples(out_file, samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump({"date": date}, out_handle,
default_flow_style=False, allow_unicode=False)
if test_run:
yaml.safe_dump({"test_run": True}, out_handle, default_flow_style=False,
allow_unicode=False)
if qsign_info:
qsign_out = utils.deepish_copy(qsign_info[0])
qsign_out.pop("out_dir", None)
yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False,
allow_unicode=False)
yaml.safe_dump({"upload": upload_dir}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
default_flow_style=False, allow_unicode=False)
return out_file
def _other_pipeline_samples(summary_file, cur_samples):
"""Retrieve samples produced previously by another pipeline in the summary output.
"""
cur_descriptions = set([s[0]["description"] for s in cur_samples])
out = []
if os.path.exists(summary_file):
with open(summary_file) as in_handle:
for s in yaml.load(in_handle).get("samples", []):
if s["description"] not in cur_descriptions:
out.append(s)
return out
def _save_fields(sample):
to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata",
"description"]
saved = {k: sample[k] for k in to_save if k in sample}
if "summary" in sample:
saved["summary"] = {"metrics": sample["summary"]["metrics"]}
# check if disambiguation was run
if "disambiguate" in sample:
if utils.file_exists(sample["disambiguate"]["summary"]):
disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"])
saved["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0]
disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0]
if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple))
else sample["config"]["algorithm"]["disambiguate"])
saved["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1]
saved["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2]
return saved
def _parse_disambiguate(disambiguatestatsfilename):
"""Parse disambiguation stats from given file.
"""
disambig_stats = [0, 0, 0]
with open(disambiguatestatsfilename, "r") as in_handle:
for i, line in enumerate(in_handle):
fields = line.strip().split("\t")
if i == 0:
assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs']
else:
disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])]
return disambig_stats
# ## Generate researcher specific summaries
def _add_researcher_summary(samples, summary_yaml):
"""Generate summary files per researcher if organized via a LIMS.
"""
by_researcher = collections.defaultdict(list)
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
by_researcher[researcher].append(data["description"])
out_by_researcher = {}
for researcher, descrs in by_researcher.items():
out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher,
set(descrs), samples[0][0])
out = []
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
data["summary"]["researcher"] = out_by_researcher[researcher]
out.append([data])
return out
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data):
"""Generate a CSV file with summary information for a researcher on this project.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")),
"%s-summary.tsv" % run_info.clean_name(researcher))
metrics = ["Total reads", "Mapped reads", "Mapped reads pct", "Duplicates", "Duplicates pct"]
with open(summary_yaml) as in_handle:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(["Name"] + metrics)
for sample in yaml.safe_load(in_handle)["samples"]:
if sample["description"] in descrs:
row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "")
for x in metrics]
writer.writerow(row)
return out_file
# ## Run and parse read information from FastQC
class FastQCParser:
def __init__(self, base_dir, sample=None):
self._dir = base_dir
self.sample = sample
def get_fastqc_summary(self):
ignore = set(["Total Sequences", "Filtered Sequences",
"Filename", "File type", "Encoding"])
stats = {}
for stat_line in self._fastqc_data_section("Basic Statistics")[1:]:
k, v = stat_line.split("\t")[:2]
if k not in ignore:
stats[k] = v
return stats
def _fastqc_data_section(self, section_name):
out = []
in_section = False
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file):
with open(data_file) as in_handle:
for line in in_handle:
if line.startswith(">>%s" % section_name):
in_section = True
elif in_section:
if line.startswith(">>END"):
break
out.append(line.rstrip("\r\n"))
return out
def save_sections_into_file(self):
data_file = os.path.join(self._dir, "fastqc_data.txt")
if os.path.exists(data_file) and Fadapa:
parser = Fadapa(data_file)
module = [m[1] for m in parser.summary()][2:9]
for m in module:
out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv")
dt = self._get_module(parser, m)
dt.to_csv(out_file, sep="\t", index=False)
def _get_module(self, parser, module):
"""
Get module using fadapa package
"""
dt = []
lines = parser.clean_data(module)
header = lines[0]
for data in lines[1:]:
if data[0].startswith("#"): #some modules have two headers
header = data
continue
if data[0].find("-") > -1: # expand positions 1-3 to 1, 2, 3
f, s = map(int, data[0].split("-"))
for pos in range(f, s):
dt.append([str(pos)] + data[1:])
else:
dt.append(data)
dt = pd.DataFrame(dt)
dt.columns = [h.replace(" ", "_") for h in header]
dt['sample'] = self.sample
return dt
def _run_gene_coverage(bam_file, data, out_dir):
out_file = os.path.join(out_dir, "gene_coverage.pdf")
ref_file = utils.get_in(data, ("genome_resources", "rnaseq", "transcripts"))
count_file = data["count_file"]
if utils.file_exists(out_file):
return out_file
with file_transaction(data, out_file) as tx_out_file:
plot_gene_coverage(bam_file, ref_file, count_file, tx_out_file)
return {"gene_coverage": out_file}
def _run_kraken(data, ratio):
"""Run kraken, generating report in specified directory and parsing metrics.
Using only first paired reads.
"""
logger.info("Number of aligned reads < than 0.60 in %s: %s" % (str(data["name"]), ratio))
logger.info("Running kraken to determine contaminant: %s" % str(data["name"]))
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
kraken_out = os.path.join(qc_dir, "kraken")
out = out_stats = None
db = data['config']["algorithm"]["kraken"]
kraken_cmd = config_utils.get_program("kraken", data["config"])
if db == "minikraken":
db = os.path.join(_get_data_dir(), "genomes", "kraken", "minikraken")
else:
if not os.path.exists(db):
logger.info("kraken: no database found %s, skipping" % db)
return {"kraken_report": "null"}
if not os.path.exists(os.path.join(kraken_out, "kraken_out")):
work_dir = os.path.dirname(kraken_out)
utils.safe_makedir(work_dir)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
fn_file = data["files"][0]
if fn_file.endswith("bam"):
logger.info("kraken: need fasta files as input")
return {"kraken_report": "null"}
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
out = os.path.join(tx_tmp_dir, "kraken_out")
out_stats = os.path.join(tx_tmp_dir, "kraken_stats")
cat = "zcat" if fn_file.endswith(".gz") else "cat"
cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick "
"--preload --min-hits 2 "
"--threads {num_cores} "
"--out {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals())
do.run(cl, "kraken: %s" % data["name"][-1])
if os.path.exists(kraken_out):
shutil.rmtree(kraken_out)
shutil.move(tx_tmp_dir, kraken_out)
metrics = _parse_kraken_output(kraken_out, db, data)
return metrics
def _parse_kraken_output(out_dir, db, data):
"""Parse kraken stat info comming from stderr,
generating report with kraken-report
"""
in_file = os.path.join(out_dir, "kraken_out")
stat_file = os.path.join(out_dir, "kraken_stats")
out_file = os.path.join(out_dir, "kraken_summary")
kraken_cmd = config_utils.get_program("kraken-report", data["config"])
classify = unclassify = None
with open(stat_file, 'r') as handle:
for line in handle:
if line.find(" classified") > -1:
classify = line[line.find("(") + 1:line.find(")")]
if line.find(" unclassified") > -1:
unclassify = line[line.find("(") + 1:line.find(")")]
if os.path.getsize(in_file) > 0 and not os.path.exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals())
do.run(cl, "kraken report: %s" % data["name"][-1])
kraken = {"kraken_clas": classify, "kraken_unclas": unclassify}
kraken_sum = _summarize_kraken(out_file)
kraken.update(kraken_sum)
return kraken
def _summarize_kraken(fn):
"""get the value at species level"""
kraken = {}
list_sp, list_value = [], []
with open(fn) as handle:
for line in handle:
cols = line.strip().split("\t")
sp = cols[5].strip()
if len(sp.split(" ")) > 1 and not sp.startswith("cellular"):
list_sp.append(sp)
list_value.append(cols[0])
kraken = {"kraken_sp": list_sp, "kraken_value": list_value}
return kraken
def _run_fastqc(bam_file, data, fastqc_out):
"""Run fastqc, generating report in specified directory and parsing metrics.
Downsamples to 10 million reads to avoid excessive processing times with large
files, unless we're running a Standard/smallRNA-seq/QC pipeline.
Handles fastqc 0.11+, which use a single HTML file and older versions that use
a directory of files + images. The goal is to eventually move to only 0.11+
"""
sentry_file = os.path.join(fastqc_out, "fastqc_report.html")
if not os.path.exists(sentry_file):
work_dir = os.path.dirname(fastqc_out)
utils.safe_makedir(work_dir)
ds_bam = (bam.downsample(bam_file, data, 1e7)
if data.get("analysis", "").lower() not in ["standard", "smallrna-seq"]
else None)
bam_file = ds_bam if ds_bam else bam_file
frmt = "bam" if bam_file.endswith("bam") else "fastq"
fastqc_name = utils.splitext_plus(os.path.basename(bam_file))[0]
num_cores = data["config"]["algorithm"].get("num_cores", 1)
with tx_tmpdir(data, work_dir) as tx_tmp_dir:
with utils.chdir(tx_tmp_dir):
cl = [config_utils.get_program("fastqc", data["config"]),
"-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", frmt, bam_file]
do.run(cl, "FastQC: %s" % data["name"][-1])
tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name)
tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name)
if os.path.exists("%s.zip" % tx_fastqc_out):
os.remove("%s.zip" % tx_fastqc_out)
if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file):
utils.safe_makedir(fastqc_out)
shutil.move(os.path.join(tx_fastqc_out, "fastqc_data.txt"), fastqc_out)
shutil.move(tx_combo_file, sentry_file)
elif not os.path.exists(sentry_file):
if os.path.exists(fastqc_out):
shutil.rmtree(fastqc_out)
shutil.move(tx_fastqc_out, fastqc_out)
parser = FastQCParser(fastqc_out, data["name"][-1])
stats = parser.get_fastqc_summary()
parser.save_sections_into_file()
return stats
def _run_complexity(bam_file, data, out_dir):
try:
import pandas as pd
import statsmodels.formula.api as sm
except ImportError:
return {"Unique Starts Per Read": "NA"}
SAMPLE_SIZE = 1000000
base, _ = os.path.splitext(os.path.basename(bam_file))
utils.safe_makedir(out_dir)
out_file = os.path.join(out_dir, base + ".pdf")
df = bcbio.rnaseq.qc.starts_by_depth(bam_file, data["config"], SAMPLE_SIZE)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tmp_out_file:
df.plot(x='reads', y='starts', title=bam_file + " complexity")
fig = plt.gcf()
fig.savefig(tmp_out_file)
print "file saved as", out_file
print "out_dir is", out_dir
return bcbio.rnaseq.qc.estimate_library_complexity(df)
# ## Qualimap
def _parse_num_pct(k, v):
num, pct = v.split(" / ")
return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()}
def _parse_qualimap_globals(table):
"""Retrieve metrics of interest from globals table.
"""
out = {}
want = {"Mapped reads": _parse_num_pct,
"Duplication rate": lambda k, v: {k: v}}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col in want:
out.update(want[col](col, val))
return out
def _parse_qualimap_globals_inregion(table):
"""Retrieve metrics from the global targeted region table.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Mapped reads":
out.update(_parse_num_pct("%s (in regions)" % col, val))
return out
def _parse_qualimap_coverage(table):
"""Parse summary qualimap coverage metrics.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Mean":
out["Coverage (Mean)"] = val
return out
def _parse_qualimap_insertsize(table):
"""Parse insert size metrics.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
if col == "Median":
out["Insert size (Median)"] = val
return out
def _parse_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
out = {}
parsers = {"Globals": _parse_qualimap_globals,
"Globals (inside of regions)": _parse_qualimap_globals_inregion,
"Coverage": _parse_qualimap_coverage,
"Coverage (inside of regions)": _parse_qualimap_coverage,
"Insert size": _parse_qualimap_insertsize,
"Insert size (inside of regions)": _parse_qualimap_insertsize}
root = lxml.html.parse(report_file).getroot()
for table in root.xpath("//div[@class='table-summary']"):
header = table.xpath("h3")[0].text
if header in parsers:
out.update(parsers[header](table))
new_names = []
for metric in out:
new_names.append(metric + "_qualimap_1e7reads_est")
out = dict(zip(new_names, out.values()))
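    # The suffix records that these values were estimated from the BAM downsampled
    # to at most 1e7 reads in _run_qualimap, not from the full alignment.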
return out
def _bed_to_bed6(orig_file, out_dir):
"""Convert bed to required bed6 inputs.
"""
bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file)))
if not utils.file_exists(bed6_file):
with open(bed6_file, "w") as out_handle:
for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)):
region = [x for x in list(region) if x]
fillers = [str(i), "1.0", "+"]
full = region + fillers[:6 - len(region)]
out_handle.write("\t".join(full) + "\n")
return bed6_file
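# For example, a three-column region ("chr1", "100", "200") at index 0 is written out
# as "chr1\t100\t200\t0\t1.0\t+" so qualimap receives a valid BED6 line.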
def _run_qualimap(bam_file, data, out_dir):
"""Run qualimap to assess alignment quality metrics.
"""
report_file = os.path.join(out_dir, "qualimapReport.html")
if not os.path.exists(report_file):
ds_bam = bam.downsample(bam_file, data, 1e7)
bam_file = ds_bam if ds_bam else bam_file
utils.safe_makedir(out_dir)
num_cores = data["config"]["algorithm"].get("num_cores", 1)
qualimap = config_utils.get_program("qualimap", data["config"])
resources = config_utils.get_resources("qualimap", data["config"])
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
num_cores)
cmd = ("unset DISPLAY && {qualimap} bamqc -bam {bam_file} -outdir {out_dir} "
"-nt {num_cores} --java-mem-size={max_mem}")
species = data["genome_resources"]["aliases"].get("ensembl", "").upper()
if species in ["HUMAN", "MOUSE"]:
cmd += " -gd {species}"
regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data)
if regions:
bed6_regions = _bed_to_bed6(regions, out_dir)
cmd += " -gff {bed6_regions}"
do.run(cmd.format(**locals()), "Qualimap: %s" % data["name"][-1])
return _parse_qualimap_metrics(report_file)
# ## RNAseq Qualimap
def _parse_metrics(metrics):
# skipped metrics can sometimes be in unicode, replace unicode with NA if it exists
metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics)
missing = set(["Genes Detected", "Transcripts Detected",
"Mean Per Base Cov."])
correct = set(["Intergenic pct", "Intronic pct", "Exonic pct"])
to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate",
"Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate",
"Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0,
"No feature assigned": 0, "Duplication Rate of Mapped": 1,
"Fragment Length Mean": 1,
"rRNA": 1, "Ambiguou alignment": 0})
total = ["Not aligned", "Aligned to genes", "No feature assigned"]
out = {}
total_reads = sum([int(metrics[name]) for name in total])
out['rRNA rate'] = 1.0 * int(metrics["rRNA"]) / total_reads
out['Mapped'] = sum([int(metrics[name]) for name in total[1:]])
out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads
[out.update({name: 0}) for name in missing]
[metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in correct]
for name in to_change:
if not to_change[name]:
continue
if to_change[name] == 1:
out.update({name: float(metrics[name])})
else:
out.update({to_change[name]: float(metrics[name])})
return out
def _detect_duplicates(bam_file, out_dir, config):
"""
    Detect duplicate metrics with Picard
"""
out_file = os.path.join(out_dir, "dup_metrics")
if not utils.file_exists(out_file):
broad_runner = broad.runner_from_config(config)
(dup_align_bam, metrics_file) = broad_runner.run_fn("picard_mark_duplicates", bam_file, remove_dups=True)
shutil.move(metrics_file, out_file)
metrics = []
with open(out_file) as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if line and not line[0].startswith("#"):
metrics.append(line)
metrics = dict(zip(metrics[0], metrics[1]))
return {"Duplication Rate of Mapped": metrics["PERCENT_DUPLICATION"]}
def _transform_browser_coor(rRNA_interval, rRNA_coor):
"""
transform interval format to browser coord: chr:start-end
"""
with open(rRNA_coor, 'w') as out_handle:
with open(rRNA_interval, 'r') as in_handle:
for line in in_handle:
c, bio, source, s, e = line.split("\t")[:5]
if bio.startswith("rRNA"):
out_handle.write(("{0}:{1}-{2}\n").format(c, s, e))
def _detect_rRNA(config, bam_file, rRNA_file, ref_file, out_dir, single_end):
"""
Calculate rRNA with gatk-framework
"""
if not utils.file_exists(rRNA_file):
return {'rRNA': 0}
out_file = os.path.join(out_dir, "rRNA.counts")
if not utils.file_exists(out_file):
out_file = _count_rRNA_reads(bam_file, out_file, ref_file, rRNA_file, single_end, config)
with open(out_file) as in_handle:
for line in in_handle:
if line.find("CountReads counted") > -1:
rRNA_reads = line.split()[6]
break
return {'rRNA': rRNA_reads}
def _count_rRNA_reads(in_bam, out_file, ref_file, rRNA_interval, single_end, config):
"""Use GATK counter to count reads in rRNA genes
"""
bam.index(in_bam, config)
if not utils.file_exists(out_file):
with file_transaction(out_file) as tx_out_file:
rRNA_coor = os.path.join(os.path.dirname(out_file), "rRNA.list")
_transform_browser_coor(rRNA_interval, rRNA_coor)
params = ["-T", "CountReads",
"-R", ref_file,
"-I", in_bam,
"-log", tx_out_file,
"-L", rRNA_coor,
"--filter_reads_with_N_cigar",
"-allowPotentiallyMisencodedQuals"]
jvm_opts = broad.get_gatk_framework_opts(config)
cmd = [config_utils.get_program("gatk-framework", config)] + jvm_opts + params
do.run(cmd, "counts rRNA for %s" % in_bam)
return out_file
def _parse_qualimap_rnaseq(table):
"""
Retrieve metrics of interest from globals table.
"""
out = {}
for row in table.xpath("table/tr"):
col, val = [x.text for x in row.xpath("td")]
col = col.replace(":", "").strip()
val = val.replace(",", "")
m = {col: val}
if val.find("/") > -1:
m = _parse_num_pct(col, val.replace("%", ""))
out.update(m)
return out
def _parse_rnaseq_qualimap_metrics(report_file):
"""Extract useful metrics from the qualimap HTML report file.
"""
out = {}
parsers = ["Reads alignment", "Reads genomic origin", "Transcript coverage profile"]
root = lxml.html.parse(report_file).getroot()
for table in root.xpath("//div[@class='table-summary']"):
header = table.xpath("h3")[0].text
if header in parsers:
out.update(_parse_qualimap_rnaseq(table))
return out
def _rnaseq_qualimap(bam_file, data, out_dir):
"""
    Run qualimap for an RNA-seq BAM file and parse the results
"""
report_file = os.path.join(out_dir, "qualimapReport.html")
config = data["config"]
gtf_file = dd.get_gtf_file(data)
ref_file = dd.get_ref_file(data)
single_end = not bam.is_paired(bam_file)
if not utils.file_exists(report_file):
utils.safe_makedir(out_dir)
bam.index(bam_file, config)
cmd = _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file, single_end)
do.run(cmd, "Qualimap for {}".format(data["name"][-1]))
metrics = _parse_rnaseq_qualimap_metrics(report_file)
metrics.update(_detect_duplicates(bam_file, out_dir, config))
metrics.update(_detect_rRNA(config, bam_file, gtf_file, ref_file, out_dir, single_end))
metrics.update({"Fragment Length Mean": bam.estimate_fragment_size(bam_file)})
metrics = _parse_metrics(metrics)
return metrics
def _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file=None, single_end=None):
"""
Create command lines for qualimap
"""
qualimap = config_utils.get_program("qualimap", config)
resources = config_utils.get_resources("qualimap", config)
num_cores = resources.get("cores", 1)
max_mem = config_utils.adjust_memory(resources.get("memory", "4G"),
num_cores)
cmd = ("unset DISPLAY && {qualimap} rnaseq -outdir {out_dir} -a proportional -bam {bam_file} "
"-gtf {gtf_file} --java-mem-size={max_mem}").format(**locals())
return cmd
# ## Lightweight QC approaches
def _parse_bamtools_stats(stats_file):
out = {}
want = set(["Total reads", "Mapped reads", "Duplicates", "Median insert size"])
with open(stats_file) as in_handle:
for line in in_handle:
parts = line.split(":")
if len(parts) == 2:
metric, stat_str = parts
metric = metric.split("(")[0].strip()
if metric in want:
stat_parts = stat_str.split()
if len(stat_parts) == 2:
stat, pct = stat_parts
pct = pct.replace("(", "").replace(")", "")
else:
stat = stat_parts[0]
pct = None
out[metric] = stat
if pct:
out["%s pct" % metric] = pct
return out
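# For example, a stats line like "Mapped reads:    900    (90%)" yields
# {"Mapped reads": "900", "Mapped reads pct": "90%"}.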
def _parse_offtargets(bam_file):
"""
    Add off-target read counts to the metrics if the stats file exists
"""
off_target = bam_file.replace(".bam", "-offtarget-stats.yaml")
if os.path.exists(off_target):
res = yaml.load(open(off_target))
return res
return {}
def _run_bamtools_stats(bam_file, data, out_dir):
"""Run bamtools stats with reports on mapped reads, duplicates and insert sizes.
"""
stats_file = os.path.join(out_dir, "bamtools_stats.txt")
if not utils.file_exists(stats_file):
utils.safe_makedir(out_dir)
bamtools = config_utils.get_program("bamtools", data["config"])
with file_transaction(data, stats_file) as tx_out_file:
cmd = "{bamtools} stats -in {bam_file}"
if bam.is_paired(bam_file):
cmd += " -insert"
cmd += " > {tx_out_file}"
do.run(cmd.format(**locals()), "bamtools stats", data)
out = _parse_bamtools_stats(stats_file)
out.update(_parse_offtargets(bam_file))
return out
## Variant statistics from gemini
def _run_gemini_stats(bam_file, data, out_dir):
"""Retrieve high level variant statistics from Gemini.
"""
out = {}
gemini_dbs = [d for d in
[tz.get_in(["population", "db"], x) for x in data.get("variants", [])] if d]
if len(gemini_dbs) > 0:
gemini_db = gemini_dbs[0]
gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0]
if not utils.file_uptodate(gemini_stat_file, gemini_db):
gemini = config_utils.get_program("gemini", data["config"])
tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db])
gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db])
dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q",
"SELECT count(*) FROM variants WHERE in_dbsnp==1"])
out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1]
for line in gt_counts.split("\n"):
parts = line.rstrip().split()
if len(parts) > 0 and parts[0] != "sample":
name, hom_ref, het, hom_var, _, total = parts
out[name] = {}
out[name]["Variations (heterozygous)"] = int(het)
out[name]["Variations (homozygous)"] = int(hom_var)
# same total variations for all samples, keep that top level as well.
out["Variations (total)"] = int(total)
out["Variations (in dbSNP)"] = int(dbsnp_count.strip())
if out.get("Variations (total)") > 0:
out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] /
float(out["Variations (total)"]) * 100.0)
with open(gemini_stat_file, "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
else:
with open(gemini_stat_file) as in_handle:
out = yaml.safe_load(in_handle)
res = {}
for k, v in out.iteritems():
if not isinstance(v, dict):
res.update({k: v})
if k == data['name'][-1]:
res.update(v)
return res
## qsignature
def _run_qsignature_generator(bam_file, data, out_dir):
""" Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary
:param bam_file: (str) path of the bam_file
:param data: (list) list containing the all the dictionary
for this sample
:param out_dir: (str) path of the output
:returns: (dict) dict with the normalize vcf file
"""
position = dd.get_qsig_file(data)
mixup_check = dd.get_mixup_check(data)
if mixup_check and mixup_check.startswith("qsignature"):
if not position:
logger.info("There is no qsignature for this species: %s"
% tz.get_in(['genome_build'], data))
return {}
jvm_opts = "-Xms750m -Xmx2g"
limit_reads = 20000000
if mixup_check == "qsignature_full":
slice_bam = bam_file
jvm_opts = "-Xms750m -Xmx8g"
limit_reads = 100000000
else:
slice_bam = _slice_chr22(bam_file, data)
qsig = config_utils.get_program("qsignature", data["config"])
if not qsig:
return {}
utils.safe_makedir(out_dir)
out_name = os.path.basename(slice_bam).replace("bam", "qsig.vcf")
out_file = os.path.join(out_dir, out_name)
log_file = os.path.join(out_dir, "qsig.log")
cores = dd.get_cores(data)
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureGenerator "
"--noOfThreads {cores} "
"-log {log_file} -i {position} "
"-i {down_file} ")
if not os.path.exists(out_file):
down_file = bam.downsample(slice_bam, data, limit_reads)
if not down_file:
down_file = slice_bam
file_qsign_out = "{0}.qsig.vcf".format(down_file)
do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % data["name"][-1])
if os.path.exists(file_qsign_out):
with file_transaction(data, out_file) as file_txt_out:
shutil.move(file_qsign_out, file_txt_out)
else:
raise IOError("File doesn't exist %s" % file_qsign_out)
return {'qsig_vcf': out_file}
return {}
def qsignature_summary(*samples):
"""Run SignatureCompareRelatedSimple module from qsignature tool.
Creates a matrix of pairwise comparison among samples. The
function will not run if the output exists
:param samples: list with only one element containing all samples information
:returns: (dict) with the path of the output to be joined to summary
"""
warnings, similar = [], []
qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
if not qsig:
return [[]]
jvm_opts = "-Xms750m -Xmx8g"
work_dir = samples[0][0]["dirs"]["work"]
count = 0
for data in samples:
data = data[0]
vcf = tz.get_in(["summary", "metrics", "qsig_vcf"], data)
if vcf:
count += 1
vcf_name = data["name"][-1] + ".qsig.vcf"
out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
if not os.path.lexists(os.path.join(out_dir, vcf_name)):
os.symlink(vcf, os.path.join(out_dir, vcf_name))
if count > 0:
qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
out_file = os.path.join(qc_out_dir, "qsignature.xml")
out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
if not os.path.exists(out_file):
with file_transaction(samples[0][0], out_file) as file_txt_out:
base_cmd = ("{qsig} {jvm_opts} "
"org.qcmg.sig.SignatureCompareRelatedSimple "
"-log {log} -dir {out_dir} "
"-o {file_txt_out} ")
do.run(base_cmd.format(**locals()), "qsignature score calculation")
error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
out_warn_file, samples[0][0])
return [{'total samples': count,
'similar samples pairs': len(similar),
'warnings samples pairs': len(warnings),
'error samples': list(error),
'out_dir': qc_out_dir}]
else:
return []
def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
    :returns: (tuple) sets of error, warning and similar sample pairs
"""
name = {}
error, warnings, similar = set(), set(), set()
same, replicate, related = 0, 0.1, 0.18
mixup_check = dd.get_mixup_check(data)
if mixup_check == "qsignature_full":
same, replicate, related = 0, 0.01, 0.061
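    # A score equal to `same` means the two files look like the same sample (error),
    # below `replicate` they look like replicates or a possible mix-up (warning), and
    # below `related` they look like related samples (note). qsignature_full mode,
    # which works on the whole BAM rather than a chr22 slice, uses tighter thresholds.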
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = lxml.etree.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
for i in list(et.iter('comparison')):
msg = None
pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write("%s\t%s\t%s\n" %
(name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair)
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair)
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair)
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair)
return error, warnings, similar
def _slice_chr22(in_bam, data):
"""
    Return a BAM file containing only chromosome 22
"""
sambamba = config_utils.get_program("sambamba", data["config"])
out_file = "%s-chr%s" % os.path.splitext(in_bam)
if not utils.file_exists(out_file):
bam.index(in_bam, data['config'])
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
chromosome = "22"
if "chr22" in bam_contigs:
chromosome = "chr22"
with file_transaction(data, out_file) as tx_out_file:
cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
out = subprocess.check_output(cmd, shell=True)
return out_file
def report_summary(samples, run_parallel):
"""
Run coverage report for exome data
with bcbiocov package
"""
work_dir = dd.get_work_dir(samples[0][0])
yaml_file = os.path.join(work_dir, "project-summary.yaml")
if not dd.get_report(samples[0][0]):
return samples
parent_dir = utils.safe_makedir(os.path.join(work_dir,"report"))
qsignature_fn = os.path.join(work_dir, "qc", "qsignature", "qsignature.ma")
with utils.chdir(parent_dir):
logger.info("copy qsignature")
if qsignature_fn:
if utils.file_exists(qsignature_fn) and not utils.file_exists("qsignature.ma"):
shutil.copy(qsignature_fn, "qsignature.ma")
logger.info("summarize metrics")
_merge_metrics(yaml.load(open(yaml_file)))
out_dir = utils.safe_makedir("fastqc")
logger.info("summarize fastqc")
with utils.chdir(out_dir):
_merge_fastq(samples)
out_dir = utils.safe_makedir("coverage")
out_dir = utils.safe_makedir("variants")
samples = run_parallel("coverage_report", samples)
try:
import bcbreport.prepare as bcbreport
bcbreport.report(parent_dir)
except:
loger.info("skipping report. No bcbreport installed.")
pass
return samples
def coverage_report(data):
"""
Run heavy coverage and variants process in parallel
"""
data = cov.coverage(data)
data = cov.variants(data)
problem_regions = dd.get_problem_region_dir(data)
name = dd.get_sample_name(data)
coverage = data['coverage']
annotated = None
if problem_regions and coverage:
annotated = decorate_problem_regions(coverage, problem_regions)
data['coverage'] = {'all': coverage, 'problems': annotated}
return [[data]]
def _merge_metrics(yaml_data):
"""
parse project.yaml file to get metrics for each bam
"""
project = yaml_data
out_file = os.path.join("metrics", "metrics.tsv")
dt_together = []
with file_transaction(out_file) as out_tx:
for s in project['samples']:
m = s['summary']['metrics']
for me in m:
if isinstance(m[me], list):
m[me] = ":".join(m[me])
dt = pd.DataFrame(m, index=['1'])
# dt = pd.DataFrame.from_dict(m)
dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns]
dt['sample'] = s['description']
dt_together.append(dt)
dt_together = utils.rbind(dt_together)
dt_together.to_csv(out_tx, index=False, sep="\t")
def _get_module(fastq_list, module, wide=True):
dt_together = []
for sample in fastq_list:
dt = []
itern = fastq_list[sample].clean_data(module)
header = itern[0]
total = fastq_list[sample].clean_data("Basic Statistics")[4][1]
for data in itern[1:]:
if data[0].startswith("#"):
header = data
continue
if wide:
if data[0].find("-") > -1:
f, s = map(int, data[0].split("-"))
for pos in range(f, s):
dt.append([str(pos)] + data[1:])
else:
dt.append(data)
dt = pd.DataFrame(dt)
dt.columns = [h.replace(" ", "_") for h in header]
dt['sample'] = sample
dt['total'] = total
dt_together.append(dt)
dt_together = utils.rbind(dt_together)
return dt_together
def _merge_fastq(data):
"""
merge all fastqc samples into one by module
"""
fastqc_list = {}
for sample in data:
name = dd.get_sample_name(sample[0])
fn = os.path.join(dd.get_work_dir(sample[0]), "qc", dd.get_sample_name(sample[0]), "fastqc", "fastqc_data.txt")
fastqc_list[name] = Fadapa(fn)
module = [m[1] for m in fastqc_list[name].summary()][2:9]
for m in module:
out_file = os.path.join(m.replace(" ", "_") + ".tsv")
dt = _get_module(fastqc_list, m)
dt.to_csv(out_file, sep="\t", index=False)
return [data]
| mit | 7,761,196,467,507,781,000 | 42.171251 | 119 | 0.571731 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/types/topic_view_service.py | 1 | 1202 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={"GetTopicViewRequest",},
)
class GetTopicViewRequest(proto.Message):
r"""Request message for
[TopicViewService.GetTopicView][google.ads.googleads.v8.services.TopicViewService.GetTopicView].
Attributes:
resource_name (str):
Required. The resource name of the topic view
to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 5,047,127,096,734,223,000 | 29.820513 | 100 | 0.710483 | false |
bsmedberg/socorro | socorro/external/crash_data_base.py | 1 | 3506 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from socorro.external import (
MissingArgumentError,
ResourceNotFound,
ResourceUnavailable,
ServiceUnavailable
)
from socorro.external.crashstorage_base import CrashIDNotFound
from socorro.lib import external_common
class CrashDataBase(object):
"""
Common implementation of the crash data service for all crashstorage
schemes. Any external service that wants to implement a CrashData service
may subclass from this service. All they'd have to do is implement the
'get_storage' method to return an appropriate instance of their own
crashstorage class.
"""
def __init__(self, *args, **kwargs):
super(CrashDataBase, self).__init__()
self.config = kwargs['config']
self.all_services = kwargs['all_services']
def get_storage(self):
"""derived classes must implement this method to return an instance
of their own crashstorage class"""
raise NotImplementedError
def get(self, **kwargs):
"""Return JSON data of a crash report, given its uuid. """
filters = [
('uuid', None, 'str'),
('datatype', None, 'str')
]
params = external_common.parse_arguments(filters, kwargs)
if not params.uuid:
raise MissingArgumentError('uuid')
if not params.datatype:
raise MissingArgumentError('datatype')
# get a generic crashstorage instance from whatever external resource
# is implementing this service.
store = self.get_storage()
datatype_method_mapping = {
'raw': 'get_raw_dump',
'meta': 'get_raw_crash',
'processed': 'get_processed',
'unredacted': 'get_unredacted_processed',
}
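        # 'raw' returns the binary dump (served as octet-stream below); the other
        # datatypes return dict-like crash data from the underlying crash storage.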
get = store.__getattribute__(datatype_method_mapping[params.datatype])
try:
if params.datatype == 'raw':
return (get(params.uuid), 'application/octet-stream')
else:
return get(params.uuid)
except CrashIDNotFound:
if params.datatype in ('processed', 'unredacted'):
# try to fetch a raw crash just to ensure that the raw crash
# exists. If this line fails, there's no reason to actually
# submit the priority job.
try:
store.get_raw_crash(params.uuid)
except CrashIDNotFound:
raise ResourceNotFound(params.uuid)
# search through the existing other services to find the
# Priorityjob service.
try:
priorityjob_service_impl = self.all_services[
'Priorityjobs'
]
except KeyError:
raise ServiceUnavailable('Priorityjobs')
# get the underlying implementation of the Priorityjob
# service and instantiate it.
priority_job_service = priorityjob_service_impl.cls(
config=self.config
)
# create the priority job for this crash_ids
priority_job_service.create(uuid=params.uuid)
raise ResourceUnavailable(params.uuid)
raise ResourceNotFound(params.uuid)
| mpl-2.0 | 8,774,703,158,655,654,000 | 37.108696 | 78 | 0.595836 | false |
EarToEarOak/RTLSDR-Scanner | rtlsdr_scanner/plot_spect.py | 1 | 16014 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import threading
import time
from matplotlib import cm, patheffects
import matplotlib
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import Normalize
from matplotlib.dates import DateFormatter
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
from matplotlib.text import Text
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
import numpy
from rtlsdr_scanner.constants import Markers, PlotFunc
from rtlsdr_scanner.events import EventThread, Event, post_event
from rtlsdr_scanner.misc import format_time, format_precision
from rtlsdr_scanner.spectrum import split_spectrum, Measure, smooth_spectrum, Extent, \
diff_spectrum, get_peaks
from rtlsdr_scanner.utils_mpl import utc_to_mpl
class Spectrogram(object):
def __init__(self, notify, figure, settings):
self.notify = notify
self.figure = figure
self.settings = settings
self.data = [[], [], []]
self.axes = None
self.plot = None
self.extent = None
self.bar = None
self.barBase = None
self.lines = {}
self.labels = {}
self.overflowLabels = {}
self.overflow = {'left': [],
'right': []}
self.threadPlot = None
self.__setup_plot()
self.set_grid(self.settings.grid)
def __setup_plot(self):
gs = GridSpec(1, 2, width_ratios=[9.5, 0.5])
self.axes = self.figure.add_subplot(gs[0],
facecolor=self.settings.background)
self.axes.set_xlabel("Frequency (MHz)")
self.axes.set_ylabel('Time')
numFormatter = ScalarFormatter(useOffset=False)
timeFormatter = DateFormatter("%H:%M:%S")
self.axes.xaxis.set_major_formatter(numFormatter)
self.axes.yaxis.set_major_formatter(timeFormatter)
self.axes.xaxis.set_minor_locator(AutoMinorLocator(10))
self.axes.yaxis.set_minor_locator(AutoMinorLocator(10))
self.axes.set_xlim(self.settings.start, self.settings.stop)
now = time.time()
self.axes.set_ylim(utc_to_mpl(now), utc_to_mpl(now - 10))
self.bar = self.figure.add_subplot(gs[1])
norm = Normalize(vmin=-50, vmax=0)
self.barBase = ColorbarBase(self.bar, norm=norm,
cmap=cm.get_cmap(self.settings.colourMap))
self.__setup_measure()
self.__setup_overflow()
self.hide_measure()
def __setup_measure(self):
dashesHalf = [1, 5, 5, 5, 5, 5]
self.lines[Markers.HFS] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='purple')
self.lines[Markers.HFE] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='purple')
self.lines[Markers.OFS] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='#996600')
self.lines[Markers.OFE] = Line2D([0, 0], [0, 0], dashes=dashesHalf,
color='#996600')
if matplotlib.__version__ >= '1.3':
effect = patheffects.withStroke(linewidth=3, foreground="w",
alpha=0.75)
self.lines[Markers.HFS].set_path_effects([effect])
self.lines[Markers.HFE].set_path_effects([effect])
self.lines[Markers.OFS].set_path_effects([effect])
self.lines[Markers.OFE].set_path_effects([effect])
for line in self.lines.itervalues():
self.axes.add_line(line)
bbox = self.axes.bbox
box = dict(boxstyle='round', fc='white', ec='purple', clip_box=bbox)
self.labels[Markers.HFS] = Text(0, 0, '-3dB', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='purple')
self.labels[Markers.HFE] = Text(0, 0, '-3dB', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='purple')
box['ec'] = '#996600'
self.labels[Markers.OFS] = Text(0, 0, 'OBW', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='#996600')
self.labels[Markers.OFE] = Text(0, 0, 'OBW', fontsize='xx-small',
ha="center", va="top", bbox=box,
color='#996600')
for label in self.labels.itervalues():
self.axes.add_artist(label)
def __setup_overflow(self):
bbox = self.axes.bbox
box = dict(boxstyle='round', fc='white', ec='black', alpha=0.5,
clip_box=bbox)
self.overflowLabels['left'] = Text(0, 0.9, '', fontsize='xx-small',
ha="left", va="top", bbox=box,
transform=self.axes.transAxes,
alpha=0.5)
self.overflowLabels['right'] = Text(1, 0.9, '', fontsize='xx-small',
ha="right", va="top", bbox=box,
transform=self.axes.transAxes,
alpha=0.5)
for label in self.overflowLabels.itervalues():
self.axes.add_artist(label)
def __clear_overflow(self):
for label in self.overflowLabels:
self.overflow[label] = []
def __draw_vline(self, marker, x):
line = self.lines[marker]
label = self.labels[marker]
yLim = self.axes.get_ylim()
xLim = self.axes.get_xlim()
if xLim[0] < x < xLim[1]:
line.set_visible(True)
line.set_xdata([x, x])
line.set_ydata([yLim[0], yLim[1]])
self.axes.draw_artist(line)
label.set_visible(True)
label.set_position((x, yLim[1]))
self.axes.draw_artist(label)
elif x is not None and x < xLim[0]:
self.overflow['left'].append(marker)
elif x is not None and x > xLim[1]:
self.overflow['right'].append(marker)
def __draw_overflow(self):
for pos, overflow in self.overflow.iteritems():
if len(overflow) > 0:
text = ''
for measure in overflow:
if len(text) > 0:
text += '\n'
text += self.labels[measure].get_text()
label = self.overflowLabels[pos]
if pos == 'left':
textMath = '$\\blacktriangleleft$\n' + text
elif pos == 'right':
textMath = '$\\blacktriangleright$\n' + text
label.set_text(textMath)
label.set_visible(True)
self.axes.draw_artist(label)
def draw_measure(self, measure, show):
if self.axes.get_renderer_cache() is None:
return
self.hide_measure()
self.__clear_overflow()
if show[Measure.HBW]:
xStart, xEnd, _y = measure.get_hpw()
self.__draw_vline(Markers.HFS, xStart)
self.__draw_vline(Markers.HFE, xEnd)
if show[Measure.OBW]:
xStart, xEnd, _y = measure.get_obw()
self.__draw_vline(Markers.OFS, xStart)
self.__draw_vline(Markers.OFE, xEnd)
self.__draw_overflow()
def hide_measure(self):
for line in self.lines.itervalues():
line.set_visible(False)
for label in self.labels.itervalues():
label.set_visible(False)
for label in self.overflowLabels.itervalues():
label.set_visible(False)
def scale_plot(self, force=False):
if self.figure is not None and self.plot is not None:
extent = self.plot.get_extent()
if self.settings.autoF or force:
if extent[0] == extent[1]:
extent[1] += 1
self.axes.set_xlim(extent[0], extent[1])
if self.settings.autoL or force:
vmin, vmax = self.plot.get_clim()
self.barBase.set_clim(vmin, vmax)
try:
self.barBase.draw_all()
except:
pass
if self.settings.autoT or force:
self.axes.set_ylim(extent[2], extent[3])
def redraw_plot(self):
if self.figure is not None:
post_event(self.notify, EventThread(Event.DRAW))
def get_axes(self):
return self.axes
def get_axes_bar(self):
return self.barBase.ax
def get_bar(self):
return self.barBase
def get_plot_thread(self):
return self.threadPlot
def set_title(self, title):
self.axes.set_title(title, fontsize='medium')
def set_plot(self, spectrum, extent, annotate=False):
self.extent = extent
self.threadPlot = ThreadPlot(self, self.settings,
self.axes,
spectrum,
self.extent,
self.barBase,
annotate)
self.threadPlot.start()
def clear_plots(self):
children = self.axes.get_children()
for child in children:
if child.get_gid() is not None:
if child.get_gid() in ['plot', 'peak', 'peakText',
'peakShadow', 'peakThres']:
child.remove()
def set_grid(self, on):
if on:
self.axes.grid(True, color='w')
else:
self.axes.grid(False)
self.redraw_plot()
def set_colourmap(self, colourMap):
if self.plot is not None:
self.plot.set_cmap(colourMap)
self.barBase.set_cmap(colourMap)
try:
self.barBase.draw_all()
except:
pass
def close(self):
self.figure.clear()
self.figure = None
class ThreadPlot(threading.Thread):
def __init__(self, parent, settings, axes, data, extent,
barBase, annotate):
threading.Thread.__init__(self)
self.name = "Plot"
self.parent = parent
self.settings = settings
self.axes = axes
self.data = data
self.extent = extent
self.barBase = barBase
self.annotate = annotate
def run(self):
if self.data is None:
self.parent.threadPlot = None
return
total = len(self.data)
if total > 0:
if self.settings.plotFunc == PlotFunc.NONE:
peakF, peakL, peakT = self.__plot(self.data)
elif self.settings.plotFunc == PlotFunc.SMOOTH:
peakF, peakL, peakT = self.__plot_smooth()
elif self.settings.plotFunc == PlotFunc.DIFF:
peakF, peakL, peakT = self.__plot_diff()
if self.annotate:
self.__plot_peak(peakF, peakL, peakT)
if self.settings.peaks:
self.__plot_peaks()
self.parent.scale_plot()
self.parent.redraw_plot()
self.parent.threadPlot = None
def __plot(self, spectrum):
width = len(spectrum[min(self.data)])
height = len(spectrum)
c = numpy.ma.masked_all((height, width))
self.parent.clear_plots()
j = height
for ys in reversed(spectrum):
j -= 1
_xs, zs = split_spectrum(spectrum[ys])
for i in range(len(zs)):
try:
c[j, i] = zs[i]
except IndexError:
continue
norm = None
if not self.settings.autoL:
minY, maxY = self.barBase.get_clim()
norm = Normalize(vmin=minY, vmax=maxY)
extent = self.extent.get_ft()
self.parent.plot = self.axes.imshow(c, aspect='auto',
extent=extent,
norm=norm,
cmap=cm.get_cmap(self.settings.colourMap),
interpolation='spline16',
gid="plot")
return self.extent.get_peak_flt()
def __plot_smooth(self):
data = smooth_spectrum(self.data,
self.settings.smoothFunc,
self.settings.smoothRatio)
self.extent = Extent(data)
return self.__plot(data)
def __plot_diff(self):
data = diff_spectrum(self.data)
self.extent = Extent(data)
self.parent.extent = self.extent
return self.__plot(data)
def __plot_peak(self, peakF, peakL, peakT):
self.__clear_markers()
y = utc_to_mpl(peakT)
start, stop = self.axes.get_xlim()
textX = ((stop - start) / 50.0) + peakF
when = format_time(peakT)
text = '{}\n{}\n{when}'.format(*format_precision(self.settings,
peakF, peakL,
fancyUnits=True),
when=when)
if matplotlib.__version__ < '1.3':
self.axes.annotate(text,
xy=(peakF, y), xytext=(textX, y),
ha='left', va='bottom', size='x-small',
color='w', gid='peakText')
self.axes.plot(peakF, y, marker='x', markersize=10, color='w',
mew=3, gid='peakShadow')
self.axes.plot(peakF, y, marker='x', markersize=10, color='r',
gid='peak')
else:
effect = patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)
self.axes.annotate(text,
xy=(peakF, y), xytext=(textX, y),
ha='left', va='bottom', size='x-small',
path_effects=[effect], gid='peakText')
self.axes.plot(peakF, y, marker='x', markersize=10, color='r',
path_effects=[effect], gid='peak')
def __plot_peaks(self):
sweep, indices = get_peaks(self.data, self.settings.peaksThres)
lastTime = utc_to_mpl(max(self.data))
for i in indices:
self.axes.plot(sweep.keys()[i], lastTime,
linestyle='None',
marker='+', markersize=10, color='r',
gid='peakThres')
def __clear_markers(self):
children = self.axes.get_children()
for child in children:
if child.get_gid() is not None:
if child.get_gid() in ['peak', 'peakText',
'peakShadow', 'peakThres']:
child.remove()
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 | 6,269,624,982,629,573,000 | 36.415888 | 87 | 0.513301 | false |
jasongrout/jupyterlab-extension | setup.py | 1 | 5340 | # -*- coding: utf-8 -*-
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from setuptools import setup, find_packages, Command
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from subprocess import check_call
import os
import sys
import platform
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'jupyterlab_extension')
is_repo = os.path.exists(os.path.join(here, '.git'))
npm_path = os.pathsep.join([
os.path.join(node_root, 'node_modules', '.bin'),
os.environ.get('PATH', os.defpath),
])
from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
LONG_DESCRIPTION = 'A pre-alpha JupyterLab demo.'
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn('rebuilding js and css failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('rebuilding js and css failed (not a problem)')
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
"""update package_data to catch changes during setup"""
build_py = distribution.get_command_obj('build_py')
# distribution.package_data = find_package_data()
# re-init build_py options which load package_data
build_py.finalize_options()
class NPM(Command):
description = 'install package.json dependencies using npm'
user_options = []
node_modules = os.path.join(node_root, 'node_modules')
targets = [
os.path.join(here, 'jupyterlab_extension', 'build', 'bundle.js'),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def has_npm(self):
try:
check_call(['npm', '--version'])
return True
except:
return False
def should_run_npm_install(self):
package_json = os.path.join(node_root, 'package.json')
node_modules_exists = os.path.exists(self.node_modules)
return self.has_npm()
def run(self):
has_npm = self.has_npm()
if not has_npm:
log.error("`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo")
env = os.environ.copy()
env['PATH'] = npm_path
if self.should_run_npm_install():
log.info("Installing build dependencies with npm. This may take a while...")
check_call(['npm', 'install'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
check_call(['npm', 'run', 'build'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = 'Missing file: %s' % t
if not has_npm:
                    msg += '\nnpm is required to build a development version of jupyterlab_extension'
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
version_ns = {}
with open(os.path.join(here, 'jupyterlab_extension', '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = {
'name': 'jupyterlab_extension',
'version': version_ns['__version__'],
'description': 'A pre-alpha Jupyter lab environment notebook server extension.',
'long_description': LONG_DESCRIPTION,
    'license': 'BSD',
'include_package_data': True,
'install_requires': ['notebook>=4.2.0'],
'packages': find_packages(),
'zip_safe': False,
'package_data': {'jupyterlab_extension': [
'build/*',
'lab.html'
]},
'cmdclass': {
'build_py': js_prerelease(build_py),
'egg_info': js_prerelease(egg_info),
'sdist': js_prerelease(sdist, strict=True),
'jsdeps': NPM,
},
'author': 'Jupyter Development Team',
'author_email': '[email protected]',
'url': 'http://jupyter.org',
'keywords': ['ipython', 'jupyter', 'Web'],
'classifiers': [
        'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
}
setup(**setup_args)
| bsd-3-clause | -5,766,849,120,819,335,000 | 32.797468 | 124 | 0.603371 | false |
ox-it/talks.ox | talks/users/forms.py | 1 | 3163 | from __future__ import absolute_import
from django import forms
from django.core.exceptions import ValidationError
from django.db.models.query_utils import Q
from talks.events import typeahead, datasources
from django.contrib.auth.models import User
from talks.users.models import Collection, TalksUser, TalksUserCollection, DEFAULT_COLLECTION_NAME, COLLECTION_ROLES_EDITOR, COLLECTION_ROLES_READER, COLLECTION_ROLES_OWNER
from talks.contributors.forms import XMLFriendlyTextField
class CollectionForm(forms.ModelForm):
title = XMLFriendlyTextField(
max_length=250,
required=True
)
description = XMLFriendlyTextField(
widget=forms.Textarea(attrs={'rows': 8}),
required=False,
)
editor_set = forms.ModelMultipleChoiceField(
queryset=TalksUser.objects.filter().distinct(),
label="Other Editors",
help_text="Share editing with another Talks Editor by typing in their full email address",
required=False,
widget=typeahead.MultipleTypeahead(datasources.TALKSUSERS_EMAIL_EXACT_DATA_SOURCE),
)
class Meta:
model = Collection
fields = ('title', 'description', 'public', 'editor_set')
labels = {
'public': "Make this list public?"
}
help_texts = {
'title': "If you wish to make this list public please make sure the list has a distinctive title and description - e.g.: Recommended talks for 3rd Year Biology"
}
def save(self):
collection = super(CollectionForm, self).save(commit=False)
collection.save()
# clear the list of editors and repopulate with the contents of the form
collection.editor_set.through.objects.filter(role=COLLECTION_ROLES_EDITOR, collection=collection).delete()
if 'editor_set' in self.cleaned_data:
for user in self.cleaned_data['editor_set']:
if collection.user_collection_permission(user) == 'owner':
pass
else:
TalksUserCollection.objects.create(user=user,
collection=collection,
role=COLLECTION_ROLES_EDITOR)
collection.save()
return collection
def clean(self):
cleaned_data = self.cleaned_data
public = cleaned_data.get('public')
title = cleaned_data.get('title')
collection = super(CollectionForm, self).save(commit=False) # get the collection instance without saving the form
number_of_readers = collection.get_number_of_readers()
# If we're making the collection public, ensure that the collection title is not 'My Collection'
if public and (title == DEFAULT_COLLECTION_NAME):
raise ValidationError({'title': 'Please change the title of your list to something less generic before making your list public'})
if not public and (number_of_readers > 0):
raise ValidationError({'public': 'Unable to revoke public status - there are already ' + str(number_of_readers) + ' readers following this list.'})
| apache-2.0 | 9,132,082,129,076,920,000 | 42.328767 | 172 | 0.653494 | false |
Som-Energia/somenergia-generationkwh | generationkwh/amortizations_test.py | 1 | 3982 | # -*- coding:utf8 -*-
import unittest
from .amortizations import pendingAmortizations
class Amortization_Test(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def test_pendingAmortizations_unpaid(self):
self.assertEqual(
pendingAmortizations(
purchase_date=False,
current_date='2002-01-01',
investment_amount=1000,
amortized_amount=0,
), [
])
def test_pendingAmortizations_justFirstAmortization(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2002-01-01',
investment_amount=1000,
amortized_amount=0,
), [
(1, 24, '2002-01-01', 40),
])
def test_pendingAmortizations_justBeforeFirstOne(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2001-12-31',
investment_amount=1000,
amortized_amount=0,
), [])
def test_pendingAmortizations_justSecondOne(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2003-01-01',
investment_amount=1000,
amortized_amount=0,
), [
(1, 24, '2002-01-01', 40),
(2, 24, '2003-01-01', 40),
])
def test_pendingAmortizations_alreadyAmortized(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2003-01-01',
investment_amount=1000,
amortized_amount=40,
), [
(2, 24, '2003-01-01', 40),
])
def test_pendingAmortizations_lastDouble(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2025-01-01',
investment_amount=1000,
amortized_amount=920,
), [
(24, 24, '2025-01-01', 80),
])
def test_pendingAmortizations_allDone(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2050-01-01',
investment_amount=1000,
amortized_amount=1000,
), [
])
def test_pendingAmortizations_allPending(self):
self.assertEqual(
pendingAmortizations(
purchase_date='2000-01-01',
current_date='2040-01-01',
investment_amount=1000,
amortized_amount=0,
), [
( 1, 24, '2002-01-01', 40),
( 2, 24, '2003-01-01', 40),
( 3, 24, '2004-01-01', 40),
( 4, 24, '2005-01-01', 40),
( 5, 24, '2006-01-01', 40),
( 6, 24, '2007-01-01', 40),
( 7, 24, '2008-01-01', 40),
( 8, 24, '2009-01-01', 40),
( 9, 24, '2010-01-01', 40),
(10, 24, '2011-01-01', 40),
(11, 24, '2012-01-01', 40),
(12, 24, '2013-01-01', 40),
(13, 24, '2014-01-01', 40),
(14, 24, '2015-01-01', 40),
(15, 24, '2016-01-01', 40),
(16, 24, '2017-01-01', 40),
(17, 24, '2018-01-01', 40),
(18, 24, '2019-01-01', 40),
(19, 24, '2020-01-01', 40),
(20, 24, '2021-01-01', 40),
(21, 24, '2022-01-01', 40),
(22, 24, '2023-01-01', 40),
(23, 24, '2024-01-01', 40),
(24, 24, '2025-01-01', 80),
])
# vim: et ts=4 sw=4
| agpl-3.0 | 1,300,526,490,195,909,400 | 31.373984 | 62 | 0.443747 | false |
sanguinariojoe/FreeCAD | src/Mod/Draft/draftguitools/gui_mirror.py | 9 | 9314 | # ***************************************************************************
# * (c) 2009, 2010 Yorik van Havre <[email protected]> *
# * (c) 2009, 2010 Ken Cline <[email protected]> *
# * (c) 2020 Eliud Cabrera Castillo <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides GUI tools to create mirrored objects.
The mirror tool creates a `Part::Mirroring` object, which is the same
as the one created by the Part module.
Perhaps in the future a specific Draft `Mirror` object can be defined.
"""
## @package gui_mirror
# \ingroup draftguitools
# \brief Provides GUI tools to create mirrored objects.
## \addtogroup draftguitools
# @{
from PySide.QtCore import QT_TRANSLATE_NOOP
import FreeCAD as App
import FreeCADGui as Gui
import Draft_rc
import DraftVecUtils
import WorkingPlane
import draftguitools.gui_base_original as gui_base_original
import draftguitools.gui_tool_utils as gui_tool_utils
from draftutils.messages import _msg
from draftutils.translate import translate
# The module is used to prevent complaints from code checkers (flake8)
True if Draft_rc.__name__ else False
class Mirror(gui_base_original.Modifier):
"""Gui Command for the Mirror tool."""
def GetResources(self):
"""Set icon, menu and tooltip."""
return {'Pixmap': 'Draft_Mirror',
'Accel': "M, I",
'MenuText': QT_TRANSLATE_NOOP("Draft_Mirror", "Mirror"),
'ToolTip': QT_TRANSLATE_NOOP("Draft_Mirror", "Mirrors the selected objects along a line defined by two points.")}
def Activated(self):
"""Execute when the command is called."""
super(Mirror, self).Activated(name="Mirror")
self.ghost = None
if self.ui:
if not Gui.Selection.getSelection():
self.ui.selectUi(on_close_call=self.finish)
_msg(translate("draft", "Select an object to mirror"))
self.call = \
self.view.addEventCallback("SoEvent",
gui_tool_utils.selectObject)
else:
self.proceed()
def proceed(self):
"""Proceed with the command if one object was selected."""
if self.call:
self.view.removeEventCallback("SoEvent", self.call)
self.sel = Gui.Selection.getSelection()
self.ui.pointUi(title=translate("draft", self.featureName), icon="Draft_Mirror")
self.ui.modUi()
self.ui.xValue.setFocus()
self.ui.xValue.selectAll()
# self.ghost = trackers.ghostTracker(self.sel)
# TODO: solve this (see below)
self.call = self.view.addEventCallback("SoEvent", self.action)
_msg(translate("draft", "Pick start point of mirror line"))
self.ui.isCopy.hide()
def finish(self, closed=False, cont=False):
"""Terminate the operation of the tool."""
if self.ghost:
self.ghost.finalize()
super(Mirror, self).finish()
if cont and self.ui:
if self.ui.continueMode:
Gui.Selection.clearSelection()
self.Activated()
def mirror(self, p1, p2, copy=False):
"""Mirror the real shapes."""
sel = '['
for o in self.sel:
if len(sel) > 1:
sel += ', '
sel += 'FreeCAD.ActiveDocument.' + o.Name
sel += ']'
Gui.addModule("Draft")
_cmd = 'Draft.mirror'
_cmd += '('
_cmd += sel + ', '
_cmd += DraftVecUtils.toString(p1) + ', '
_cmd += DraftVecUtils.toString(p2)
_cmd += ')'
_cmd_list = ['m = ' + _cmd,
'FreeCAD.ActiveDocument.recompute()']
self.commit(translate("draft", "Mirror"),
_cmd_list)
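        # For illustration: with a single selected object named "Box" (an
        # assumed name) and picked points p1=(0,0,0), p2=(1,0,0), the committed
        # command list would read roughly:
        #   m = Draft.mirror([FreeCAD.ActiveDocument.Box], FreeCAD.Vector(0.0, 0.0, 0.0), FreeCAD.Vector(1.0, 0.0, 0.0))
        #   FreeCAD.ActiveDocument.recompute()
        # (the exact Vector formatting depends on DraftVecUtils.toString)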
def action(self, arg):
"""Handle the 3D scene events.
This is installed as an EventCallback in the Inventor view.
Parameters
----------
arg: dict
Dictionary with strings that indicates the type of event received
from the 3D view.
"""
if arg["Type"] == "SoKeyboardEvent":
if arg["Key"] == "ESCAPE":
self.finish()
elif arg["Type"] == "SoLocation2Event": # mouse movement detection
(self.point,
ctrlPoint, info) = gui_tool_utils.getPoint(self, arg)
if len(self.node) > 0:
last = self.node[-1]
if self.ghost:
if self.point != last:
# TODO: the following doesn't work at the moment
mu = self.point.sub(last).normalize()
# This part used to test for the GUI to obtain
# the camera view but this is unnecessary
# as this command is always launched in the GUI.
_view = Gui.ActiveDocument.ActiveView
mv = _view.getViewDirection().negative()
mw = mv.cross(mu)
_plane = WorkingPlane.plane(u=mu, v=mv, w=mw,
pos=last)
tm = _plane.getPlacement().toMatrix()
m = self.ghost.getMatrix()
m = m.multiply(tm.inverse())
m.scale(App.Vector(1, 1, -1))
m = m.multiply(tm)
m.scale(App.Vector(-1, 1, 1))
self.ghost.setMatrix(m)
if self.extendedCopy:
if not gui_tool_utils.hasMod(arg, gui_tool_utils.MODALT):
self.finish()
gui_tool_utils.redraw3DView()
elif arg["Type"] == "SoMouseButtonEvent":
if (arg["State"] == "DOWN") and (arg["Button"] == "BUTTON1"):
if self.point:
self.ui.redraw()
if (self.node == []):
self.node.append(self.point)
self.ui.isRelative.show()
if self.ghost:
self.ghost.on()
_msg(translate("draft",
"Pick end point of mirror line"))
if self.planetrack:
self.planetrack.set(self.point)
else:
last = self.node[0]
if (self.ui.isCopy.isChecked()
or gui_tool_utils.hasMod(arg, gui_tool_utils.MODALT)):
self.mirror(last, self.point, True)
else:
self.mirror(last, self.point)
if gui_tool_utils.hasMod(arg, gui_tool_utils.MODALT):
self.extendedCopy = True
else:
self.finish(cont=True)
def numericInput(self, numx, numy, numz):
"""Validate the entry fields in the user interface.
This function is called by the toolbar or taskpanel interface
when valid x, y, and z have been entered in the input fields.
"""
self.point = App.Vector(numx, numy, numz)
if not self.node:
self.node.append(self.point)
if self.ghost:
self.ghost.on()
_msg(translate("draft", "Pick end point of mirror line"))
else:
last = self.node[-1]
if self.ui.isCopy.isChecked():
self.mirror(last, self.point, True)
else:
self.mirror(last, self.point)
self.finish()
Gui.addCommand('Draft_Mirror', Mirror())
## @}
| lgpl-2.1 | -7,578,842,251,887,063,000 | 42.12037 | 129 | 0.492914 | false |
nicodv/bgg | bgg/util/retry.py | 1 | 3841 | """
Module that implements a retry decorator.
You can, for example, do this:
@retry(5)
def my_function():
...
And 'my_function', upon an exception, will be retried 4 more times until
a final exception is raised. 'retry' will wait a little bit longer after each
failure before retrying.
Very useful for, for example, retrying a download if timeouts occur frequently.
Customization of exceptions and exception handlers is possible.
"""
from time import sleep
from functools import wraps
def _warning_printer(func, exception, tries_remaining):
"""Simple exception handler that prints a warning.
:param exception: The exception instance which was raised
:param int tries_remaining: The number of tries remaining
"""
print("Caught '{0}' in {1}, {2} tries remaining.".format(
exception, func.__name__, tries_remaining))
def _error_printer(func, exception, tries):
"""Exception handler that prints an error.
:param exception: The exception instance which was raised
:param int tries: Total number of tries
"""
try:
print("{} failed (reason: {}), giving up after {} tries.".format(
func.__name__, exception.reason, int(tries)))
except AttributeError:
print("{} failed, giving up after {} tries.".format(
func.__name__, int(tries)))
def retry(max_tries, delay=1, backoff=2, exceptions=(Exception,),
on_retry=_warning_printer, on_fail=_error_printer):
"""Function decorator implementing retry logic.
The decorator will call the function up to max_tries times if it raises
an exception.
By default it catches instances of the Exception class and subclasses.
This will recover after all but the most fatal errors. You may specify a
custom tuple of exception classes with the 'exceptions' argument; the
function will only be retried if it raises one of the specified
exceptions.
Additionally you may specify a on_retry function which will be
called prior to retrying with the number of remaining tries and the
exception instance. This is primarily intended to give the opportunity to
log the failure. on_fail is another function called after failure if no
retries remain.
:param int max_tries: Maximum number of retries
    :param int or float delay: Initial sleep in seconds after the first
        failure; it is multiplied by backoff after each subsequent failure
:param int or float backoff: Multiply delay by this after each failure
:param tuple exceptions: A tuple of exception classes; default (Exception,)
:param func on_retry: An on-retry exception handler function
(args should be: function, exception, tries_remaining)
:param func on_fail: A final exception handler function
        (args should be: function, exception, total number of tries)
"""
assert max_tries > 0
def dec(func):
# 'wraps' updates a wrapper function to look like the wrapped function
@wraps(func)
def f2(*args, **kwargs):
mydelay = delay
tries = reversed(range(max_tries))
for tries_remaining in tries:
try:
return func(*args, **kwargs)
except exceptions as e:
if tries_remaining > 0:
# call on_retry exception handler after an exception
if on_retry is not None:
on_retry(func, e, tries_remaining)
sleep(mydelay)
mydelay *= backoff
else:
# no more retries, call the on_fail exception handler
if on_fail is not None:
on_fail(func, e, max_tries)
else:
raise e
return f2
return dec
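# A minimal usage sketch of the decorator, assuming a flaky function that
# fails twice with ValueError and then succeeds; the names below are invented
# for the example only.
if __name__ == "__main__":
    _attempts = {"count": 0}

    @retry(4, delay=0.1, backoff=2, exceptions=(ValueError,))
    def flaky():
        """Fail twice with ValueError, then succeed on the third try."""
        _attempts["count"] += 1
        if _attempts["count"] < 3:
            raise ValueError("transient failure")
        return "ok after {} attempts".format(_attempts["count"])

    print(flaky())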
| mit | 5,816,004,726,221,605,000 | 36.656863 | 79 | 0.634731 | false |
sjtindell/pulp_deb | plugins/pulp_deb/plugins/distributors/steps.py | 1 | 4603 | from gettext import gettext as _
import logging
import os
import subprocess
import gzip
from pulp.plugins.util import misc
from pulp.plugins.util.publish_step import PluginStep, AtomicDirectoryPublishStep
from pulp_deb.common import constants
from pulp_deb.plugins.distributors import configuration
from pulp_deb.plugins.importers.sync import generate_internal_storage_path
from pulp_deb.common.model import DebPackage
_logger = logging.getLogger(__name__)
class WebPublisher(PluginStep):
"""
Web publisher class that is responsible for the actual publishing
of a repository via a web server
"""
def __init__(self, repo, publish_conduit, config):
"""
:param repo: Pulp managed Yum repository
:type repo: pulp.plugins.model.Repository
:param publish_conduit: Conduit providing access to relative Pulp functionality
:type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
:param config: Pulp configuration for the distributor
:type config: pulp.plugins.config.PluginCallConfiguration
"""
super(WebPublisher, self).__init__(constants.PUBLISH_STEP_WEB_PUBLISHER,
repo, publish_conduit, config)
publish_dir = configuration.get_web_publish_dir(repo, config)
self.web_working_dir = os.path.join(self.get_working_dir(), repo.id)
master_publish_dir = configuration.get_master_publish_dir(repo, config)
atomic_publish_step = AtomicDirectoryPublishStep(self.get_working_dir(),
[(repo.id, publish_dir)],
master_publish_dir,
step_type=constants.PUBLISH_STEP_OVER_HTTP)
atomic_publish_step.description = _('Making files available via web.')
self.add_child(PublishContentStep(working_dir=self.web_working_dir))
self.add_child(PublishMetadataStep(working_dir=self.web_working_dir))
self.add_child(atomic_publish_step)
class PublishMetadataStep(PluginStep):
"""
Repository Metadata
"""
def __init__(self, **kwargs):
super(PublishMetadataStep, self).__init__(constants.PUBLISH_STEP_METADATA, **kwargs)
self.context = None
self.redirect_context = None
self.description = _('Publishing Metadata.')
def process_main(self, item=None):
"""
Publish all the deb metadata or create a blank deb if this has never been synced
"""
# Write out repo metadata into the working directory
packfile_name = os.path.join(self.get_working_dir(), "Packages")
packfile_gz_name = packfile_name + '.gz'
with open(packfile_name, 'wb') as dpkg_out:
packfile_gz = gzip.open(packfile_gz_name, 'wb')
try:
proc = subprocess.Popen(['dpkg-scanpackages', '-m', '.'],
cwd=self.get_working_dir(),
stdout=subprocess.PIPE)
(out, err) = proc.communicate()
dpkg_out.write(out)
packfile_gz.write(out)
finally:
packfile_gz.close()
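        # Roughly the shell pipeline (assuming dpkg-dev provides the tool):
        #   dpkg-scanpackages -m . | tee Packages | gzip -c > Packages.gz
        # run from the publish working directory, writing both the plain and
        # the gzipped Packages index from a single dpkg-scanpackages pass.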
class PublishContentStep(PluginStep):
"""
Publish Content
"""
def __init__(self, **kwargs):
super(PublishContentStep, self).__init__(constants.PUBLISH_STEP_CONTENT, **kwargs)
self.description = _('Publishing Deb Content.')
def initialize(self):
"""
Perform setup required before we start processing the individual units
"""
misc.mkdir(self.get_working_dir())
def _get_total(self):
return self.get_repo().content_unit_counts[constants.DEB_TYPE_ID]
def get_iterator(self):
"""
This method returns a generator to loop over items.
The items created by this generator will be iterated over by the process_item method.
:return: generator of items
:rtype: GeneratorType of items
"""
units_iterator = self.get_conduit().get_units(as_generator=True)
return units_iterator
def process_main(self, item=None):
"""
Publish an individual deb file
"""
filename = item.metadata["file_name"]
tmp = os.path.join(self.get_working_dir(), filename)
store = "/var/lib/pulp/content/deb/" + generate_internal_storage_path(filename)
os.symlink(store, tmp)
if os.path.exists(tmp):
self.progress_successes += 1
| gpl-2.0 | -2,664,845,375,098,594,300 | 37.358333 | 100 | 0.61612 | false |
mifumagalli/mypython | redshifts/zfit.py | 1 | 48275 | """
Gui to inspect spectra in 1/2D
"""
try:
import Tkinter as tkinter
import tkFont as tkfont
from Tkinter import Tk
import tkFileDialog as filedialog
except:
import tkinter
from tkinter import font as tkfont
from tkinter import Tk
from tkinter import filedialog
from astropy.io import fits
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import os
import numpy as np
import scipy
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy import interpolate
from scipy import signal
from astropy.io import fits
from astropy.table import Table
import sys, getopt
class zfitwin(tkinter.Tk):
""" The basic class of the widget """
def __init__(self,parent, startfile=None, z_start=0.0):
""" My constructor """
self.tk = Tk()
#set min and preferred size of main gui
self.minwinwidth=300
self.minwinheight=300
screen_width = self.winfo_screenwidth()
screen_height = self.winfo_screenheight()
self.preferwinwidth=int(screen_width*0.8)
self.preferwinheight=int(screen_height*0.8)
self.minsize(width=self.minwinwidth, height=self.minwinheight)
self.geometry("{}x{}".format(self.preferwinwidth,self.preferwinheight))
#tweak the aspect ratio of the menu and data gui
self.menuaspect=[1,0.24] #Ruari 24/05 fixes bug where different resolutions cause the menu to be cut off
self.dataaspect=[1,1-0.24] #Ruari 24/05 fixes bug where different resolutions cause the menu to be cut off
self.dpi=80
#find exect dir
self.execdir=__file__.split('zfit.py')[0]
if(len(self.execdir)==0):
self.execdir='./'
#Fiddle with font
default_font = tkfont.nametofont("TkDefaultFont")
scalefont = int(screen_height/1080.0*14)
default_font.configure(size=scalefont)
#init gui frame
self.initialize(startfile, z_start)
def initialize(self, startfile, z_start):
""" This init the basic gui """
#create a menu frame
self.menuframe=tkinter.Frame(self,width=int(self.preferwinwidth*self.menuaspect[0]),
height=int(self.preferwinheight*self.menuaspect[1]))
self.menuframe.grid_propagate(0)
self.menuframe.grid()
#create a data frame
self.dataframe=tkinter.Frame(self,width=int(self.preferwinwidth*self.dataaspect[0]),
height=int(self.preferwinheight*self.dataaspect[1]))
self.dataframe.grid_propagate(0)
self.dataframe.grid()
#stick the 2D image in a separate window
self.imgframe=tkinter.Toplevel(width=600,height=600)
#update for later use of units
self.update()
#now initialise the menu frame
self.init_menuframe()
#now initialise the data frame
self.init_dataframe(startfile)
#If zstart exists show the lines automatically
if z_start != 0.0:
self.displaylines()
self.shwlinstate.set(1)
self.redshiftline.set("{}".format(z_start))
def init_menuframe(self):
""" This init the menu specific part """
#exit button
self.menu_exit = tkinter.Button(self.menuframe,text=u"EXIT",command=self.OnExit)
self.menu_exit.grid(column=0,row=0)
#save button
self.menu_save = tkinter.Button(self.menuframe,text=u"Save",command=self.OnSave)
self.menu_save.grid(column=0,row=1)
#choice of spectra
self.menu_select = tkinter.Button(self.menuframe,text=u"Open Spectrum",
command=self.SelectFile)
self.menu_select.grid(column=0,row=2)
#current spectrum
self.currspec=tkinter.StringVar()
self.currspec.set('Spect: Demo')
self.current=tkinter.Label(self.menuframe,textvariable = self.currspec)
self.current.grid(column=0,row=3)
self.mouse_position=tkinter.StringVar()
self.mouse_position.set('Mouse:(None,None)')
self.mouse_position_w=tkinter.Label(self.menuframe,textvariable = self.mouse_position)
self.mouse_position_w.grid(column=0,row=4,columnspan=3)
#Message window
self.generic_message=tkinter.StringVar()
self.generic_message.set('zfit-> Ready to go!')
self.generic_message_w=tkinter.Label(self.menuframe,textvariable = self.generic_message)
self.generic_message_w.grid(column=5,row=3,columnspan=3)
#line control stuff
self.init_linecontrol()
#templates control stuff
self.init_templcontrol()
def init_dataframe(self, startfile):
""" This init the data specific part """
#Work out the geometry of the different data parts
#canvas for spectrum ...
self.pltspec_width=self.dataframe.winfo_width()
self.pltspec_height=int(self.dataframe.winfo_height()*0.6)
#canvas for twod spec
self.twodspc_width=self.dataframe.winfo_width()
self.twodspc_height=int((self.dataframe.winfo_height()-self.pltspec_height)*0.6)
#canvas for twod err
self.twoderr_width=self.dataframe.winfo_width()
self.twoderr_height=int((self.dataframe.winfo_height()-self.pltspec_height)*0.5)
#work out dimensions for twod image
self.twodimg_width=self.imgframe.winfo_width()
self.twodimg_height=self.imgframe.winfo_height()
#now open with default spectrum and plot
#self.filename=os.path.abspath(self.execdir)+"/test_spectrum.fits" RUari Jul 17 17
if startfile==None:
self.filename=os.path.abspath(self.execdir)+"/test_spectrum.fits"
else:
self.filename=startfile
self.currspec.set('Spect: '+startfile)
self.fits=fits.open(self.filename)
#unpack
self.fitwav1d=self.fits[2].data
self.fitspe1d=self.fits[0].data
self.fitspe1d_original=np.copy(self.fitspe1d)
self.fiterr1d=self.fits[1].data
self.fitspe2d=self.fits[4].data
self.fiterr2d=self.fits[5].data
self.fitimg=self.fits[6].data
#load sky model and normalise to source flux
skyspe=fits.open('{}/templates/sky/SKY_SPECTRUM_0001.fits'.format(self.execdir))
skycnt=fits.open('{}/templates/sky/SKY_CONTINUUM_0001.fits'.format(self.execdir))
#compute continuum subtracted sky model
self.wavesky=np.array(skyspe[1].data['LAMBDA'])
cont_resampled=interp1d(skycnt[1].data['LAMBDA'],skycnt[1].data['FLUX'],bounds_error=False,fill_value=0)(skyspe[1].data['LAMBDA'])
self.fluxsky=np.array(skyspe[1].data['DATA'])-cont_resampled
self.fluxsky=self.fluxsky/np.max(self.fluxsky)*0.5*np.max(self.fitspe1d)
self.drawdata()
        #set tmpfitxcorr to None to avoid errors on later init
self.tmpfitxcorr=None
#set smoothwindow
self.smooth=3
def init_linecontrol(self):
""" This controls operation with emission lines """
#just say what it is
linelabel=tkinter.Label(self.menuframe,text = "Emission lines")
linelabel.grid(column=1,row=0,columnspan=2)
#drop down menu to select emission lines
llab = tkinter.Label(self.menuframe, text="Select Lines: ")
llab.grid(column=1,row=1)
self.linelist = tkinter.StringVar(self.menuframe)
self.linelist.set("gal_vac") # default value
self.lineselect = tkinter.OptionMenu(self.menuframe, self.linelist,"gal_vac","gal_air","lbg","lls","tell")
self.lineselect.grid(column=2,row=1)
#set the linelist in trace state
self.linelist.trace("w",self.displaylines)
#line redshift window
zlab = tkinter.Label(self.menuframe, text="z = ")
zlab.grid(column=1,row=2)
self.redshiftline = tkinter.StringVar()
self.redlinecntr = tkinter.Entry(self.menuframe,textvariable=self.redshiftline)
self.redlinecntr.grid(column=2,row=2)
self.redshiftline.set("0.0000")
#set the redshift in a trace state
self.redshiftline.trace("w",self.displaylines)
#display lines
self.shwlinstate=tkinter.IntVar()
self.lineshow = tkinter.Checkbutton(self.menuframe, text="Show Lines",
variable=self.shwlinstate,command=self.displaylines)
self.lineshow.grid(column=1,row=3)
#fit lines
self.line_fit = tkinter.Button(self.menuframe,text=u"FitLines",command=self.fitlines)
self.line_fit.grid(column=2,row=3)
def init_templcontrol(self):
""" Control the options for template fitting """
#just say what it is
templabel=tkinter.Label(self.menuframe,text = "Templates")
templabel.grid(column=3,row=0,columnspan=4)
#drop down menu to select template family
llab = tkinter.Label(self.menuframe, text="Pick template: ")
llab.grid(column=3,row=1)
self.tempgroup= tkinter.StringVar(self.menuframe)
self.tempgroup.set("Select")
self.tempselect = tkinter.OptionMenu(self.menuframe,self.tempgroup,"kinney","lbgs","sdss")
self.tempselect.grid(column=4,row=1)
self.tempgroup.trace("w",self.loadtemplate)
#just say what it is
self.currenttemplate=tkinter.StringVar(self.menuframe)
self.currenttemplate.set("Current: None")
self.tempchoice=tkinter.Label(self.menuframe,textvariable = self.currenttemplate)
self.tempchoice.grid(column=5,row=1,columnspan=2)
        #Do not use trace for template, as these are expensive to compute
#template redshift window
zlab = tkinter.Label(self.menuframe, text="z = ")
zlab.grid(column=3,row=2)
self.redshifttemp = tkinter.StringVar()
self.redtempcntr = tkinter.Entry(self.menuframe,textvariable=self.redshifttemp)
self.redtempcntr.grid(column=4,row=2)
self.redshifttemp.set("0.0000")
#rmag window
rmg = tkinter.Label(self.menuframe, text="flux = ")
rmg.grid(column=3,row=3)
self.magtemp = tkinter.StringVar()
self.magtemcntr = tkinter.Entry(self.menuframe,textvariable=self.magtemp)
self.magtemcntr.grid(column=4,row=3)
self.magtemp.set("1.00")
#display template
self.shwtempstate=tkinter.IntVar()
self.tempshow = tkinter.Button(self.menuframe,text="Show Template",command=self.displaytemplate)
self.tempshow.grid(column=3,row=4)
self.temphide = tkinter.Button(self.menuframe,text="Hide Template",command=self.hidetemplate)
self.temphide.grid(column=4,row=4)
#fit template
self.template_fit = tkinter.Button(self.menuframe,text=u"FitTemplate",command=self.fittemplate)
self.template_fit.grid(column=5,row=2)
#toggle sky
self.shwskystate=tkinter.IntVar()
self.template_sky=tkinter.Button(self.menuframe,text=u"Sky On/Off",command=self.togglesky)
self.template_sky.grid(column=5,row=4)
def OnExit(self):
""" Quit all on exit """
self.fits.close()
self.quit()
self.destroy()
def OnSave(self):
""" Save screen """
print('Placeholder')
def SelectFile(self):
""" Select and open file as one wishes """
#select file
self.filename=filedialog.askopenfilename(initialdir='./')
#update name
self.currspec.set("Spec: "+self.filename.split("/")[-1])
#close old and reopen
self.fits.close()
self.fits=fits.open(self.filename)
#unpack
self.fitwav1d=self.fits[2].data
self.fitspe1d=self.fits[0].data
self.fitspe1d_original=np.copy(self.fits[0].data)
self.fiterr1d=self.fits[1].data
self.fitspe2d=self.fits[4].data
self.fiterr2d=self.fits[5].data
self.fitimg=self.fits[6].data
#redraw
self.drawdata(refresh=True)
def drawdata(self,refresh=False):
"""
Once the spectrum is set, populate the data part of the gui
refresh -> True, wipe all canvas before redrawing
"""
if(refresh):
#now destroy all data canvas
self.twodimagePlot.get_tk_widget().destroy()
self.spectrumPlot.get_tk_widget().destroy()
self.twodspcPlot.get_tk_widget().destroy()
self.twoderrPlot.get_tk_widget().destroy()
#refresh 2D image
self.init_twodimage()
#refresh the spectrum
self.init_spectrum()
#refresh 2D spec
self.init_twodspec()
#refresh 2D err
self.init_twoderr()
def init_twodimage(self):
""" Draw the 2D image """
#create properties for this plot
self.twodimagePlot_prop={}
        #figure stuff
self.twodimagePlot_prop["figure"] = Figure(figsize=(self.twodimg_width/self.dpi,self.twodimg_height/self.dpi),
dpi=self.dpi)
self.twodimagePlot_prop["axis"] = self.twodimagePlot_prop["figure"].add_subplot(111)
#call plotting routine
self.update_twodimage()
#send it to canvas - connect event
self.twodimagePlot = FigureCanvasTkAgg(self.twodimagePlot_prop["figure"],master=self.imgframe)
#Draw is required in matplotlib > 2.2, show is kept for legacy only
try:
self.twodimagePlot.draw()
except:
self.twodimagePlot.show()
#need to set tight layout after showing
self.twodimagePlot_prop["figure"].tight_layout()
#enable event on click
self.twodimagePlot.mpl_connect("button_press_event", self.pressbutton)
self.twodimagePlot.mpl_connect("key_press_event", self.presskey)
self.twodimagePlot.get_tk_widget().grid()
def update_twodimage(self,update=False):
"""
Code that updates the 2D image
Update = True, redraw
"""
self.twodimagePlot_prop["image"] =self.twodimagePlot_prop["axis"].imshow(self.fitimg,origin='lower',aspect='auto')
self.twodimagePlot_prop["image"].set_cmap('hot')
#self.twodimagePlot_prop["axis"].set_xlabel('Pix')
#self.twodimagePlot_prop["axis"].set_ylabel('Pix')
def init_spectrum(self):
""" Draw the spectrum """
#create properties for this plot
self.spectrumPlot_prop={}
self.spectrumPlot_prop["xmin"]=np.min(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["xmax"]=np.max(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["ymin"]=np.min(np.nan_to_num(self.fitspe1d))
self.spectrumPlot_prop["ymax"]=np.max(np.nan_to_num(self.fitspe1d))
#figure stuff
self.spectrumPlot_prop["figure"]= Figure(figsize=(0.99*self.pltspec_width/self.dpi,0.96*self.pltspec_height/self.dpi),
dpi=self.dpi)
self.spectrumPlot_prop["axis"]= self.spectrumPlot_prop["figure"].add_subplot(111)
#call plotting routine
self.update_spectrum()
#send it to canvas
self.spectrumPlot = FigureCanvasTkAgg(self.spectrumPlot_prop["figure"],master=self.dataframe)
try:
self.spectrumPlot.draw()
except:
self.spectrumPlot.show()
#enable event on click
self.spectrumPlot_prop["figure"].tight_layout()
self.spectrumPlot.mpl_connect("button_press_event", self.pressbutton)
self.spectrumPlot.mpl_connect("motion_notify_event", self.movemouse)
self.spectrumPlot.mpl_connect("key_press_event", self.presskey)
self.spectrumPlot.get_tk_widget().grid(column=0,row=0)
def update_spectrum(self,update=False):
"""
Code that updates the spectrum
Update = True, redraw
"""
if(update):
self.spectrumPlot_prop["axis"].cla()
#plot main data
self.spectrumPlot_prop["axis"].step(self.fitwav1d,self.fitspe1d,where='mid')
self.spectrumPlot_prop["axis"].step(self.fitwav1d,self.fiterr1d,color='red',\
linestyle='--',zorder=1,where='mid')
self.spectrumPlot_prop["axis"].set_xlim(self.spectrumPlot_prop["xmin"],self.spectrumPlot_prop["xmax"])
self.spectrumPlot_prop["axis"].set_ylim(self.spectrumPlot_prop["ymin"],self.spectrumPlot_prop["ymax"])
self.spectrumPlot_prop["axis"].set_xlabel('Wavelength')
#self.spectrumPlot_prop["axis"].set_ylabel('Flux')
#if needed plot sky
if(self.shwskystate.get()):
self.spectrumPlot_prop["axis"].step(self.wavesky,self.fluxsky,where='mid',color='black')
#if needed, plot lines
if(self.shwlinstate.get()):
#set redshift
try:
redsh=float(self.redshiftline.get())
except:
redsh=0.0
#loop over lines and draw
for lw,lnam in self.infoline:
#find the obs wave
lwplot=lw*(1+redsh)
if((lwplot > self.spectrumPlot_prop["xmin"]) & (lwplot < self.spectrumPlot_prop["xmax"])):
self.spectrumPlot_prop["axis"].axvline(lwplot, color='grey', linestyle='--')
self.spectrumPlot_prop["axis"].text(lwplot,self.spectrumPlot_prop["ymax"],lnam,
verticalalignment='top',rotation=90,fontsize=12)
#if needed, plot template
if(self.shwtempstate.get()):
self.spectrumPlot_prop["axis"].plot(self.fitwav1d,self.templatedata_current,color='black',zorder=3)
#plot zero line
self.spectrumPlot_prop["axis"].plot([self.spectrumPlot_prop["xmin"],self.spectrumPlot_prop["xmax"]],
[0,0],color='green',zorder=2,linestyle=':')
#finally draw
if(update):
self.spectrumPlot.draw()
def init_twodspec(self):
""" Draw the 2D spectrum """
#create properties for this plot
self.twodspcPlot_prop={}
        #figure stuff
self.twodspcPlot_prop["figure"]= Figure(figsize=(0.99*self.twodspc_width/self.dpi,0.96*self.twodspc_height/self.dpi),
dpi=self.dpi)
self.twodspcPlot_prop["axis"] = self.twodspcPlot_prop["figure"].add_subplot(111)
#call plotting routine
self.update_twodspec()
#send it to canvas
self.twodspcPlot = FigureCanvasTkAgg(self.twodspcPlot_prop["figure"],master=self.dataframe)
try:
self.twodspcPlot.draw()
except:
self.twodspcPlot.show()
#enable event on click
self.twodspcPlot_prop["figure"].tight_layout()
self.twodspcPlot.mpl_connect("button_press_event", self.pressbutton)
self.twodspcPlot.mpl_connect("key_press_event", self.presskey)
self.twodspcPlot.mpl_connect("motion_notify_event", self.movemouse)
self.twodspcPlot.get_tk_widget().grid(column=0,row=1,sticky='NW')
def wavemap(self,x,pos):
""" Utility to map the pixel in 2D image to wavelegth """
#wavelength mapper
index=np.arange(0,len(self.fitwav1d))
wave=np.interp(x,index,self.fitwav1d)
        #The two args are the value and tick position
return "%.1f" % wave
def inv_wavemap(self,x):
""" Utility to map wavelegth to pixel in 2D mage """
#wavelength mapper
index=np.arange(0,len(self.fitwav1d))
pix=np.interp(x,self.fitwav1d,index,left=0,right=len(self.fitwav1d))
return pix
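    # Worked example (numbers assumed): if fitwav1d spans 4750-9350 Angstrom,
    # wavemap turns an axis position such as pixel 0 into the tick label
    # "4750.0" via np.interp, while inv_wavemap inverts the relation, clipping
    # wavelengths outside the range onto the first/last pixel so the 2D panels
    # can share the 1D spectrum's x-limits.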
def update_twodspec(self,update=False):
"""
Code that updates the 2D spectrum
Update = True, redraw
"""
if(update):
self.twodspcPlot_prop["axis"].cla()
self.twodspcPlot_prop["image"]=self.twodspcPlot_prop["axis"].imshow(np.rot90(self.fitspe2d),origin='lower',aspect='auto')
self.twodspcPlot_prop["image"].set_cmap('hot')
#control level
medianlevel=np.median(np.nan_to_num(self.fitspe2d))
stdlevel=np.std(np.nan_to_num(self.fitspe2d))
self.twodspcPlot_prop["image"].set_clim(medianlevel-3.*stdlevel,medianlevel+3*stdlevel)
#wave mapper
self.twodspcPlot_prop["axis"].xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(self.wavemap))
#now set X axis as in 1d spectrum
xpixmin=self.inv_wavemap(self.spectrumPlot_prop["xmin"])
xpixmax=self.inv_wavemap(self.spectrumPlot_prop["xmax"])
#force minimum maximum
if(xpixmin == xpixmax):
xpixmin = xpixmax-1
if(xpixmax == 0):
xpixmax = 1
self.twodspcPlot_prop["axis"].set_xlim(xpixmin,xpixmax)
self.twodspcPlot_prop["axis"].set_xlabel('Wavelength')
if(update):
self.twodspcPlot.draw()
def init_twoderr(self):
""" Draw the 2D error """
#create properties for this plot
self.twoderrPlot_prop={}
        #figure stuff
#self.twoderr.grid(column=1,row=2,sticky='NW')
self.twoderrPlot_prop['figure'] = Figure(figsize=(0.99*self.twoderr_width/self.dpi,0.96*self.twoderr_height/self.dpi),
dpi=self.dpi)
self.twoderrPlot_prop['axis'] = self.twoderrPlot_prop['figure'].add_subplot(111)
#call plotting routine
self.update_twoderr()
#send it to canvas
self.twoderrPlot = FigureCanvasTkAgg(self.twoderrPlot_prop['figure'],master=self.dataframe)
try:
self.twoderrPlot.draw()
except:
self.twoderrPlot.show()
#enable event on click
self.twoderrPlot_prop['figure'].tight_layout()
self.twoderrPlot.mpl_connect("button_press_event", self.pressbutton)
self.twoderrPlot.mpl_connect("key_press_event", self.presskey)
self.twoderrPlot.mpl_connect("motion_notify_event", self.movemouse)
self.twoderrPlot.get_tk_widget().grid(column=0,row=2,sticky='NW')
def update_twoderr(self,update=False):
"""
Code that updates the 2D error
Update = True, redraw
"""
if(update):
self.twoderrPlot_prop["axis"].cla()
self.twoderrPlot_prop['image'] =self.twoderrPlot_prop['axis'].imshow(np.rot90(self.fiterr2d),origin='lower',aspect='auto')
self.twoderrPlot_prop['image'].set_cmap('hot')
#control level
medianlevel=np.median(np.nan_to_num(self.fiterr2d))
stdlevel=np.std(np.nan_to_num(self.fiterr2d))
self.twoderrPlot_prop["image"].set_clim(medianlevel-3.*stdlevel,medianlevel+3*stdlevel)
#wave mapper
self.twoderrPlot_prop["axis"].xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(self.wavemap))
#now set X axis as in 1d spectrum
xpixmin=self.inv_wavemap(self.spectrumPlot_prop["xmin"])
xpixmax=self.inv_wavemap(self.spectrumPlot_prop["xmax"])
#force minimum maximum
if(xpixmin == xpixmax):
xpixmin = xpixmax-1
if(xpixmax == 0):
xpixmax = 1
self.twoderrPlot_prop["axis"].set_xlim(xpixmin,xpixmax)
self.twoderrPlot_prop["axis"].set_xlabel('Wavelength')
if(update):
self.twoderrPlot.draw()
def displaylines(self,*args):
""" Display the line list by refreshing plot in update state """
#first parse the line lists
linefile=self.execdir+"/lines/"+self.linelist.get()+".lst"
self.infoline = Table.read(linefile, format='ascii.basic')
#self.infoline=np.loadtxt(linefile, dtype={'names': ('wave', 'tag'),
# 'formats': ('f4', 'S4')})
#refresh plot
self.update_spectrum(update=True)
def loadtemplate(self,*args):
""" Load template from disk and preselect some
useful default
"""
#if so, start dialogue to pick the desired template
self.picktemplate=filedialog.askopenfilename(initialdir='{}/templates/{}'.format(self.execdir,self.tempgroup.get()))
#set current template
self.currenttemplate.set("Current: "+self.picktemplate.split("/")[-1])
#load current template
if('sdss' in self.tempgroup.get()):
#load fits
fitstemp=fits.open(self.picktemplate)
#grab flux
self.templatedata={'flux':fitstemp[0].data[0,:]}
#cosntruct wave
waveinx=np.arange(0,len(self.templatedata['flux']),1)
wavevac=10**(waveinx*1.*fitstemp[0].header['COEFF1']+1.*fitstemp[0].header['COEFF0'])
##go to air
#self.templatedata['wave']= wavevac/(1.0+2.735182e-4+131.4182/wavevac**2+2.76249e8/wavevac**4)
#remain in vac
self.templatedata['wave']= wavevac
else:
#load text
#self.templatedata=np.loadtxt(self.picktemplate, dtype={'names': ('wave', 'flux'),
# 'formats': ('f10', 'f10')},usecols=(0,1))
self.templatedata = Table.read(self.picktemplate, format='ascii.basic')
#set sensible pick in redshift and adjust data as needed
if('lbg' in self.tempgroup.get()):
self.redshifttemp.set("3.000")
elif('kinney' in self.tempgroup.get()):
self.templatedata['flux']=self.templatedata['flux']/1e-14
elif('sdss' in self.tempgroup.get()):
self.templatedata['flux']=self.templatedata['flux']*100.
else:
self.redshifttemp.set("0.000")
def displaytemplate(self,*args):
""" Compute and display template """
self.shwtempstate.set(1)
#compute template given current values
self.adapttemplate()
#refresh plot
self.update_spectrum(update=True)
def hidetemplate(self,*args):
""" Hide template """
self.shwtempstate.set(0)
#refresh plot
self.update_spectrum(update=True)
def adapttemplate(self):
""" Interpolate a template over the data """
#redshift factor
redhfactor=(1+float(self.redshifttemp.get()))
#now construct interpolation
thisw=self.templatedata['wave']*redhfactor
thisf=self.templatedata['flux']
intflx = interp1d(thisw,thisf,kind='linear',bounds_error=False,fill_value=0.0)
#apply normalisation
self.templatedata_current=intflx(self.fitwav1d)*float(self.magtemp.get())
def togglesky(self,*args):
""" Switch on/off sky """
if(self.shwskystate.get()):
self.shwskystate.set(0)
else:
self.shwskystate.set(1)
#refresh plot
self.update_spectrum(update=True)
def fitlines(self):
""" Fit the line list """
#loop over lines inside spectrum
        #launch a new window
self.lnfit=tkinter.Toplevel(self.tk)
#add a display
fig=Figure(figsize=(self.preferwinwidth/self.dpi,self.preferwinheight/self.dpi),dpi=self.dpi)
#pick z
try:
redsh=float(self.redshiftline.get())
except:
redsh=0.0
lines_good_wave_rest=[]
lines_good_wave_obs=[]
lines_good_name=[]
for lw,lnam in self.infoline:
lwplot=lw*(1+redsh)
if((lwplot > min(self.fitwav1d)+8) & (lwplot < max(self.fitwav1d)-8)):
                #do a boxcar sum in an 8A bin to see if the line exists
inside=np.where((self.fitwav1d > lwplot-4)& (self.fitwav1d < lwplot+4))
continuum=np.where(((self.fitwav1d > lwplot-20)& (self.fitwav1d < lwplot-10)) |
((self.fitwav1d > lwplot+10)& (self.fitwav1d < lwplot+20)))
clevel=np.median(self.fitspe1d[continuum])
flux=np.sum((self.fitspe1d[inside]-clevel))
noise=np.sqrt(np.sum(self.fiterr1d[inside]**2))
#cut in SN
if(flux/noise > 2):
#stash
lines_good_wave_rest.append(lw)
lines_good_wave_obs.append(lwplot)
lines_good_name.append(lnam)
#generate a 4x? grid of plots
nlines=len(lines_good_wave_rest)
ncol=4
nraw=int(nlines/ncol)
if(nlines%ncol > 0):
nraw=nraw+1
czall=[]
#loop on good stuff for fits
for ii in range(nlines):
#select region to fit
fitwindow=np.where((self.fitwav1d > lines_good_wave_obs[ii]-10) & (self.fitwav1d < lines_good_wave_obs[ii]+10))
continuum=np.where(((self.fitwav1d > lines_good_wave_obs[ii]-20)& (self.fitwav1d < lines_good_wave_obs[ii]-10)) |
((self.fitwav1d > lines_good_wave_obs[ii]+10)& (self.fitwav1d < lines_good_wave_obs[ii]+20)))
clevel=np.median(self.fitspe1d[continuum])
p0=np.array([10.,1.*float(lines_good_wave_obs[ii]),2.,0.])
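            # self.gauss is defined elsewhere in this module; from the
            # 4-element p0 it is assumed to be of the form
            #   A * exp(-(x - mu)**2 / (2 * sigma**2)) + offset
            # with p0 = [A, mu, sigma, offset] = [10, line centre, 2 A, 0].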
#fit a Gaussian
yval=np.nan_to_num(self.fitspe1d[fitwindow]-clevel)
yerr=np.nan_to_num(self.fiterr1d[fitwindow]*1.)
xval=np.nan_to_num(self.fitwav1d[fitwindow]*1.)
popt,pcov=curve_fit(self.gauss,xval,yval,p0=p0, sigma=yerr)
perr = np.sqrt(np.diag(pcov))
#eval fit
xg=np.arange(min(xval)-2,max(xval)+2,0.2)
fitg=self.gauss(xg,*popt)
#grab fits
czfit=popt[1]/lines_good_wave_rest[ii]-1.
czfiterr=perr[1]/lines_good_wave_rest[ii]
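            #redshift of this line from the fitted centroid, z = mu/lambda_rest - 1,
            #with the centroid uncertainty propagated directly into the redshift error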
czall.append(czfit)
#display
ax = fig.add_subplot(nraw,ncol,ii+1)
ax.plot(xval,yval)
ax.plot(xval,yerr,color='red',linestyle="--",zorder=1)
ax.plot(xg,fitg,color='black',linestyle=":")
ax.set_title("{0}{1} z = {2:.6} +/- {3:.5}".format(lines_good_name[ii],int(lines_good_wave_rest[ii]),czfit,czfiterr))
#send message to user and reset redshift
bestz=np.median(np.array(czall))
bestez=np.std(np.array(czall))
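        #combine the per-line measurements: the median is robust to a single bad fit,
        #and the standard deviation is quoted as the uncertainty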
self.generic_message.set(r'zfit-> Best fit is {:6.5f}+/-{:6.5f}'.format(bestz,bestez))
self.redshiftline.set(bestz)
#send figure to canvas
self.linefitplot = FigureCanvasTkAgg(fig,master=self.lnfit)
try:
self.linefitplot.draw()
except:
self.linefitplot.show()
#fig.tight_layout()
self.linefitplot.get_tk_widget().grid()
def fittemplate(self):
""" Fit the template """
#init the template correlation
realdata={'wave':self.fitwav1d,'flux':self.fitspe1d,'error':self.fiterr1d}
##Testing sequence
#realdata={'wave':self.templatedata['wave']*(1+0.4329),'flux':self.templatedata['flux'],
# 'error':self.templatedata['flux']}
print('Computing correlation... be patient!')
#find the wavelength range covering the min/max extent
absmin=np.min([np.min(self.templatedata['wave']),np.min(realdata['wave'])])
absmax=np.max([np.max(self.templatedata['wave']),np.max(realdata['wave'])])
#resample in log
deltal=5e-4
lnwave=np.arange(np.log(absmin),np.log(absmax),deltal)
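        #work on a grid uniform in ln(lambda): a redshift becomes a constant pixel shift there,
        #so the cross-correlation peak lag maps directly to z; with deltal=5e-4 one pixel is
        #roughly c*deltal ~ 150 km/s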
#resample with spline (s controls the smoothing)
x=np.nan_to_num(self.templatedata['wave'])
y=np.nan_to_num(self.templatedata['flux'])
resamp_templ=interpolate.splrep(np.log(x),y,s=0)
x=np.nan_to_num(realdata['wave'])
y=np.nan_to_num(realdata['flux'])
resamp_real=interpolate.splrep(np.log(x),y,s=0)
#put everything on the same array - zero padding the extrapolation
flux_templ=interpolate.splev(lnwave,resamp_templ,der=0,ext=1)
flux_real=interpolate.splev(lnwave,resamp_real,der=0,ext=1)
#masking strong sky lines
mask=np.where((lnwave > np.log(5569.)) & (lnwave < np.log(5584.)))
flux_real[mask]=0
mask=np.where((lnwave > np.log(6292.)) & (lnwave < np.log(6308.)))
flux_real[mask]=0
mask=np.where((lnwave > np.log(6356.)) & (lnwave < np.log(6369.)))
flux_real[mask]=0
mask=np.where((lnwave > 8.6752) & (lnwave < 8.6860))
flux_real[mask]=0
mask=np.where((lnwave > 8.8274) & (lnwave < 8.8525))
flux_real[mask]=0
mask=np.where((lnwave > 8.8862) & (lnwave < np.log(12000.)))
flux_real[mask]=0
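        #zero out regions dominated by the night sky before correlating (presumably the bright
        #[OI] 5577/6300/6364A and NaD features, the telluric B band near 6860A, and everything
        #redward of ~7200A), so sky residuals do not drive the correlation peak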
#correlate
xcorr=np.correlate(flux_real,flux_templ,mode='full')
#find the peak in the second half in units of redshift
        indxmax=np.argmax(xcorr)-len(xcorr)//2
peakz=np.exp(indxmax*deltal)-1
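        #the peak lag (in log-lambda pixels) converts to redshift as z = exp(lag*deltal) - 1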
#print peakz
#find the reshift axis
indxarr=np.arange(0,len(lnwave),1)
self.xcorr_redax=np.exp(indxarr*deltal)-1
        self.xcorr_xcorr=xcorr[len(xcorr)//2:]
self.xcorr_redshift=peakz
#set the redshift in template window
self.redshifttemp.set("{}".format(self.xcorr_redshift))
#trigger display options
        #launch a new window
self.tmlfit=tkinter.Toplevel(self.tk)
#add xcorr to display
#create properties for this plot
self.tmpfitxcorr_prop={}
self.tmpfitxcorr_prop["xmin"]=np.min(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["xmax"]=np.max(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["ymin"]=np.min(np.nan_to_num(self.xcorr_xcorr))
self.tmpfitxcorr_prop["ymax"]=np.max(np.nan_to_num(self.xcorr_xcorr))
self.tmpfitxcorr_prop["figure"]=Figure(figsize=(self.preferwinwidth/self.dpi*0.75,self.preferwinheight/self.dpi*0.75),dpi=self.dpi)
self.tmpfitxcorr_prop["axis"]= self.tmpfitxcorr_prop["figure"].add_subplot(111)
#call plotting routine
self.update_xcorrplot()
#send it to canvas
self.tmpfitxcorr = FigureCanvasTkAgg(self.tmpfitxcorr_prop["figure"],master=self.tmlfit)
self.tmpfitxcorr.show()
#enable event on click
self.tmpfitxcorr_prop["figure"].tight_layout()
self.tmpfitxcorr.mpl_connect("button_press_event", self.pressbutton)
self.tmpfitxcorr.mpl_connect("key_press_event", self.presskey)
self.tmpfitxcorr.get_tk_widget().grid(column=0,row=0)
def update_xcorrplot(self,update=False):
""" Update plot for xcorrplot """
if(update):
self.tmpfitxcorr_prop["axis"].cla()
#plot main data
self.tmpfitxcorr_prop["axis"].plot(self.xcorr_redax,self.xcorr_xcorr)
self.tmpfitxcorr_prop["axis"].axvline(self.xcorr_redshift, color='grey', linestyle='--')
self.tmpfitxcorr_prop["axis"].set_xlim(self.tmpfitxcorr_prop["xmin"],self.tmpfitxcorr_prop["xmax"])
self.tmpfitxcorr_prop["axis"].set_ylim(self.tmpfitxcorr_prop["ymin"],self.tmpfitxcorr_prop["ymax"])
self.tmpfitxcorr_prop["axis"].set_xlabel('Redshift')
self.tmpfitxcorr_prop["axis"].set_ylabel('XCORR')
#finally draw
if(update):
self.tmpfitxcorr.draw()
def movemouse(self,event):
""" Do stuff when mouse moves """
if(event.canvas == self.spectrumPlot):
self.mouse_position.set('Mouse:({},{})'.format(event.xdata,event.ydata))
elif(event.canvas == self.twodspcPlot):
try:
self.mouse_position.set('Mouse:({},{})'.format(self.wavemap(event.xdata,0.0),event.ydata))
except:
self.mouse_position.set('Mouse:(None,None)')
elif(event.canvas == self.twoderrPlot):
try:
self.mouse_position.set('Mouse:({},{})'.format(self.wavemap(event.xdata,0.0),event.ydata))
except:
self.mouse_position.set('Mouse:(None,None)')
def pressbutton(self,event):
""" Do stuff when data plot is pressed with mouse """
#this is how to redirect events
if(event.canvas == self.twoderrPlot):
#set focus
self.twoderrPlot.get_tk_widget().focus_set()
if(event.canvas == self.twodspcPlot):
#set focus
self.twodspcPlot.get_tk_widget().focus_set()
if(event.canvas == self.twodimagePlot):
#set focus
self.twodimagePlot.get_tk_widget().focus_set()
if(event.canvas == self.spectrumPlot):
#set focus
self.spectrumPlot.get_tk_widget().focus_set()
#for right click, trigger line selector
if(event.button == 3):
self.lineselectorwidget(event)
if(event.canvas == self.tmpfitxcorr):
#set focus
self.tmpfitxcorr.get_tk_widget().focus_set()
def presskey(self,event):
""" Do stuff when data plot is pressed with key """
#quit on q
if(event.key == "q"):
self.OnExit()
#keyboard event when focus on spectrum
if(event.canvas == self.spectrumPlot):
self.spetrumPlot_events(event)
#keyboard event when focus on xcorr
if(event.canvas == self.tmpfitxcorr):
self.tmpfitxcorr_events(event)
def tmpfitxcorr_events(self,event):
""" Handle events of xcorr plot """
#set bottom plot
if(event.key == "b"):
self.tmpfitxcorr_prop["ymin"]=event.ydata
self.update_xcorrplot(update=True)
#set top plot
if(event.key == "t"):
self.tmpfitxcorr_prop["ymax"]=event.ydata
self.update_xcorrplot(update=True)
#set left plot
if(event.key == "l"):
self.tmpfitxcorr_prop["xmin"]=event.xdata
self.update_xcorrplot(update=True)
#set right plot
if(event.key == "r"):
self.tmpfitxcorr_prop["xmax"]=event.xdata
self.update_xcorrplot(update=True)
#zoom in
if(event.key == "i"):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#zoom in by factor of 2
currentwidth=currentwidth*0.5
#zoom around selected wave
self.tmpfitxcorr_prop["xmin"]=event.xdata-currentwidth/2.
self.tmpfitxcorr_prop["xmax"]=event.xdata+currentwidth/2.
self.update_xcorrplot(update=True)
#zoom out
if(event.key == "o"):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#zoom out by factor of 2
currentwidth=currentwidth*2
#zoom around selected wave
self.tmpfitxcorr_prop["xmin"]=event.xdata-currentwidth/2.
self.tmpfitxcorr_prop["xmax"]=event.xdata+currentwidth/2.
self.update_xcorrplot(update=True)
#pan left
if(event.key == "["):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#pan left
self.tmpfitxcorr_prop["xmin"]=self.tmpfitxcorr_prop["xmin"]-currentwidth/2
self.tmpfitxcorr_prop["xmax"]=self.tmpfitxcorr_prop["xmax"]-currentwidth/2
self.update_xcorrplot(update=True)
#pan right
if(event.key == "]"):
#find the current width in x
currentwidth=self.tmpfitxcorr_prop["xmax"]-self.tmpfitxcorr_prop["xmin"]
#pan right
self.tmpfitxcorr_prop["xmin"]=self.tmpfitxcorr_prop["xmin"]+currentwidth/2
self.tmpfitxcorr_prop["xmax"]=self.tmpfitxcorr_prop["xmax"]+currentwidth/2
self.update_xcorrplot(update=True)
#set reset plot
if(event.key == "W"):
self.tmpfitxcorr_prop["xmin"]=np.min(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["xmax"]=np.max(np.nan_to_num(self.xcorr_redax))
self.tmpfitxcorr_prop["ymin"]=np.min(np.nan_to_num(self.xcorr_xcorr))
self.tmpfitxcorr_prop["ymax"]=np.max(np.nan_to_num(self.xcorr_xcorr))
self.update_xcorrplot(update=True)
#mark new redshift
if(event.key == "z"):
#update relevent info
self.xcorr_redshift=event.xdata
self.redshifttemp.set("{}".format(self.xcorr_redshift))
#refresh plot
self.update_xcorrplot(update=True)
#display template
self.displaytemplate()
def spetrumPlot_events(self,event):
"""" Handle events of spectrum plot """
#set bottom plot
if(event.key == "b"):
self.spectrumPlot_prop["ymin"]=event.ydata
self.update_spectrum(update=True)
#set top plot
if(event.key == "t"):
self.spectrumPlot_prop["ymax"]=event.ydata
self.update_spectrum(update=True)
#set left plot
if(event.key == "l"):
self.spectrumPlot_prop["xmin"]=event.xdata
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#set right plot
if(event.key == "r"):
self.spectrumPlot_prop["xmax"]=event.xdata
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#zoom in
if(event.key == "i"):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#zoom in by factor of 2
currentwidth=currentwidth*0.5
#zoom around selected wave
self.spectrumPlot_prop["xmin"]=event.xdata-currentwidth/2.
self.spectrumPlot_prop["xmax"]=event.xdata+currentwidth/2.
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#zoom out
if(event.key == "o"):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#zoom out by factor of 2
currentwidth=currentwidth*2
#zoom around selected wave
self.spectrumPlot_prop["xmin"]=event.xdata-currentwidth/2.
self.spectrumPlot_prop["xmax"]=event.xdata+currentwidth/2.
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#pan left
if(event.key == "["):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#pan left
self.spectrumPlot_prop["xmin"]=self.spectrumPlot_prop["xmin"]-currentwidth/2
self.spectrumPlot_prop["xmax"]=self.spectrumPlot_prop["xmax"]-currentwidth/2
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#pan right
if(event.key == "]"):
#find the current width in x
currentwidth=self.spectrumPlot_prop["xmax"]-self.spectrumPlot_prop["xmin"]
#pan right
self.spectrumPlot_prop["xmin"]=self.spectrumPlot_prop["xmin"]+currentwidth/2
self.spectrumPlot_prop["xmax"]=self.spectrumPlot_prop["xmax"]+currentwidth/2
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#set reset plot
if(event.key == "W"):
self.spectrumPlot_prop["xmin"]=np.min(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["xmax"]=np.max(np.nan_to_num(self.fitwav1d))
self.spectrumPlot_prop["ymin"]=np.min(np.nan_to_num(self.fitspe1d))
self.spectrumPlot_prop["ymax"]=np.max(np.nan_to_num(self.fitspe1d))
self.update_spectrum(update=True)
#update 2d spectra accordingly
self.update_twodspec(update=True)
self.update_twoderr(update=True)
#smooth plot
if(event.key == "S"):
self.fitspe1d=signal.medfilt(self.fitspe1d,self.smooth)
self.smooth=self.smooth+2
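            #the kernel width grows by 2 each press so it stays odd, as medfilt requires; "U" resets it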
self.update_spectrum(update=True)
#unsmooth smooth
if(event.key == "U"):
self.fitspe1d=self.fitspe1d_original
self.smooth=3
self.update_spectrum(update=True)
def lineselectorwidget(self,event):
""" Control what happens when right-click on 1D spectrum
- trigger construction of line list selector
"""
#refresh lines as needed
self.displaylines()
        #launch a new window
self.lnsel=tkinter.Toplevel(self.tk)
#pick z
try:
redsh=float(self.redshiftline.get())
except:
redsh=0.0
#create line buttons for those visibles
self.wlineselect = tkinter.DoubleVar()
self.wlinepos = event.xdata
i=0
for lw,lnam in self.infoline:
lwplot=lw*(1+redsh)
tkinter.Radiobutton(self.lnsel, text=lnam+"{}".format(int(lw)),
variable=self.wlineselect, value=lw,
                                command=self.pickedline).grid(row = i%30, column = i//30, sticky = "NWSE")
i=i+1
self.tk.wait_window(self.lnsel)
def pickedline(self):
""" If one pick a line, find redshift """
#find the redshift
redshift=self.wlinepos/self.wlineselect.get()-1
#set it - auto trigger refresh
self.shwlinstate.set(1)
self.redshiftline.set("{}".format(redshift))
#destroy window
self.lnsel.destroy()
def gauss(self,x, *p):
""" Gaussian model for line fit """
A, mu, sigma, zero = p
gg=A*np.exp(-1.*(x-mu)*(x-mu)/(2.*sigma*sigma))+zero
return gg
def zfit(startfile=None, z_start=0.0):
""" Mains that runs the gui """
app = zfitwin(None, startfile=startfile, z_start=z_start)
app.title('Fit your redshift!')
app.mainloop()
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:],"i:z:",["ifile=","redshift="])
startfile = None
z_start = 0.0
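    #command-line usage (script name assumed), e.g.: python zfit.py -i spec1d.fits -z 0.73
    #-i/--ifile selects the input 1D spectrum, -z/--redshift sets the starting redshift guess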
#cube_range = None
for opt, arg in opts:
if opt in ("-i", "--ifile"):
startfile = arg
elif opt in ("-z", "--redshift"):
z_start = float(arg)
#elif opt in ("-c", "--cube"):
# cube = float(arg)
zfit(startfile=startfile, z_start=z_start)
| gpl-2.0 | 7,933,736,069,385,149,000 | 36.833072 | 139 | 0.588938 | false |
airbnb/streamalert | streamalert_cli/terraform/monitoring.py | 1 | 3570 | """
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from streamalert.shared.logger import get_logger
from streamalert_cli.terraform.common import monitoring_topic_arn
LOGGER = get_logger(__name__)
def generate_monitoring(cluster_name, cluster_dict, config):
"""Add the CloudWatch Monitoring module to the Terraform cluster dict.
Example configuration:
"cloudwatch_monitoring": {
"enabled": true,
"kinesis_alarms_enabled": true,
"lambda_alarms_enabled": true,
"settings": {
"lambda_invocation_error_period": "600",
"kinesis_iterator_age_error_period": "600",
"kinesis_write_throughput_exceeded_threshold": "100"
}
}
Args:
cluster_name (str): The name of the currently generating cluster
cluster_dict (defaultdict): The dict containing all Terraform config for a given cluster.
config (dict): The loaded config from the 'conf/' directory
Returns:
bool: Result of applying the cloudwatch_monitoring module
"""
prefix = config['global']['account']['prefix']
infrastructure_config = config['global'].get('infrastructure')
monitoring_config = config['clusters'][cluster_name]['modules']['cloudwatch_monitoring']
if not (infrastructure_config and 'monitoring' in infrastructure_config):
LOGGER.error('Invalid config: Make sure you declare global infrastructure options!')
return False
if not monitoring_config.get('enabled', False):
LOGGER.info('CloudWatch Monitoring not enabled, skipping...')
return True
sns_topic_arn = monitoring_topic_arn(config)
cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)] = {
'source': './modules/tf_monitoring',
'sns_topic_arn': sns_topic_arn,
'kinesis_alarms_enabled': False,
'lambda_alarms_enabled': False
}
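    # Both alarm toggles start disabled here and are switched on below unless the cluster
    # config explicitly disables them (the per-alarm flags default to enabled when omitted).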
if monitoring_config.get('lambda_alarms_enabled', True):
cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)].update({
'lambda_functions': ['{}_{}_streamalert_classifier'.format(prefix, cluster_name)],
'lambda_alarms_enabled': True
})
if monitoring_config.get('kinesis_alarms_enabled', True):
cluster_dict['module']['cloudwatch_monitoring_{}'.format(cluster_name)].update({
'kinesis_stream': '${{module.kinesis_{}.stream_name}}'.format(cluster_name),
'kinesis_alarms_enabled': True
})
# Add support for custom settings for tweaking alarm thresholds, eval periods, and periods
# Note: This does not strictly check for proper variable names, since there are so many.
# Instead, Terraform will error out if an improper name is used.
# Also, every value in these settings should be a string, so cast for safety.
for setting_name, setting_value in monitoring_config.get('settings', {}).items():
cluster_dict['module']['cloudwatch_monitoring_{}'.format(
cluster_name)][setting_name] = str(setting_value)
return True
| apache-2.0 | -359,748,981,042,700,000 | 40.034483 | 97 | 0.685154 | false |
sarisabban/ProtVR | FlaskApp/ProtVR.py | 1 | 1848 | # Author: Sari Sabban
# Email: [email protected]
# URL: https://github.com/sarisabban
#
# Created By: Sari Sabban
# Created Date: 20 March 2017
import urllib
def ProtVR(x):
lis=list()
filename=urllib.urlopen('http://files.rcsb.org/view/'+x+'.pdb')
lis.append('<script src="/static/aframe.min.js"></script>\n')
lis.append('<a-scene>\n')
lis.append('\t<a-sky color="#111111"></a-sky>\n')
for line in filename:
line=line.decode()
if line.startswith('ATOM'):
splitline=line.split()
try:
coordinates=(splitline[11],splitline[6],splitline[7],splitline[8])
except:
coordinates=(splitline[10],splitline[6],splitline[7],splitline[8])
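			#the element symbol lands at index 11 or 10 of the whitespace split depending on how the
			#ATOM record is formatted (an assumption based on typical PDB column layouts)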
if coordinates[0]=='N':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2D2DE1"></a-sphere>'
elif coordinates[0]=='C':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#2DE12D"></a-sphere>'
elif coordinates[0]=='O':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#E14343"></a-sphere>'
elif coordinates[0]=='H':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBCBCB"></a-sphere>'
elif coordinates[0]=='S':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#CBAE38"></a-sphere>'
elif coordinates[0]=='I':
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#830083"></a-sphere>'
else:
js='\t<a-sphere position="',coordinates[1],coordinates[2],coordinates[3],'" radius="1" color="#6F6F6F"></a-sphere>'
result=' '.join(js)
lis.append(result)
lis.append('</a-scene>')
final=' '.join(lis)
return(final)
#print(final)
#ProtVR('2HIU')
| mit | 2,022,517,550,096,471,300 | 39.173913 | 119 | 0.653139 | false |
mitsei/dlkit | tests/cataloging/test_queries.py | 1 | 9061 | """Unit tests of cataloging queries."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.osid import errors
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def catalog_query_class_fixture(request):
# From test_templates/resource.py::BinQuery::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'CATALOGING',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_catalog_form_for_create([])
create_form.display_name = 'Test catalog'
create_form.description = 'Test catalog description'
request.cls.catalog = request.cls.svc_mgr.create_catalog(create_form)
request.cls.fake_id = Id('resource.Resource%3A1%40ODL.MIT.EDU')
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_catalog(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def catalog_query_test_fixture(request):
# From test_templates/resource.py::BinQuery::init_template
if not is_never_authz(request.cls.service_config):
request.cls.query = request.cls.svc_mgr.get_catalog_query()
@pytest.mark.usefixtures("catalog_query_class_fixture", "catalog_query_test_fixture")
class TestCatalogQuery(object):
"""Tests for CatalogQuery"""
def test_match_id(self):
"""Tests match_id"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.match_id(True, True)
def test_match_any_id(self):
"""Tests match_any_id"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.match_any_id(True)
def test_clear_id_terms(self):
"""Tests clear_id_terms"""
# From test_templates/resource.py::BinQuery::clear_group_terms_template
if is_no_authz(self.service_config):
self.query._query_terms['id'] = 'foo'
if not is_never_authz(self.service_config):
self.query.clear_id_terms()
if is_no_authz(self.service_config):
assert 'id' not in self.query._query_terms
def test_match_ancestor_catalog_id(self):
"""Tests match_ancestor_catalog_id"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.match_ancestor_catalog_id(True, True)
def test_clear_ancestor_catalog_id_terms(self):
"""Tests clear_ancestor_catalog_id_terms"""
# From test_templates/resource.py::BinQuery::clear_group_terms_template
if is_no_authz(self.service_config):
self.query._query_terms['ancestorCatalogId'] = 'foo'
if not is_never_authz(self.service_config):
self.query.clear_ancestor_catalog_id_terms()
if is_no_authz(self.service_config):
assert 'ancestorCatalogId' not in self.query._query_terms
def test_supports_ancestor_catalog_query(self):
"""Tests supports_ancestor_catalog_query"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unimplemented):
self.query.supports_ancestor_catalog_query()
def test_get_ancestor_catalog_query(self):
"""Tests get_ancestor_catalog_query"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unimplemented):
self.query.get_ancestor_catalog_query()
def test_match_any_ancestor_catalog(self):
"""Tests match_any_ancestor_catalog"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.match_any_ancestor_catalog(True)
def test_clear_ancestor_catalog_terms(self):
"""Tests clear_ancestor_catalog_terms"""
# From test_templates/resource.py::BinQuery::clear_group_terms_template
if is_no_authz(self.service_config):
self.query._query_terms['ancestorCatalog'] = 'foo'
if not is_never_authz(self.service_config):
self.query.clear_ancestor_catalog_terms()
if is_no_authz(self.service_config):
assert 'ancestorCatalog' not in self.query._query_terms
def test_match_descendant_catalog_id(self):
"""Tests match_descendant_catalog_id"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.match_descendant_catalog_id(True, True)
def test_clear_descendant_catalog_id_terms(self):
"""Tests clear_descendant_catalog_id_terms"""
# From test_templates/resource.py::BinQuery::clear_group_terms_template
if is_no_authz(self.service_config):
self.query._query_terms['descendantCatalogId'] = 'foo'
if not is_never_authz(self.service_config):
self.query.clear_descendant_catalog_id_terms()
if is_no_authz(self.service_config):
assert 'descendantCatalogId' not in self.query._query_terms
def test_supports_descendant_catalog_query(self):
"""Tests supports_descendant_catalog_query"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unimplemented):
self.query.supports_descendant_catalog_query()
def test_get_descendant_catalog_query(self):
"""Tests get_descendant_catalog_query"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unimplemented):
self.query.get_descendant_catalog_query()
def test_match_any_descendant_catalog(self):
"""Tests match_any_descendant_catalog"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.match_any_descendant_catalog(True)
def test_clear_descendant_catalog_terms(self):
"""Tests clear_descendant_catalog_terms"""
# From test_templates/resource.py::BinQuery::clear_group_terms_template
if is_no_authz(self.service_config):
self.query._query_terms['descendantCatalog'] = 'foo'
if not is_never_authz(self.service_config):
self.query.clear_descendant_catalog_terms()
if is_no_authz(self.service_config):
assert 'descendantCatalog' not in self.query._query_terms
def test_get_catalog_query_record(self):
"""Tests get_catalog_query_record"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.query.get_catalog_query_record(True)
| mit | -4,018,463,533,155,172,400 | 40.949074 | 176 | 0.648825 | false |
danieldmm/minerva | kw_evaluation_runs/aac_full_text_kw_selection_exp.py | 1 | 12856 | # Experiments with the ACL corpus like the ones for LREC'16
#
# Copyright: (c) Daniel Duma 2016
# Author: Daniel Duma <[email protected]>
# For license information, see LICENSE.TXT
from __future__ import print_function
from __future__ import absolute_import
# from proc.nlp_functions import AZ_ZONES_LIST, CORESC_LIST
from evaluation.experiment import Experiment
from db.ez_connect import ez_connect
from kw_evaluation_runs.thesis_settings import CONTEXT_EXTRACTION_C6
# BOW files to prebuild for generating document representation.
prebuild_bows = {
"full_text": {"function": "getDocBOWfull", "parameters": [1]},
}
# bow_name is just about the name of the file containing the BOWs
prebuild_indeces = {
}
# the name of the index is important, should be unique
prebuild_general_indexes = {
# "full_text_aac_2010": {"type": "standard_multi", "bow_name": "full_text", "parameters": [1]},
"az_ilc_az_annotated_aac_2010":
{"type": "ilc_mashup",
"ilc_method": "ilc_annotated", # method that generates the bow
"mashup_method": "az_annotated",
# parameter has to match a parameter of a prebuilt bow
"parameters": [1],
"ilc_parameters": ["paragraph"],
"max_year": 2010 # cut-off point for adding files to index
},
}
doc_methods = {
# "_full_text": {"type": "standard_multi", "index": "az_ilc_az_annotated_aac_2010_1", "parameters": ["paragraph"],
# "runtime_parameters": {"_full_text": 1}}
"_all_text": {"type": "standard_multi", "index": "az_ilc_az_annotated_aac_2010_1", "parameters": ["paragraph"],
"runtime_parameters": {"_all_text": 1}},
# "mixed": {"type": "standard_multi",
# "index": "az_ilc_az_annotated_aac_2010_1",
# "parameters": ["paragraph"],
# "runtime_parameters": {"_all_text": 1}},
}
# this is the dict of query extraction methods
qmethods = {
"sentence": {"parameters": [
CONTEXT_EXTRACTION_C6
],
"method": "Sentences",
},
}
experiment = {
"name": "aac_full_text_kw_selection",
"description":
"Full-text indexing of AAC to test kw selection",
# dict of bag-of-word document representations to prebuild
"prebuild_bows": prebuild_bows,
# dict of per-file indexes to prebuild
"prebuild_indeces": prebuild_indeces,
# dict of general indexes to prebuild
"prebuild_general_indexes": prebuild_general_indexes,
# dictionary of document representation methods to test
"doc_methods": doc_methods,
# dictionary of query generation methods to test
"qmethods": qmethods,
# list of files in the test set
"test_files": [],
# SQL condition to automatically generate the list above
"test_files_condition": "metadata.num_in_collection_references:>0 AND metadata.year:>2010",
# how to sort test files
"test_files_sort": "metadata.num_in_collection_references:desc",
# This lets us pick just the first N files from which to generate queries
"max_test_files": 1000,
# Use Lucene DefaultSimilarity? As opposed to FieldAgnosticSimilarity
"use_default_similarity": True,
# Annotate sentences with AZ/CoreSC/etc?
"rhetorical_annotations": [],
# Run annotators? If False, it is assumed the sentences are already annotated
"run_rhetorical_annotators": False,
# Separate queries by AZ/CSC, etc?
"use_rhetorical_annotation": False,
## "weight_values":[],
# use full-collection retrival? If False, it runs "citation resolution"
"full_corpus": True,
# "compute_once", "train_weights", "test_selectors", "extract_kw", "test_kw_selection"
"type": "test_kw_selection",
# If full_corpus, this is the cut-off year for including documents in the general index.
# In this way we can separate test files and retrieval files.
"index_max_year": 2010,
# how many chunks to split each file for statistics on where the citation occurs
"numchunks": 10,
# name of CSV file to save results in
"output_filename": "results.csv",
"pivot_table": "",
"max_results_recall": 200,
# should queries be classified based on some rhetorical class of the sentence: "az", "csc_type", "" or None
"queries_classification": "",
# do not process more than this number of queries of the same type (type on line above)
"max_per_class_results": 1000,
# do not generate queries for more than this number of citations
"max_queries_generated": 1000,
# of all precomputed queries, which classes should be processed/evaluated?
"queries_to_process": ["ALL"],
# what "zones" to try to train weights for
"train_weights_for": [], # ["Bac"], ["Hyp","Mot","Bac","Goa","Obj","Met","Exp","Mod","Obs","Res","Con"]
# add another doc_method showing the score based on analytical random chance?
"add_random_control_result": False,
# "precomputed_queries_filename": "precomputed_queries.json",
"precomputed_queries_filename": "precomputed_queries_new1k.json",
# "files_dict_filename": "files_dict.json",
"files_dict_filename": "files_dict_new1k.json",
# what to extract as a citation's context
"context_extraction": "sentence",
"context_extraction_parameter": CONTEXT_EXTRACTION_C6,
# exact name of index to use for extracting idf scores etc. for document feature annotation
# "features_index_name": "idx_full_text_aac_2010_1",
"features_index_name": "idx_az_ilc_az_annotated_aac_2010_1_paragraph",
"features_field_name": "_all_text",
# parameters to keyword selection method
"keyword_selection_parameters": {
# "NBestSelector10": {"class": "NBestSelector",
# "parameters": {"N": 10}
# },
# "NBestSelector20": {"class": "NBestSelector",
# "parameters": {"N": 20}
# },
# "NBestSelector5": {"class": "NBestSelector",
# "parameters": {"N": 5}
# },
# "MinimalSetSelector": {"class": "MinimalSetSelector",
# "parameters": {
# "filter_stopwords": False
# }
# },
# "MultiMaximalSetSelector": {"class": "MultiMaximalSetSelector",
# "parameters": {
# "filter_stopwords": False
# }
# },
# "AllSelector": {"class": "AllSelector",
# "parameters": {
# "filter_stopwords": False
# }
# },
# "KPtester_nokp": {"class": "KPtester",
# "parameters": {"use_kps": False,
# "use_c3_stopword_list": True,
# "filter_stopwords": False,
# }
# },
#
# "KPtester_kp_add": {"class": "KPtester",
# "parameters": {"use_kps": True,
# "kp_method": "add",
# "use_c3_stopword_list": True,
# "filter_stopwords": False,
# }
# },
#
# "KPtester_kp_add_avg": {"class": "KPtester",
# "parameters": {"use_kps": True,
# "kp_method": "add",
# "use_c3_stopword_list": True,
# "filter_stopwords": False,
# "kp_score_compute": "avg"}
# },
# "KPtester_kp_sub": {"class": "KPtester",
# "parameters": {"use_kps": True,
# "kp_method": "sub",
# "use_c3_stopword_list": True,
# "filter_stopwords": False,
# }
# },
# "KPtester_kp_sub_avg": {"class": "KPtester",
# "parameters": {"use_kps": True,
# "kp_method": "sub",
# "use_c3_stopword_list": True,
# "filter_stopwords": False,
# "kp_score_compute": "avg"}
# },
#
"StopwordTester": {"class": "StopwordTester",
"parameters": {
}
},
# "QueryTester_sw_c6": {"class": "QueryTester",
# "parameters": {
# "filter_stopwords": True,
# "use_weights": True,
# }
# },
# "QueryTester_sw_c3": {"class": "QueryTester",
# "parameters": {
# "filter_stopwords": False,
# "use_weights": True,
# "use_c3_stopword_list": True
# }
# },
# "QueryTester_noswfilter": {"class": "QueryTester",
# "parameters": {
# "filter_stopwords": False,
# "use_weights": True,
# "use_all_original_text": True
# }
# },
},
# how many folds to use for training/evaluation
"cross_validation_folds": 4,
# an upper limit on the number of data points to use
## "max_data_points": 50000,
"filter_options_resolvable": {
# Should resolvable citations exclude those that have the same first author as the test document?
"exclude_same_first_author": True,
# How many authors can the citing and cited paper maximally overlap on?
"max_overlapping_authors": None, # How many authors can the citing and cited paper maximally overlap on?
# What's the max year for considering a citation? Should match index_max_year above
# "max_year": 2010,
},
"filter_options_ilc": {
# Should resolvable citations exclude those that have the same first author as the test document?
"exclude_same_first_author": True,
# How many authors can the citing and cited paper maximally overlap on?
"max_overlapping_authors": None, # How many authors can the citing and cited paper maximally overlap on?
# What's the max year for considering a citation? Should match index_max_year above
"max_year": 2010,
},
# "expand_match_guids": True,
# "match_guid_expansion_threshold": 0.2,
# "match_guid_expansion_max_add": 5,
}
options = {
"run_prebuild_bows": 0, # should the whole BOW building process run?
"overwrite_existing_bows": 0, # if a BOW exists already, should we overwrite it?
"build_indexes": 0, # rebuild indices?
"generate_queries": 0, # precompute the queries?
"overwrite_existing_queries": 0, # force rebuilding of queries too?
"force_regenerate_resolvable_citations": 0, # find again the resolvable citations in a file?
"clear_existing_prr_results": 1, # delete previous precomputed results? i.e. start from scratch
"run_precompute_retrieval": 1,
# only applies if type == "train_weights" or "extract_kw". This is necessary for run_feature_annotation! And this is because each pipeline may do different annotation
"run_feature_annotation": 0,
# annotate scidocs with document-wide features for keyword extraction? By default, False
"refresh_results_cache": 1, # should we clean the offline reader cache and redownload it all from elastic?
"run_experiment": 1, # must be set to 1 for "run_package_features" to work
"run_package_features": 1,
# should we read the cache and repackage all feature information, or use it if it exists already?
"run_query_start_at": 0,
"list_missing_files": 0,
# "max_queries_to_process": 10,
}
def main():
corpus = ez_connect("AAC", "aws-server")
exp = Experiment(experiment, options, False)
exp.run()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,403,679,893,338,411,800 | 43.638889 | 170 | 0.529947 | false |
Einsteinish/PyTune3 | apps/reader/views.py | 1 | 103912 | import datetime
import time
import boto
import redis
import requests
import random
import zlib
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.db import IntegrityError
from django.db.models import Q
from django.views.decorators.cache import never_cache
from django.core.urlresolvers import reverse
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, Http404, UnreadablePostError
from django.conf import settings
from django.core.mail import mail_admins
#from django.core.validators import email_re
from django.core.validators import EmailValidator
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.core.mail import EmailMultiAlternatives
from django.contrib.sites.models import Site
from django.utils import feedgenerator
from django.utils.encoding import smart_unicode
from mongoengine.queryset import OperationError
from mongoengine.queryset import NotUniqueError
from apps.recommendations.models import RecommendedFeed
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds
from apps.analyzer.models import apply_classifier_authors, apply_classifier_tags
from apps.analyzer.models import get_classifiers_for_user, sort_classifiers_by_feed
from apps.profile.models import Profile
from apps.reader.models import UserSubscription, UserSubscriptionFolders, RUserStory, Feature
from apps.reader.forms import SignupForm, LoginForm, FeatureForm
from apps.rss_feeds.models import MFeedIcon, MStarredStoryCounts
from apps.search.models import MUserSearch
from apps.statistics.models import MStatistics
# from apps.search.models import SearchStarredStory
try:
from apps.rss_feeds.models import Feed, MFeedPage, DuplicateFeed, MStory, MStarredStory
except:
pass
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices
from apps.social.models import MSocialSubscription, MActivity, MInteraction
from apps.categories.models import MCategory
from apps.social.views import load_social_page
from apps.rss_feeds.tasks import ScheduleImmediateFetches
from utils import json_functions as json
from utils.user_functions import get_user, ajax_login_required
from utils.feed_functions import relative_timesince
from utils.story_functions import format_story_link_date__short
from utils.story_functions import format_story_link_date__long
from utils.story_functions import strip_tags
from utils import log as logging
from utils.view_functions import get_argument_or_404, render_to, is_true
from utils.view_functions import required_params
from utils.ratelimit import ratelimit
from vendor.timezones.utilities import localtime_for_timezone
BANNED_URLS = [
"brentozar.com",
]
@never_cache
@render_to('reader/dashboard.xhtml')
def index(request, **kwargs):
if request.method == "GET" and request.subdomain and request.subdomain not in ['dev', 'www', 'debug']:
username = request.subdomain
if '.' in username:
username = username.split('.')[0]
user = User.objects.filter(username=username)
if not user:
user = User.objects.filter(username__iexact=username)
if user:
user = user[0]
if not user:
return HttpResponseRedirect('http://%s%s' % (
Site.objects.get_current().domain,
reverse('index')))
return load_social_page(request, user_id=user.pk, username=request.subdomain, **kwargs)
if request.user.is_anonymous():
return welcome(request, **kwargs)
else:
return dashboard(request, **kwargs)
def dashboard(request, **kwargs):
user = request.user
feed_count = UserSubscription.objects.filter(user=request.user).count()
recommended_feeds = RecommendedFeed.objects.filter(is_public=True,
approved_date__lte=datetime.datetime.now()
).select_related('feed')[:2]
unmoderated_feeds = []
if user.is_staff:
unmoderated_feeds = RecommendedFeed.objects.filter(is_public=False,
declined_date__isnull=True
).select_related('feed')[:2]
statistics = MStatistics.all()
social_profile = MSocialProfile.get_user(user.pk)
start_import_from_google_reader = request.session.get('import_from_google_reader', False)
if start_import_from_google_reader:
del request.session['import_from_google_reader']
if not user.is_active:
url = "https://%s%s" % (Site.objects.get_current().domain,
reverse('stripe-form'))
return HttpResponseRedirect(url)
logging.user(request, "~FBLoading dashboard")
return {
'user_profile' : user.profile,
'feed_count' : feed_count,
'account_images' : range(1, 4),
'recommended_feeds' : recommended_feeds,
'unmoderated_feeds' : unmoderated_feeds,
'statistics' : statistics,
'social_profile' : social_profile,
'start_import_from_google_reader': start_import_from_google_reader,
'debug' : settings.DEBUG,
}, "reader/dashboard.xhtml"
def welcome(request, **kwargs):
user = get_user(request)
statistics = MStatistics.all()
social_profile = MSocialProfile.get_user(user.pk)
if request.method == "POST":
if request.POST.get('submit', '').startswith('log'):
login_form = LoginForm(request.POST, prefix='login')
signup_form = SignupForm(prefix='signup')
else:
login_form = LoginForm(prefix='login')
signup_form = SignupForm(request.POST, prefix='signup')
else:
login_form = LoginForm(prefix='login')
signup_form = SignupForm(prefix='signup')
logging.user(request, "~FBLoading welcome")
return {
'user_profile' : hasattr(user, 'profile') and user.profile,
'login_form' : login_form,
'signup_form' : signup_form,
'statistics' : statistics,
'social_profile' : social_profile,
'post_request' : request.method == 'POST',
}, "reader/welcome.xhtml"
@never_cache
def login(request):
code = -1
message = ""
if request.method == "POST":
form = LoginForm(request.POST, prefix='login')
if form.is_valid():
login_user(request, form.get_user())
if request.POST.get('api'):
logging.user(form.get_user(), "~FG~BB~SKiPhone Login~FW")
code = 1
else:
logging.user(form.get_user(), "~FG~BBLogin~FW")
return HttpResponseRedirect(reverse('index'))
else:
message = form.errors.items()[0][1][0]
if request.POST.get('api'):
return HttpResponse(json.encode(dict(code=code, message=message)), content_type='application/json')
else:
return index(request)
@never_cache
def signup(request):
if request.method == "POST":
form = SignupForm(prefix='signup', data=request.POST)
if form.is_valid():
new_user = form.save()
login_user(request, new_user)
logging.user(new_user, "~FG~SB~BBNEW SIGNUP: ~FW%s" % new_user.email)
if not new_user.is_active:
url = "https://%s%s" % (Site.objects.get_current().domain,
reverse('stripe-form'))
return HttpResponseRedirect(url)
return index(request)
@never_cache
def logout(request):
logging.user(request, "~FG~BBLogout~FW")
logout_user(request)
if request.GET.get('api'):
return HttpResponse(json.encode(dict(code=1)), content_type='application/json')
else:
return HttpResponseRedirect(reverse('index'))
def autologin(request, username, secret):
next = request.GET.get('next', '')
if not username or not secret:
return HttpResponseForbidden()
profile = Profile.objects.filter(user__username=username, secret_token=secret)
if not profile:
return HttpResponseForbidden()
user = profile[0].user
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
logging.user(user, "~FG~BB~SKAuto-Login. Next stop: %s~FW" % (next if next else 'Homepage',))
if next and not next.startswith('/'):
next = '?next=' + next
return HttpResponseRedirect(reverse('index') + next)
elif next:
return HttpResponseRedirect(next)
else:
return HttpResponseRedirect(reverse('index'))
@ratelimit(minutes=1, requests=60)
@never_cache
@json.json_view
def load_feeds(request):
user = get_user(request)
feeds = {}
include_favicons = request.REQUEST.get('include_favicons', False)
flat = request.REQUEST.get('flat', False)
update_counts = request.REQUEST.get('update_counts', False)
version = int(request.REQUEST.get('v', 1))
if include_favicons == 'false': include_favicons = False
if update_counts == 'false': update_counts = False
if flat == 'false': flat = False
if flat: return load_feeds_flat(request)
try:
folders = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
data = dict(feeds=[], folders=[])
return data
except UserSubscriptionFolders.MultipleObjectsReturned:
UserSubscriptionFolders.objects.filter(user=user)[1:].delete()
folders = UserSubscriptionFolders.objects.get(user=user)
user_subs = UserSubscription.objects.select_related('feed').filter(user=user)
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
scheduled_feeds = []
for sub in user_subs:
pk = sub.feed_id
if update_counts and sub.needs_unread_recalc:
sub.calculate_feed_scores(silent=True)
feeds[pk] = sub.canonical(include_favicon=include_favicons)
if not sub.active: continue
if not sub.feed.active and not sub.feed.has_feed_exception:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.active_subscribers <= 0:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.next_scheduled_update < day_ago:
scheduled_feeds.append(sub.feed.pk)
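    # Feeds that look stale (inactive without a feed exception, no active subscribers, or not
    # scheduled for an update within the last day) get queued for an immediate background fetch.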
if len(scheduled_feeds) > 0 and request.user.is_authenticated():
logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
len(scheduled_feeds))
ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=user.pk).count()
social_params = {
'user_id': user.pk,
'include_favicon': include_favicons,
'update_counts': update_counts,
}
social_feeds = MSocialSubscription.feeds(**social_params)
social_profile = MSocialProfile.profile(user.pk)
social_services = MSocialServices.profile(user.pk)
categories = None
if not user_subs:
categories = MCategory.serialize()
logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB feeds/socials%s" % (
len(feeds.keys()), len(social_feeds), '. ~FCUpdating counts.' if update_counts else ''))
data = {
'feeds': feeds.values() if version == 2 else feeds,
'social_feeds': social_feeds,
'social_profile': social_profile,
'social_services': social_services,
'user_profile': user.profile,
"is_staff": user.is_staff,
'user_id': user.pk,
'folders': json.decode(folders.folders),
'starred_count': starred_count,
'starred_counts': starred_counts,
'categories': categories
}
return data
@json.json_view
def load_feed_favicons(request):
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_ids')
if not feed_ids:
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
feed_ids = [sub['feed__pk'] for sub in user_subs.values('feed__pk')]
feed_icons = dict([(i.feed_id, i.data) for i in MFeedIcon.objects(feed_id__in=feed_ids)])
return feed_icons
def load_feeds_flat(request):
user = request.user
include_favicons = is_true(request.REQUEST.get('include_favicons', False))
update_counts = is_true(request.REQUEST.get('update_counts', True))
include_inactive = is_true(request.REQUEST.get('include_inactive', False))
feeds = {}
inactive_feeds = {}
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
scheduled_feeds = []
iphone_version = "2.1" # Preserved forever. Don't change.
latest_ios_build = "52"
latest_ios_version = "5.0.0b2"
if include_favicons == 'false': include_favicons = False
if update_counts == 'false': update_counts = False
if not user.is_authenticated():
return HttpResponseForbidden()
try:
folders = UserSubscriptionFolders.objects.get(user=user)
except UserSubscriptionFolders.DoesNotExist:
folders = []
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
if not user_subs and folders:
folders.auto_activate()
user_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=True)
if include_inactive:
inactive_subs = UserSubscription.objects.select_related('feed').filter(user=user, active=False)
for sub in user_subs:
if update_counts and sub.needs_unread_recalc:
sub.calculate_feed_scores(silent=True)
feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
if not sub.feed.active and not sub.feed.has_feed_exception:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.active_subscribers <= 0:
scheduled_feeds.append(sub.feed.pk)
elif sub.feed.next_scheduled_update < day_ago:
scheduled_feeds.append(sub.feed.pk)
if include_inactive:
for sub in inactive_subs:
inactive_feeds[sub.feed_id] = sub.canonical(include_favicon=include_favicons)
if len(scheduled_feeds) > 0 and request.user.is_authenticated():
logging.user(request, "~SN~FMTasking the scheduling immediate fetch of ~SB%s~SN feeds..." %
len(scheduled_feeds))
ScheduleImmediateFetches.apply_async(kwargs=dict(feed_ids=scheduled_feeds, user_id=user.pk))
flat_folders = []
flat_folders_with_inactive = []
if folders:
flat_folders = folders.flatten_folders(feeds=feeds)
flat_folders_with_inactive = folders.flatten_folders(feeds=feeds,
inactive_feeds=inactive_feeds)
social_params = {
'user_id': user.pk,
'include_favicon': include_favicons,
'update_counts': update_counts,
}
social_feeds = MSocialSubscription.feeds(**social_params)
social_profile = MSocialProfile.profile(user.pk)
social_services = MSocialServices.profile(user.pk)
starred_counts, starred_count = MStarredStoryCounts.user_counts(user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=user.pk).count()
categories = None
if not user_subs:
categories = MCategory.serialize()
logging.user(request, "~FB~SBLoading ~FY%s~FB/~FM%s~FB/~FR%s~FB feeds/socials/inactive ~FMflat~FB%s" % (
len(feeds.keys()), len(social_feeds), len(inactive_feeds), '. ~FCUpdating counts.' if update_counts else ''))
data = {
"flat_folders": flat_folders,
"flat_folders_with_inactive": flat_folders_with_inactive,
"feeds": feeds if not include_inactive else {"0": "Don't include `include_inactive=true` if you want active feeds."},
"inactive_feeds": inactive_feeds if include_inactive else {"0": "Include `include_inactive=true`"},
"social_feeds": social_feeds,
"social_profile": social_profile,
"social_services": social_services,
"user": user.username,
"user_id": user.pk,
"is_staff": user.is_staff,
"user_profile": user.profile,
"iphone_version": iphone_version,
"latest_ios_build": latest_ios_build,
"latest_ios_version": latest_ios_version,
"categories": categories,
'starred_count': starred_count,
'starred_counts': starred_counts,
'share_ext_token': user.profile.secret_token,
}
return data
@ratelimit(minutes=1, requests=10)
@never_cache
@json.json_view
def refresh_feeds(request):
start = datetime.datetime.now()
user = get_user(request)
feed_ids = request.REQUEST.getlist('feed_id')
check_fetch_status = request.REQUEST.get('check_fetch_status')
favicons_fetching = request.REQUEST.getlist('favicons_fetching')
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids or (not social_feed_ids and not feed_ids):
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids,
check_fetch_status=check_fetch_status)
checkpoint1 = datetime.datetime.now()
social_feeds = {}
if social_feed_ids or (not social_feed_ids and not feed_ids):
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
checkpoint2 = datetime.datetime.now()
favicons_fetching = [int(f) for f in favicons_fetching if f]
feed_icons = {}
if favicons_fetching:
feed_icons = dict([(i.feed_id, i) for i in MFeedIcon.objects(feed_id__in=favicons_fetching)])
for feed_id, feed in feeds.items():
if feed_id in favicons_fetching and feed_id in feed_icons:
feeds[feed_id]['favicon'] = feed_icons[feed_id].data
feeds[feed_id]['favicon_color'] = feed_icons[feed_id].color
feeds[feed_id]['favicon_fetching'] = feed.get('favicon_fetching')
user_subs = UserSubscription.objects.filter(user=user, active=True).only('feed')
sub_feed_ids = [s.feed_id for s in user_subs]
if favicons_fetching:
moved_feed_ids = [f for f in favicons_fetching if f not in sub_feed_ids]
for moved_feed_id in moved_feed_ids:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=moved_feed_id)
if duplicate_feeds and duplicate_feeds[0].feed.pk in feeds:
feeds[moved_feed_id] = feeds[duplicate_feeds[0].feed_id]
feeds[moved_feed_id]['dupe_feed_id'] = duplicate_feeds[0].feed_id
if check_fetch_status:
missing_feed_ids = list(set(feed_ids) - set(sub_feed_ids))
if missing_feed_ids:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id__in=missing_feed_ids)
for duplicate_feed in duplicate_feeds:
feeds[duplicate_feed.duplicate_feed_id] = {'id': duplicate_feed.feed_id}
interactions_count = MInteraction.user_unread_count(user.pk)
if True or settings.DEBUG or check_fetch_status:
end = datetime.datetime.now()
extra_fetch = ""
if check_fetch_status or favicons_fetching:
extra_fetch = "(%s/%s)" % (check_fetch_status, len(favicons_fetching))
logging.user(request, "~FBRefreshing %s+%s feeds %s (%.4s/%.4s/%.4s)" % (
len(feeds.keys()), len(social_feeds.keys()), extra_fetch,
(checkpoint1-start).total_seconds(),
(checkpoint2-start).total_seconds(),
(end-start).total_seconds(),
))
return {
'feeds': feeds,
'social_feeds': social_feeds,
'interactions_count': interactions_count,
}
@json.json_view
def interactions_count(request):
user = get_user(request)
interactions_count = MInteraction.user_unread_count(user.pk)
return {
'interactions_count': interactions_count,
}
@never_cache
@ajax_login_required
@json.json_view
def feed_unread_count(request):
user = request.user
feed_ids = request.REQUEST.getlist('feed_id')
force = request.REQUEST.get('force', False)
social_feed_ids = [feed_id for feed_id in feed_ids if 'social:' in feed_id]
feed_ids = list(set(feed_ids) - set(social_feed_ids))
feeds = {}
if feed_ids:
feeds = UserSubscription.feeds_with_updated_counts(user, feed_ids=feed_ids, force=force)
social_feeds = {}
if social_feed_ids:
social_feeds = MSocialSubscription.feeds_with_updated_counts(user, social_feed_ids=social_feed_ids)
if len(feed_ids) == 1:
if settings.DEBUG:
feed_title = Feed.get_by_id(feed_ids[0]).feed_title
else:
feed_title = feed_ids[0]
elif len(social_feed_ids) == 1:
feed_title = MSocialProfile.objects.get(user_id=social_feed_ids[0].replace('social:', '')).username
else:
feed_title = "%s feeds" % (len(feeds) + len(social_feeds))
logging.user(request, "~FBUpdating unread count on: %s" % feed_title)
return {'feeds': feeds, 'social_feeds': social_feeds}
def refresh_feed(request, feed_id):
user = get_user(request)
feed = get_object_or_404(Feed, pk=feed_id)
feed = feed.update(force=True, compute_scores=False)
usersub = UserSubscription.objects.get(user=user, feed=feed)
usersub.calculate_feed_scores(silent=False)
logging.user(request, "~FBRefreshing feed: %s" % feed)
return load_single_feed(request, feed_id)
@never_cache
@json.json_view
def load_single_feed(request, feed_id):
start = time.time()
user = get_user(request)
# offset = int(request.REQUEST.get('offset', 0))
# limit = int(request.REQUEST.get('limit', 6))
limit = 6
page = int(request.REQUEST.get('page', 1))
offset = limit * (page-1)
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'all')
query = request.REQUEST.get('query', '').strip()
include_story_content = is_true(request.REQUEST.get('include_story_content', True))
include_hidden = is_true(request.REQUEST.get('include_hidden', False))
message = None
user_search = None
dupe_feed_id = None
user_profiles = []
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
if not feed_id: raise Http404
feed_address = request.REQUEST.get('feed_address')
feed = Feed.get_by_id(feed_id, feed_address=feed_address)
if not feed:
raise Http404
try:
usersub = UserSubscription.objects.get(user=user, feed=feed)
except UserSubscription.DoesNotExist:
usersub = None
if feed.is_newsletter and not usersub:
# User must be subscribed to a newsletter in order to read it
raise Http404
if query:
if user.profile.is_premium:
user_search = MUserSearch.get_user(user.pk)
user_search.touch_search_date()
stories = feed.find_stories(query, order=order, offset=offset, limit=limit)
else:
stories = []
message = "You must be a premium subscriber to search."
elif read_filter == 'starred':
mstories = MStarredStory.objects(
user_id=user.pk,
story_feed_id=feed_id
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
elif usersub and (read_filter == 'unread' or order == 'oldest'):
stories = usersub.get_stories(order=order, read_filter=read_filter, offset=offset, limit=limit,
default_cutoff_date=user.profile.unread_cutoff)
else:
stories = feed.get_stories(offset, limit)
checkpoint1 = time.time()
try:
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
except redis.ConnectionError:
logging.user(request, "~BR~FK~SBRedis is unavailable for shared stories.")
checkpoint2 = time.time()
# Get intelligence classifier for user
if usersub and usersub.is_trained:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk, feed_id=feed_id, social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk, feed_id=feed_id))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk, feed_id=feed_id))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk, feed_id=feed_id))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = get_classifiers_for_user(user, feed_id=feed_id,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
checkpoint3 = time.time()
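    # Collect the unread, starred, and shared story hashes needed to annotate the stories below.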
unread_story_hashes = []
if stories:
if (read_filter == 'all' or query) and usersub:
unread_story_hashes = UserSubscription.story_hashes(user.pk, read_filter='unread',
feed_ids=[usersub.feed_id],
usersubs=[usersub],
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
story_hashes = [story['story_hash'] for story in stories if story['story_hash']]
starred_stories = MStarredStory.objects(user_id=user.pk,
story_feed_id=feed.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'starred_date', 'user_tags')
shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
shared_stories = []
if shared_story_hashes:
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=shared_story_hashes)\
.only('story_hash', 'shared_date', 'comments')
starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
user_tags=story.user_tags))
for story in starred_stories])
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
checkpoint4 = time.time()
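    # Annotate each story with localized dates, read status, starred/shared metadata, and classifier scores.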
for story in stories:
if not include_story_content:
del story['story_content']
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
nowtz = localtime_for_timezone(now, user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if usersub:
story['read_status'] = 1
if story['story_date'] < user.profile.unread_cutoff:
story['read_status'] = 1
elif (read_filter == 'all' or query) and usersub:
story['read_status'] = 1 if story['story_hash'] not in unread_story_hashes else 0
elif read_filter == 'unread' and usersub:
story['read_status'] = 0
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
if story['story_hash'] in shared_stories:
story['shared'] = True
shared_date = localtime_for_timezone(shared_stories[story['story_hash']]['shared_date'],
user.profile.timezone)
story['shared_date'] = format_story_link_date__long(shared_date, now)
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
else:
story['read_status'] = 1
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, feed),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
# Intelligence
feed_tags = json.decode(feed.data.popular_tags) if feed.data.popular_tags else []
feed_authors = json.decode(feed.data.popular_authors) if feed.data.popular_authors else []
if usersub:
usersub.feed_opens += 1
usersub.needs_unread_recalc = True
usersub.save(update_fields=['feed_opens', 'needs_unread_recalc'])
diff1 = checkpoint1-start
diff2 = checkpoint2-start
diff3 = checkpoint3-start
diff4 = checkpoint4-start
timediff = time.time()-start
last_update = relative_timesince(feed.last_update)
time_breakdown = ""
if timediff > 1 or settings.DEBUG:
time_breakdown = "~SN~FR(~SB%.4s/%.4s/%.4s/%.4s~SN)" % (
diff1, diff2, diff3, diff4)
search_log = "~SN~FG(~SB%s~SN) " % query if query else ""
logging.user(request, "~FYLoading feed: ~SB%s%s (%s/%s) %s%s" % (
feed.feed_title[:22], ('~SN/p%s' % page) if page > 1 else '', order, read_filter, search_log, time_breakdown))
if not include_hidden:
hidden_stories_removed = 0
new_stories = []
for story in stories:
if story['score'] >= 0:
new_stories.append(story)
else:
hidden_stories_removed += 1
stories = new_stories
data = dict(stories=stories,
user_profiles=user_profiles,
feed_tags=feed_tags,
feed_authors=feed_authors,
classifiers=classifiers,
updated=last_update,
user_search=user_search,
feed_id=feed.pk,
elapsed_time=round(float(timediff), 2),
message=message)
if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
if dupe_feed_id: data['dupe_feed_id'] = dupe_feed_id
if not usersub:
data.update(feed.canonical())
# if not usersub and feed.num_subscribers <= 1:
# data = dict(code=-1, message="You must be subscribed to this feed.")
# if page <= 3:
# import random
# time.sleep(random.randint(2, 4))
# if page == 2:
# assert False
return data
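# Serves the original HTML page for a feed: first the node page server, then S3 (proxied or
# via redirect), and finally the copy stored in MFeedPage; returns a 404 page if none exists.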
def load_feed_page(request, feed_id):
if not feed_id:
raise Http404
feed = Feed.get_by_id(feed_id)
if feed and feed.has_page and not feed.has_page_exception:
if settings.BACKED_BY_AWS.get('pages_on_node'):
url = "http://%s/original_page/%s" % (
settings.ORIGINAL_PAGE_SERVER,
feed.pk,
)
page_response = requests.get(url)
if page_response.status_code == 200:
response = HttpResponse(page_response.content, content_type="text/html; charset=utf-8")
response['Content-Encoding'] = 'gzip'
response['Last-Modified'] = page_response.headers.get('Last-modified')
response['Etag'] = page_response.headers.get('Etag')
response['Content-Length'] = str(len(page_response.content))
logging.user(request, "~FYLoading original page, proxied from node: ~SB%s bytes" %
(len(page_response.content)))
return response
if settings.BACKED_BY_AWS['pages_on_s3'] and feed.s3_page:
if settings.PROXY_S3_PAGES:
key = settings.S3_PAGES_BUCKET.get_key(feed.s3_pages_key)
if key:
compressed_data = key.get_contents_as_string()
response = HttpResponse(compressed_data, content_type="text/html; charset=utf-8")
response['Content-Encoding'] = 'gzip'
logging.user(request, "~FYLoading original page, proxied: ~SB%s bytes" %
(len(compressed_data)))
return response
else:
logging.user(request, "~FYLoading original page, non-proxied")
return HttpResponseRedirect('//%s/%s' % (settings.S3_PAGES_BUCKET_NAME,
feed.s3_pages_key))
data = MFeedPage.get_data(feed_id=feed_id)
if not data or not feed or not feed.has_page or feed.has_page_exception:
logging.user(request, "~FYLoading original page, ~FRmissing")
return render(request, 'static/404_original_page.xhtml', {},
content_type='text/html',
status=404)
logging.user(request, "~FYLoading original page, from the db")
return HttpResponse(data, content_type="text/html; charset=utf-8")
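# Returns a page of the user's saved (starred) stories, selected by search query, tag, explicit
# story hashes, or plain date order, plus canonical data for feeds the user no longer subscribes to.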
@json.json_view
def load_starred_stories(request):
user = get_user(request)
offset = int(request.REQUEST.get('offset', 0))
limit = int(request.REQUEST.get('limit', 10))
page = int(request.REQUEST.get('page', 0))
query = request.REQUEST.get('query', '').strip()
order = request.REQUEST.get('order', 'newest')
tag = request.REQUEST.get('tag')
story_hashes = request.REQUEST.getlist('h')[:100]
version = int(request.REQUEST.get('v', 1))
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
message = None
order_by = '-' if order == "newest" else ""
if page: offset = limit * (page - 1)
if query:
# results = SearchStarredStory.query(user.pk, query)
# story_ids = [result.db_id for result in results]
if user.profile.is_premium:
stories = MStarredStory.find_stories(query, user.pk, tag=tag, offset=offset, limit=limit,
order=order)
else:
stories = []
message = "You must be a premium subscriber to search."
elif tag:
if user.profile.is_premium:
mstories = MStarredStory.objects(
user_id=user.pk,
user_tags__contains=tag
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
stories = []
message = "You must be a premium subscriber to read saved stories by tag."
elif story_hashes:
mstories = MStarredStory.objects(
user_id=user.pk,
story_hash__in=story_hashes
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
mstories = MStarredStory.objects(
user_id=user.pk
).order_by('%sstarred_date' % order_by)[offset:offset+limit]
stories = Feed.format_stories(mstories)
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
story_hashes = [story['story_hash'] for story in stories]
story_feed_ids = list(set(s['story_feed_id'] for s in stories))
usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
usersub_ids = [us['feed__pk'] for us in usersub_ids]
unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
unsub_feeds = dict((feed.pk, feed.canonical(include_favicon=False)) for feed in unsub_feeds)
shared_story_hashes = MSharedStory.check_shared_story_hashes(user.pk, story_hashes)
shared_stories = []
if shared_story_hashes:
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=shared_story_hashes)\
.only('story_hash', 'shared_date', 'comments')
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
starred_date = localtime_for_timezone(story['starred_date'], user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, nowtz)
story['starred_timestamp'] = starred_date.strftime('%s')
story['read_status'] = 1
story['starred'] = True
story['intelligence'] = {
'feed': 1,
'author': 0,
'tags': 0,
'title': 0,
}
if story['story_hash'] in shared_stories:
story['shared'] = True
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
logging.user(request, "~FCLoading starred stories: ~SB%s stories %s" % (len(stories), search_log))
return {
"stories": stories,
"user_profiles": user_profiles,
'feeds': unsub_feeds.values() if version == 2 else unsub_feeds,
"message": message,
}
@json.json_view
def starred_story_hashes(request):
user = get_user(request)
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
mstories = MStarredStory.objects(
user_id=user.pk
).only('story_hash', 'starred_date').order_by('-starred_date')
if include_timestamps:
story_hashes = [(s.story_hash, s.starred_date.strftime("%s")) for s in mstories]
else:
story_hashes = [s.story_hash for s in mstories]
logging.user(request, "~FYLoading ~FCstarred story hashes~FY: %s story hashes" %
(len(story_hashes)))
return dict(starred_story_hashes=story_hashes)
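# Builds a feed of the user's 25 most recently saved stories for the given tag slug
# (or of all saved stories when the slug maps to no tag).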
def starred_stories_rss_feed(request, user_id, secret_token, tag_slug):
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
raise Http404
try:
tag_counts = MStarredStoryCounts.objects.get(user_id=user_id, slug=tag_slug)
except MStarredStoryCounts.MultipleObjectsReturned:
tag_counts = MStarredStoryCounts.objects(user_id=user_id, slug=tag_slug).first()
except MStarredStoryCounts.DoesNotExist:
raise Http404
data = {}
data['title'] = "Saved Stories - %s" % tag_counts.tag
data['link'] = "%s%s" % (
settings.PYTUNE_URL,
reverse('saved-stories-tag', kwargs=dict(tag_name=tag_slug)))
data['description'] = "Stories saved by %s on PyTune with the tag \"%s\"." % (user.username,
tag_counts.tag)
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'PyTune - %s' % settings.PYTUNE_URL
data['docs'] = None
data['author_name'] = user.username
data['feed_url'] = "%s%s" % (
settings.PYTUNE_URL,
reverse('starred-stories-rss-feed',
kwargs=dict(user_id=user_id, secret_token=secret_token, tag_slug=tag_slug)),
)
rss = feedgenerator.Atom1Feed(**data)
if not tag_counts.tag:
starred_stories = MStarredStory.objects(
user_id=user.pk
).order_by('-starred_date').limit(25)
else:
starred_stories = MStarredStory.objects(
user_id=user.pk,
user_tags__contains=tag_counts.tag
).order_by('-starred_date').limit(25)
for starred_story in starred_stories:
story_data = {
'title': starred_story.story_title,
'link': starred_story.story_permalink,
'description': (starred_story.story_content_z and
zlib.decompress(starred_story.story_content_z)),
'author_name': starred_story.story_author_name,
'categories': starred_story.story_tags,
'unique_id': starred_story.story_guid,
'pubdate': starred_story.starred_date,
}
rss.add_item(**story_data)
logging.user(request, "~FBGenerating ~SB%s~SN's saved story RSS feed (%s, %s stories): ~FM%s" % (
user.username,
tag_counts.tag,
tag_counts.count,
request.META.get('HTTP_USER_AGENT', "")[:24]
))
return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml')
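# Generates a feed of the latest stories in a folder, scoring them with the user's classifiers and
# filtering by the requested unread/focus setting; non-premium accounts only receive a placeholder item.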
def folder_rss_feed(request, user_id, secret_token, unread_filter, folder_slug):
domain = Site.objects.get_current().domain
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
raise Http404
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=user)
feed_ids, folder_title = user_sub_folders.feed_ids_under_folder_slug(folder_slug)
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids)
if feed_ids and user.profile.is_premium:
params = {
"user_id": user.pk,
"feed_ids": feed_ids,
"offset": 0,
"limit": 20,
"order": 'newest',
"read_filter": 'all',
"cache_prefix": "RSS:"
}
story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params)
else:
story_hashes = []
unread_feed_story_hashes = []
mstories = MStory.objects(story_hash__in=story_hashes).order_by('-story_date')
stories = Feed.format_stories(mstories)
filtered_stories = []
found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
if found_trained_feed_ids:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids,
social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
for story in stories:
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
if unread_filter == 'focus' and story['score'] >= 1:
filtered_stories.append(story)
elif unread_filter == 'unread' and story['score'] >= 0:
filtered_stories.append(story)
stories = filtered_stories
data = {}
data['title'] = "%s from %s (%s sites)" % (folder_title, user.username, len(feed_ids))
data['link'] = "https://%s%s" % (
domain,
reverse('folder', kwargs=dict(folder_name=folder_title)))
data['description'] = "Unread stories in %s on PyTune. From %s's account and contains %s sites." % (
folder_title,
user.username,
len(feed_ids))
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'PyTune - %s' % settings.PYTUNE_URL
data['docs'] = None
data['author_name'] = user.username
data['feed_url'] = "https://%s%s" % (
domain,
reverse('folder-rss-feed',
kwargs=dict(user_id=user_id, secret_token=secret_token, unread_filter=unread_filter, folder_slug=folder_slug)),
)
rss = feedgenerator.Atom1Feed(**data)
for story in stories:
feed = Feed.get_by_id(story['story_feed_id'])
story_content = """<img src="//%s/rss_feeds/icon/%s"> %s <br><br> %s""" % (
Site.objects.get_current().domain,
story['story_feed_id'],
feed.feed_title if feed else "",
smart_unicode(story['story_content'])
)
story_data = {
'title': story['story_title'],
'link': story['story_permalink'],
'description': story_content,
'categories': story['story_tags'],
'unique_id': 'https://%s/site/%s/%s/' % (domain, story['story_feed_id'], story['guid_hash']),
'pubdate': localtime_for_timezone(story['story_date'], user.profile.timezone),
}
if story['story_authors']:
story_data['author_name'] = story['story_authors']
rss.add_item(**story_data)
if not user.profile.is_premium:
story_data = {
'title': "You must have a premium account on PyTune to have RSS feeds for folders.",
'link': "https://%s" % domain,
'description': "You must have a premium account on PyTune to have RSS feeds for folders.",
'unique_id': "https://%s/premium_only" % domain,
'pubdate': localtime_for_timezone(datetime.datetime.now(), user.profile.timezone),
}
rss.add_item(**story_data)
logging.user(request, "~FBGenerating ~SB%s~SN's folder RSS feed (%s, %s stories): ~FM%s" % (
user.username,
folder_title,
len(stories),
request.META.get('HTTP_USER_AGENT', "")[:24]
))
return HttpResponse(rss.writeString('utf-8'), content_type='application/rss+xml')
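# Returns a page of stories the user has already read, annotated with starred and shared state;
# searching read stories is not implemented yet.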
@json.json_view
def load_read_stories(request):
user = get_user(request)
offset = int(request.REQUEST.get('offset', 0))
limit = int(request.REQUEST.get('limit', 10))
page = int(request.REQUEST.get('page', 0))
order = request.REQUEST.get('order', 'newest')
query = request.REQUEST.get('query', '').strip()
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
message = None
if page: offset = limit * (page - 1)
if query:
stories = []
message = "Not implemented yet."
# if user.profile.is_premium:
# stories = MStarredStory.find_stories(query, user.pk, offset=offset, limit=limit)
# else:
# stories = []
# message = "You must be a premium subscriber to search."
else:
story_hashes = RUserStory.get_read_stories(user.pk, offset=offset, limit=limit, order=order)
mstories = MStory.objects(story_hash__in=story_hashes)
stories = Feed.format_stories(mstories)
stories = sorted(stories, key=lambda story: story_hashes.index(story['story_hash']),
reverse=bool(order=="oldest"))
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk, check_all=True)
story_hashes = [story['story_hash'] for story in stories]
story_feed_ids = list(set(s['story_feed_id'] for s in stories))
usersub_ids = UserSubscription.objects.filter(user__pk=user.pk, feed__pk__in=story_feed_ids).values('feed__pk')
usersub_ids = [us['feed__pk'] for us in usersub_ids]
unsub_feed_ids = list(set(story_feed_ids).difference(set(usersub_ids)))
unsub_feeds = Feed.objects.filter(pk__in=unsub_feed_ids)
unsub_feeds = [feed.canonical(include_favicon=False) for feed in unsub_feeds]
shared_stories = MSharedStory.objects(user_id=user.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'shared_date', 'comments')
shared_stories = dict([(story.story_hash, dict(shared_date=story.shared_date,
comments=story.comments))
for story in shared_stories])
starred_stories = MStarredStory.objects(user_id=user.pk,
story_hash__in=story_hashes)\
.only('story_hash', 'starred_date')
starred_stories = dict([(story.story_hash, story.starred_date)
for story in starred_stories])
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
story['read_status'] = 1
story['intelligence'] = {
'feed': 1,
'author': 0,
'tags': 0,
'title': 0,
}
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
if story['story_hash'] in shared_stories:
story['shared'] = True
story['shared_comments'] = strip_tags(shared_stories[story['story_hash']]['comments'])
search_log = "~SN~FG(~SB%s~SN)" % query if query else ""
logging.user(request, "~FCLoading read stories: ~SB%s stories %s" % (len(stories), search_log))
return {
"stories": stories,
"user_profiles": user_profiles,
"feeds": unsub_feeds,
"message": message,
}
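# The "river" view: assembles stories across many feeds (or from explicit story hashes, a search
# query, or saved stories), then annotates each with read state, starred metadata, and classifier
# scores. Non-premium users are told that the full river is a premium feature.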
@json.json_view
def load_river_stories__redis(request):
limit = 12
start = time.time()
user = get_user(request)
message = None
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feeds') if feed_id]
if not feed_ids:
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('f') if feed_id]
story_hashes = request.REQUEST.getlist('h')[:100]
original_feed_ids = list(feed_ids)
page = int(request.REQUEST.get('page', 1))
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'unread')
query = request.REQUEST.get('query', '').strip()
include_hidden = is_true(request.REQUEST.get('include_hidden', False))
now = localtime_for_timezone(datetime.datetime.now(), user.profile.timezone)
usersubs = []
code = 1
user_search = None
offset = (page-1) * limit
limit = page * limit
story_date_order = "%sstory_date" % ('' if order == 'oldest' else '-')
if story_hashes:
unread_feed_story_hashes = None
read_filter = 'unread'
mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
stories = Feed.format_stories(mstories)
elif query:
if user.profile.is_premium:
user_search = MUserSearch.get_user(user.pk)
user_search.touch_search_date()
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
read_filter='all')
feed_ids = [sub.feed_id for sub in usersubs]
stories = Feed.find_feed_stories(feed_ids, query, order=order, offset=offset, limit=limit)
mstories = stories
unread_feed_story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
read_filter="unread", order=order,
group_by_feed=False,
cutoff_date=user.profile.unread_cutoff)
else:
stories = []
mstories = []
message = "You must be a premium subscriber to search."
elif read_filter == 'starred':
mstories = MStarredStory.objects(
user_id=user.pk,
story_feed_id__in=feed_ids
).order_by('%sstarred_date' % ('-' if order == 'newest' else ''))[offset:offset+limit]
stories = Feed.format_stories(mstories)
else:
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=feed_ids,
read_filter=read_filter)
all_feed_ids = [f for f in feed_ids]
feed_ids = [sub.feed_id for sub in usersubs]
if feed_ids:
params = {
"user_id": user.pk,
"feed_ids": feed_ids,
"all_feed_ids": all_feed_ids,
"offset": offset,
"limit": limit,
"order": order,
"read_filter": read_filter,
"usersubs": usersubs,
"cutoff_date": user.profile.unread_cutoff,
}
story_hashes, unread_feed_story_hashes = UserSubscription.feed_stories(**params)
else:
story_hashes = []
unread_feed_story_hashes = []
mstories = MStory.objects(story_hash__in=story_hashes).order_by(story_date_order)
stories = Feed.format_stories(mstories)
found_feed_ids = list(set([story['story_feed_id'] for story in stories]))
stories, user_profiles = MSharedStory.stories_with_comments_and_profiles(stories, user.pk)
if not usersubs:
usersubs = UserSubscription.subs_for_feeds(user.pk, feed_ids=found_feed_ids,
read_filter=read_filter)
trained_feed_ids = [sub.feed_id for sub in usersubs if sub.is_trained]
found_trained_feed_ids = list(set(trained_feed_ids) & set(found_feed_ids))
# Find starred stories
if found_feed_ids:
if read_filter == 'starred':
starred_stories = mstories
else:
starred_stories = MStarredStory.objects(
user_id=user.pk,
story_feed_id__in=found_feed_ids
).only('story_hash', 'starred_date')
starred_stories = dict([(story.story_hash, dict(starred_date=story.starred_date,
user_tags=story.user_tags))
for story in starred_stories])
else:
starred_stories = {}
# Intelligence classifiers for all feeds involved
if found_trained_feed_ids:
classifier_feeds = list(MClassifierFeed.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids,
social_user_id=0))
classifier_authors = list(MClassifierAuthor.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_titles = list(MClassifierTitle.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
classifier_tags = list(MClassifierTag.objects(user_id=user.pk,
feed_id__in=found_trained_feed_ids))
else:
classifier_feeds = []
classifier_authors = []
classifier_titles = []
classifier_tags = []
classifiers = sort_classifiers_by_feed(user=user, feed_ids=found_feed_ids,
classifier_feeds=classifier_feeds,
classifier_authors=classifier_authors,
classifier_titles=classifier_titles,
classifier_tags=classifier_tags)
# Just need to format stories
nowtz = localtime_for_timezone(now, user.profile.timezone)
for story in stories:
if read_filter == 'starred':
story['read_status'] = 1
else:
story['read_status'] = 0
if read_filter == 'all' or query:
if (unread_feed_story_hashes is not None and
story['story_hash'] not in unread_feed_story_hashes):
story['read_status'] = 1
story_date = localtime_for_timezone(story['story_date'], user.profile.timezone)
story['short_parsed_date'] = format_story_link_date__short(story_date, nowtz)
story['long_parsed_date'] = format_story_link_date__long(story_date, nowtz)
if story['story_hash'] in starred_stories:
story['starred'] = True
starred_date = localtime_for_timezone(starred_stories[story['story_hash']]['starred_date'],
user.profile.timezone)
story['starred_date'] = format_story_link_date__long(starred_date, now)
story['starred_timestamp'] = starred_date.strftime('%s')
story['user_tags'] = starred_stories[story['story_hash']]['user_tags']
story['intelligence'] = {
'feed': apply_classifier_feeds(classifier_feeds, story['story_feed_id']),
'author': apply_classifier_authors(classifier_authors, story),
'tags': apply_classifier_tags(classifier_tags, story),
'title': apply_classifier_titles(classifier_titles, story),
}
story['score'] = UserSubscription.score_story(story['intelligence'])
if not user.profile.is_premium:
message = "The full River of News is a premium feature."
code = 0
# if page > 1:
# stories = []
# else:
# stories = stories[:5]
diff = time.time() - start
timediff = round(float(diff), 2)
logging.user(request, "~FYLoading ~FCriver stories~FY: ~SBp%s~SN (%s/%s "
"stories, ~SN%s/%s/%s feeds, %s/%s)" %
(page, len(stories), len(mstories), len(found_feed_ids),
len(feed_ids), len(original_feed_ids), order, read_filter))
if not include_hidden:
hidden_stories_removed = 0
new_stories = []
for story in stories:
if story['score'] >= 0:
new_stories.append(story)
else:
hidden_stories_removed += 1
stories = new_stories
# if page <= 1:
# import random
# time.sleep(random.randint(0, 6))
data = dict(code=code,
message=message,
stories=stories,
classifiers=classifiers,
elapsed_time=timediff,
user_search=user_search,
user_profiles=user_profiles)
if not include_hidden: data['hidden_stories_removed'] = hidden_stories_removed
return data
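# Legacy per-subscription implementation for collecting unread story hashes; superseded by
# unread_story_hashes() below.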
@json.json_view
def unread_story_hashes__old(request):
user = get_user(request)
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
usersubs = {}
if not feed_ids:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True)
feed_ids = [sub.feed_id for sub in usersubs]
else:
usersubs = UserSubscription.objects.filter(Q(unread_count_neutral__gt=0) |
Q(unread_count_positive__gt=0),
user=user, active=True, feed__in=feed_ids)
unread_feed_story_hashes = {}
story_hash_count = 0
usersubs = dict((sub.feed_id, sub) for sub in usersubs)
for feed_id in feed_ids:
if feed_id in usersubs:
us = usersubs[feed_id]
else:
continue
if not us.unread_count_neutral and not us.unread_count_positive:
continue
unread_feed_story_hashes[feed_id] = us.get_stories(read_filter='unread', limit=500,
withscores=include_timestamps,
hashes_only=True,
default_cutoff_date=user.profile.unread_cutoff)
story_hash_count += len(unread_feed_story_hashes[feed_id])
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
                 (len(feed_ids), story_hash_count))
return dict(unread_feed_story_hashes=unread_feed_story_hashes)
@json.json_view
def unread_story_hashes(request):
user = get_user(request)
feed_ids = [int(feed_id) for feed_id in request.REQUEST.getlist('feed_id') if feed_id]
include_timestamps = is_true(request.REQUEST.get('include_timestamps', False))
order = request.REQUEST.get('order', 'newest')
read_filter = request.REQUEST.get('read_filter', 'unread')
story_hashes = UserSubscription.story_hashes(user.pk, feed_ids=feed_ids,
order=order, read_filter=read_filter,
include_timestamps=include_timestamps,
cutoff_date=user.profile.unread_cutoff)
logging.user(request, "~FYLoading ~FCunread story hashes~FY: ~SB%s feeds~SN (%s story hashes)" %
(len(feed_ids), len(story_hashes)))
return dict(unread_feed_story_hashes=story_hashes)
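# Marks every feed and social subscription as read, either immediately (days=0) or only for
# stories older than the given number of days.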
@ajax_login_required
@json.json_view
def mark_all_as_read(request):
code = 1
try:
days = int(request.REQUEST.get('days', 0))
except ValueError:
return dict(code=-1, message="Days parameter must be an integer, not: %s" %
request.REQUEST.get('days'))
read_date = datetime.datetime.utcnow() - datetime.timedelta(days=days)
feeds = UserSubscription.objects.filter(user=request.user)
socialsubs = MSocialSubscription.objects.filter(user_id=request.user.pk)
for subtype in [feeds, socialsubs]:
for sub in subtype:
if days == 0:
sub.mark_feed_read()
else:
if sub.mark_read_date < read_date:
sub.needs_unread_recalc = True
sub.mark_read_date = read_date
sub.save()
logging.user(request, "~FMMarking all as read: ~SB%s days" % (days,))
return dict(code=code)
@ajax_login_required
@json.json_view
def mark_story_as_read(request):
story_ids = request.REQUEST.getlist('story_id')
try:
feed_id = int(get_argument_or_404(request, 'feed_id'))
except ValueError:
return dict(code=-1, errors=["You must pass a valid feed_id: %s" %
request.REQUEST.get('feed_id')])
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
feed_id = duplicate_feed[0].feed_id
try:
usersub = UserSubscription.objects.get(user=request.user,
feed=duplicate_feed[0].feed)
except (Feed.DoesNotExist):
return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
else:
return dict(code=-1, errors=["No feed exists for feed_id %d." % feed_id])
except UserSubscription.DoesNotExist:
usersub = None
if usersub:
data = usersub.mark_story_ids_as_read(story_ids, request=request)
else:
data = dict(code=-1, errors=["User is not subscribed to this feed."])
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'feed:%s' % feed_id)
return data
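# Bulk-marks story hashes as read, flags the affected feed and social subscriptions for an
# unread recount, and publishes per-feed/social notifications over Redis.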
@ajax_login_required
@json.json_view
def mark_story_hashes_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
try:
story_hashes = request.REQUEST.getlist('story_hash')
except UnreadablePostError:
return dict(code=-1, message="Missing `story_hash` list parameter.")
feed_ids, friend_ids = RUserStory.mark_story_hashes_read(request.user.pk, story_hashes)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
user_id=request.user.pk,
subscription_user_id__in=friend_ids)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc:
socialsub.needs_unread_recalc = True
socialsub.save()
r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)
# Also count on original subscription
for feed_id in feed_ids:
usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
r.publish(request.user.username, 'feed:%s' % feed_id)
hash_count = len(story_hashes)
logging.user(request, "~FYRead %s %s in feed/socialsubs: %s/%s" % (
hash_count, 'story' if hash_count == 1 else 'stories', feed_ids, friend_ids))
return dict(code=1, story_hashes=story_hashes,
feed_ids=feed_ids, friend_user_ids=friend_ids)
@ajax_login_required
@json.json_view
def mark_feed_stories_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feeds_stories = request.REQUEST.get('feeds_stories', "{}")
feeds_stories = json.decode(feeds_stories)
data = {
'code': -1,
'message': 'Nothing was marked as read'
}
for feed_id, story_ids in feeds_stories.items():
try:
feed_id = int(feed_id)
except ValueError:
continue
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
data = usersub.mark_story_ids_as_read(story_ids, request=request)
except UserSubscription.DoesNotExist:
return dict(code=-1, error="You are not subscribed to this feed_id: %d" % feed_id)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
try:
if not duplicate_feed: raise Feed.DoesNotExist
usersub = UserSubscription.objects.get(user=request.user,
feed=duplicate_feed[0].feed)
data = usersub.mark_story_ids_as_read(story_ids, request=request)
except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
return dict(code=-1, error="No feed exists for feed_id: %d" % feed_id)
r.publish(request.user.username, 'feed:%s' % feed_id)
return data
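# Marks stories as read across social subscriptions, falling back to duplicate-feed lookups
# when the original feed id no longer exists.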
@ajax_login_required
@json.json_view
def mark_social_stories_as_read(request):
code = 1
errors = []
data = {}
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
users_feeds_stories = request.REQUEST.get('users_feeds_stories', "{}")
users_feeds_stories = json.decode(users_feeds_stories)
for social_user_id, feeds in users_feeds_stories.items():
for feed_id, story_ids in feeds.items():
feed_id = int(feed_id)
try:
socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=social_user_id)
data = socialsub.mark_story_ids_as_read(story_ids, feed_id, request=request)
except OperationError, e:
code = -1
errors.append("Already read story: %s" % e)
except MSocialSubscription.DoesNotExist:
MSocialSubscription.mark_unsub_story_ids_as_read(request.user.pk, social_user_id,
story_ids, feed_id,
request=request)
except Feed.DoesNotExist:
duplicate_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feed:
try:
socialsub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=social_user_id)
data = socialsub.mark_story_ids_as_read(story_ids, duplicate_feed[0].feed.pk, request=request)
except (UserSubscription.DoesNotExist, Feed.DoesNotExist):
code = -1
errors.append("No feed exists for feed_id %d." % feed_id)
else:
continue
r.publish(request.user.username, 'feed:%s' % feed_id)
r.publish(request.user.username, 'social:%s' % social_user_id)
data.update(code=code, errors=errors)
return data
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_unread(request):
story_id = request.REQUEST.get('story_id', None)
feed_id = int(request.REQUEST.get('feed_id', 0))
try:
usersub = UserSubscription.objects.select_related('feed').get(user=request.user, feed=feed_id)
feed = usersub.feed
except UserSubscription.DoesNotExist:
usersub = None
feed = Feed.get_by_id(feed_id)
if usersub and not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
data = dict(code=0, payload=dict(story_id=story_id))
story, found_original = MStory.find_story(feed_id, story_id)
if not story:
logging.user(request, "~FY~SBUnread~SN story in feed: %s (NOT FOUND)" % (feed))
return dict(code=-1, message="Story not found.")
if usersub:
data = usersub.invert_read_stories_after_unread_story(story, request)
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
if message:
data['code'] = -1
data['message'] = message
return data
social_subs = MSocialSubscription.mark_dirty_sharing_story(user_id=request.user.pk,
story_feed_id=feed_id,
story_guid_hash=story.guid_hash)
dirty_count = social_subs and social_subs.count()
dirty_count = ("(%s social_subs)" % dirty_count) if dirty_count else ""
RUserStory.mark_story_hash_unread(user_id=request.user.pk, story_hash=story.story_hash)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'feed:%s' % feed_id)
logging.user(request, "~FY~SBUnread~SN story in feed: %s %s" % (feed, dirty_count))
return data
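# Marks a single story hash as unread again, provided the story still exists and can be marked
# unread for this user, and flags the affected feed and social subscriptions for an unread recount.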
@ajax_login_required
@json.json_view
@required_params('story_hash')
def mark_story_hash_as_unread(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
story_hash = request.REQUEST.get('story_hash')
feed_id, _ = MStory.split_story_hash(story_hash)
story, _ = MStory.find_story(feed_id, story_hash)
if not story:
        data = dict(code=-1, message="That story has been removed from the feed, so there's no need to mark it unread.")
return data
message = RUserStory.story_can_be_marked_read_by_user(story, request.user)
if message:
data = dict(code=-1, message=message)
return data
# Also count on original subscription
usersubs = UserSubscription.objects.filter(user=request.user.pk, feed=feed_id)
if usersubs:
usersub = usersubs[0]
if not usersub.needs_unread_recalc:
usersub.needs_unread_recalc = True
usersub.save(update_fields=['needs_unread_recalc'])
data = usersub.invert_read_stories_after_unread_story(story, request)
r.publish(request.user.username, 'feed:%s' % feed_id)
feed_id, friend_ids = RUserStory.mark_story_hash_unread(request.user.pk, story_hash)
if friend_ids:
socialsubs = MSocialSubscription.objects.filter(
user_id=request.user.pk,
subscription_user_id__in=friend_ids)
for socialsub in socialsubs:
if not socialsub.needs_unread_recalc:
socialsub.needs_unread_recalc = True
socialsub.save()
r.publish(request.user.username, 'social:%s' % socialsub.subscription_user_id)
logging.user(request, "~FYUnread story in feed/socialsubs: %s/%s" % (feed_id, friend_ids))
return dict(code=1, story_hash=story_hash, feed_id=feed_id, friend_user_ids=friend_ids)
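# Marks one or more feeds (including 'social:' subscriptions) as read, optionally limited to
# stories older or newer than a cutoff timestamp.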
@ajax_login_required
@json.json_view
def mark_feed_as_read(request):
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feed_ids = request.REQUEST.getlist('feed_id')
cutoff_timestamp = int(request.REQUEST.get('cutoff_timestamp', 0))
direction = request.REQUEST.get('direction', 'older')
multiple = len(feed_ids) > 1
code = 1
errors = []
cutoff_date = datetime.datetime.fromtimestamp(cutoff_timestamp) if cutoff_timestamp else None
if cutoff_date:
logging.user(request, "~FMMark %s feeds read, %s - cutoff: %s/%s" %
(len(feed_ids), direction, cutoff_timestamp, cutoff_date))
for feed_id in feed_ids:
if 'social:' in feed_id:
user_id = int(feed_id.replace('social:', ''))
try:
sub = MSocialSubscription.objects.get(user_id=request.user.pk,
subscription_user_id=user_id)
except MSocialSubscription.DoesNotExist:
logging.user(request, "~FRCouldn't find socialsub: %s" % user_id)
continue
if not multiple:
sub_user = User.objects.get(pk=sub.subscription_user_id)
logging.user(request, "~FMMarking social feed as read: ~SB%s" % (sub_user.username,))
else:
try:
feed = Feed.objects.get(id=feed_id)
sub = UserSubscription.objects.get(feed=feed, user=request.user)
if not multiple:
logging.user(request, "~FMMarking feed as read: ~SB%s" % (feed,))
except (Feed.DoesNotExist, UserSubscription.DoesNotExist), e:
errors.append("User not subscribed: %s" % e)
continue
except (ValueError), e:
errors.append("Invalid feed_id: %s" % e)
continue
if not sub:
errors.append("User not subscribed: %s" % feed_id)
continue
try:
if direction == "older":
marked_read = sub.mark_feed_read(cutoff_date=cutoff_date)
else:
marked_read = sub.mark_newer_stories_read(cutoff_date=cutoff_date)
if marked_read and not multiple:
r.publish(request.user.username, 'feed:%s' % feed_id)
except IntegrityError, e:
errors.append("Could not mark feed as read: %s" % e)
code = -1
if multiple:
logging.user(request, "~FMMarking ~SB%s~SN feeds as read" % len(feed_ids))
r.publish(request.user.username, 'refresh:%s' % ','.join(feed_ids))
if errors:
logging.user(request, "~FMMarking read had errors: ~FR%s" % errors)
return dict(code=code, errors=errors, cutoff_date=cutoff_date, direction=direction)
def _parse_user_info(user):
return {
'user_info': {
'is_anonymous': json.encode(user.is_anonymous()),
'is_authenticated': json.encode(user.is_authenticated()),
'username': json.encode(user.username if user.is_authenticated() else 'Anonymous')
}
}
@ajax_login_required
@json.json_view
def add_url(request):
code = 0
url = request.POST['url']
folder = request.POST.get('folder', '')
new_folder = request.POST.get('new_folder')
auto_active = is_true(request.POST.get('auto_active', 1))
skip_fetch = is_true(request.POST.get('skip_fetch', False))
feed = None
if not url:
code = -1
        message = 'Enter the website address or the feed URL.'
elif any([(banned_url in url) for banned_url in BANNED_URLS]):
code = -1
message = "The publisher of this website has banned PyTune."
else:
if new_folder:
usf, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
usf.add_folder(folder, new_folder)
folder = new_folder
code, message, us = UserSubscription.add_subscription(user=request.user, feed_address=url,
folder=folder, auto_active=auto_active,
skip_fetch=skip_fetch)
feed = us and us.feed
if feed:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:%s' % feed.pk)
MUserSearch.schedule_index_feeds_for_search(feed.pk, request.user.pk)
return dict(code=code, message=message, feed=feed)
@ajax_login_required
@json.json_view
def add_folder(request):
folder = request.POST['folder']
parent_folder = request.POST.get('parent_folder', '')
folders = None
logging.user(request, "~FRAdding Folder: ~SB%s (in %s)" % (folder, parent_folder))
if folder:
code = 1
message = ""
user_sub_folders_object, _ = UserSubscriptionFolders.objects.get_or_create(user=request.user)
user_sub_folders_object.add_folder(parent_folder, folder)
folders = json.decode(user_sub_folders_object.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
else:
code = -1
message = "Gotta write in a folder name."
return dict(code=code, message=message, folders=folders)
@ajax_login_required
@json.json_view
def delete_feed(request):
feed_id = int(request.POST['feed_id'])
in_folder = request.POST.get('in_folder', None)
if not in_folder or in_folder == ' ':
in_folder = ""
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feed(feed_id, in_folder)
feed = Feed.objects.filter(pk=feed_id)
if feed:
feed[0].count_subscribers()
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, message="Removed %s from '%s'." % (feed, in_folder))
@ajax_login_required
@json.json_view
def delete_feed_by_url(request):
message = ""
code = 0
url = request.POST['url']
in_folder = request.POST.get('in_folder', '')
if in_folder == ' ':
in_folder = ""
logging.user(request.user, "~FBFinding feed (delete_feed_by_url): %s" % url)
feed = Feed.get_feed_from_url(url, create=False)
if feed:
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feed(feed.pk, in_folder)
code = 1
feed = Feed.objects.filter(pk=feed.pk)
if feed:
feed[0].count_subscribers()
else:
code = -1
message = "URL not found."
return dict(code=code, message=message)
@ajax_login_required
@json.json_view
def delete_folder(request):
folder_to_delete = request.POST.get('folder_name') or request.POST.get('folder_to_delete')
in_folder = request.POST.get('in_folder', None)
feed_ids_in_folder = [int(f) for f in request.REQUEST.getlist('feed_id') if f]
request.user.profile.send_opml_export_email(reason="You have deleted an entire folder of feeds, so here's a backup just in case.")
    # Works poorly with duplicate folder titles that share the same parent folder:
    # it deletes all of them, but only within that parent. Nobody should be doing that anyway.
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_folder(folder_to_delete, in_folder, feed_ids_in_folder)
folders = json.decode(user_sub_folders.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=folders)
@required_params('feeds_by_folder')
@ajax_login_required
@json.json_view
def delete_feeds_by_folder(request):
feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
request.user.profile.send_opml_export_email(reason="You have deleted a number of feeds at once, so here's a backup just in case.")
    # Works poorly with duplicate folder titles that share the same parent folder:
    # it deletes all of them, but only within that parent. Nobody should be doing that anyway.
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.delete_feeds_by_folder(feeds_by_folder)
folders = json.decode(user_sub_folders.folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=folders)
@ajax_login_required
@json.json_view
def rename_feed(request):
feed = get_object_or_404(Feed, pk=int(request.POST['feed_id']))
user_sub = UserSubscription.objects.get(user=request.user, feed=feed)
feed_title = request.POST['feed_title']
logging.user(request, "~FRRenaming feed '~SB%s~SN' to: ~SB%s" % (
feed.feed_title, feed_title))
user_sub.user_title = feed_title
user_sub.save()
return dict(code=1)
@ajax_login_required
@json.json_view
def rename_folder(request):
folder_to_rename = request.POST.get('folder_name') or request.POST.get('folder_to_rename')
new_folder_name = request.POST['new_folder_name']
in_folder = request.POST.get('in_folder', '')
if 'Top Level' in in_folder: in_folder = ''
code = 0
    # Works poorly with duplicate folder titles that share the same parent folder:
    # it renames all of them, but only within that parent. Nobody should be doing that anyway.
if folder_to_rename and new_folder_name:
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders.rename_folder(folder_to_rename, new_folder_name, in_folder)
code = 1
else:
code = -1
return dict(code=code)
@ajax_login_required
@json.json_view
def move_feed_to_folders(request):
feed_id = int(request.POST['feed_id'])
in_folders = request.POST.getlist('in_folders', '')
to_folders = request.POST.getlist('to_folders', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_feed_to_folders(feed_id, in_folders=in_folders,
to_folders=to_folders)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@ajax_login_required
@json.json_view
def move_feed_to_folder(request):
feed_id = int(request.POST['feed_id'])
in_folder = request.POST.get('in_folder', '')
to_folder = request.POST.get('to_folder', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_feed_to_folder(feed_id, in_folder=in_folder,
to_folder=to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@ajax_login_required
@json.json_view
def move_folder_to_folder(request):
folder_name = request.POST['folder_name']
in_folder = request.POST.get('in_folder', '')
to_folder = request.POST.get('to_folder', '')
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
user_sub_folders = user_sub_folders.move_folder_to_folder(folder_name, in_folder=in_folder, to_folder=to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@required_params('feeds_by_folder', 'to_folder')
@ajax_login_required
@json.json_view
def move_feeds_by_folder_to_folder(request):
feeds_by_folder = json.decode(request.POST['feeds_by_folder'])
to_folder = request.POST['to_folder']
new_folder = request.POST.get('new_folder', None)
request.user.profile.send_opml_export_email(reason="You have moved a number of feeds at once, so here's a backup just in case.")
user_sub_folders = get_object_or_404(UserSubscriptionFolders, user=request.user)
if new_folder:
user_sub_folders.add_folder(to_folder, new_folder)
to_folder = new_folder
user_sub_folders = user_sub_folders.move_feeds_by_folder_to_folder(feeds_by_folder, to_folder)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
return dict(code=1, folders=json.decode(user_sub_folders.folders))
@login_required
def add_feature(request):
if not request.user.is_staff:
return HttpResponseForbidden()
code = -1
form = FeatureForm(request.POST)
if form.is_valid():
form.save()
code = 1
return HttpResponseRedirect(reverse('index'))
return dict(code=code)
@json.json_view
def load_features(request):
user = get_user(request)
page = max(int(request.REQUEST.get('page', 0)), 0)
if page > 1:
logging.user(request, "~FBBrowse features: ~SBPage #%s" % (page+1))
features = Feature.objects.all()[page*3:(page+1)*3+1].values()
features = [{
'description': f['description'],
'date': localtime_for_timezone(f['date'], user.profile.timezone).strftime("%b %d, %Y")
} for f in features]
return features
@ajax_login_required
@json.json_view
def save_feed_order(request):
folders = request.POST.get('folders')
if folders:
# Test that folders can be JSON decoded
folders_list = json.decode(folders)
assert folders_list is not None
logging.user(request, "~FBFeed re-ordering: ~SB%s folders/feeds" % (len(folders_list)))
user_sub_folders = UserSubscriptionFolders.objects.get(user=request.user)
user_sub_folders.folders = folders
user_sub_folders.save()
return {}
@json.json_view
def feeds_trainer(request):
classifiers = []
feed_id = request.REQUEST.get('feed_id')
user = get_user(request)
usersubs = UserSubscription.objects.filter(user=user, active=True)
if feed_id:
feed = get_object_or_404(Feed, pk=feed_id)
usersubs = usersubs.filter(feed=feed)
usersubs = usersubs.select_related('feed').order_by('-feed__stories_last_month')
for us in usersubs:
if (not us.is_trained and us.feed.stories_last_month > 0) or feed_id:
classifier = dict()
classifier['classifiers'] = get_classifiers_for_user(user, feed_id=us.feed.pk)
classifier['feed_id'] = us.feed_id
classifier['stories_last_month'] = us.feed.stories_last_month
classifier['num_subscribers'] = us.feed.num_subscribers
classifier['feed_tags'] = json.decode(us.feed.data.popular_tags) if us.feed.data.popular_tags else []
classifier['feed_authors'] = json.decode(us.feed.data.popular_authors) if us.feed.data.popular_authors else []
classifiers.append(classifier)
user.profile.has_trained_intelligence = True
user.profile.save()
logging.user(user, "~FGLoading Trainer: ~SB%s feeds" % (len(classifiers)))
return classifiers
@ajax_login_required
@json.json_view
def save_feed_chooser(request):
is_premium = request.user.profile.is_premium
approved_feeds = [int(feed_id) for feed_id in request.POST.getlist('approved_feeds') if feed_id]
if not is_premium:
approved_feeds = approved_feeds[:64]
activated = 0
usersubs = UserSubscription.objects.filter(user=request.user)
for sub in usersubs:
try:
if sub.feed_id in approved_feeds:
activated += 1
if not sub.active:
sub.active = True
sub.save()
if sub.feed.active_subscribers <= 0:
sub.feed.count_subscribers()
elif sub.active:
sub.active = False
sub.save()
except Feed.DoesNotExist:
pass
UserSubscription.queue_new_feeds(request.user)
UserSubscription.refresh_stale_feeds(request.user, exclude_new=True)
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish(request.user.username, 'reload:feeds')
logging.user(request, "~BB~FW~SBFeed chooser: ~FC%s~SN/~SB%s" % (
activated,
usersubs.count()
))
return {'activated': activated}
@ajax_login_required
def retrain_all_sites(request):
for sub in UserSubscription.objects.filter(user=request.user):
sub.is_trained = False
sub.save()
return feeds_trainer(request)
@login_required
def activate_premium_account(request):
try:
usersubs = UserSubscription.objects.select_related('feed').filter(user=request.user)
for sub in usersubs:
sub.active = True
sub.save()
if sub.feed.premium_subscribers <= 0:
sub.feed.count_subscribers()
sub.feed.schedule_feed_fetch_immediately()
except Exception, e:
subject = "Premium activation failed"
message = "%s -- %s\n\n%s" % (request.user, usersubs, e)
mail_admins(subject, message, fail_silently=True)
request.user.profile.is_premium = True
request.user.profile.save()
return HttpResponseRedirect(reverse('index'))
@login_required
def login_as(request):
if not request.user.is_staff:
logging.user(request, "~SKNON-STAFF LOGGING IN AS ANOTHER USER!")
assert False
return HttpResponseForbidden()
username = request.GET['user']
user = get_object_or_404(User, username__iexact=username)
user.backend = settings.AUTHENTICATION_BACKENDS[0]
login_user(request, user)
return HttpResponseRedirect(reverse('index'))
def iframe_buster(request):
logging.user(request, "~FB~SBiFrame bust!")
return HttpResponse(status=204)
@required_params('story_id', feed_id=int)
@ajax_login_required
@json.json_view
def mark_story_as_starred(request):
return _mark_story_as_starred(request)
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_starred(request):
return _mark_story_as_starred(request)
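# Shared helper for the two saving endpoints above: copies the story document into MStarredStory,
# creating it or updating its user tags, and keeps the per-feed and per-tag saved-story counts in sync.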
def _mark_story_as_starred(request):
code = 1
feed_id = int(request.REQUEST.get('feed_id', 0))
story_id = request.REQUEST.get('story_id', None)
story_hash = request.REQUEST.get('story_hash', None)
user_tags = request.REQUEST.getlist('user_tags')
message = ""
if story_hash:
story, _ = MStory.find_story(story_hash=story_hash)
feed_id = story and story.story_feed_id
else:
story, _ = MStory.find_story(story_feed_id=feed_id, story_id=story_id)
if not story:
return {'code': -1, 'message': "Could not find story to save."}
story_db = dict([(k, v) for k, v in story._data.items()
if k is not None and v is not None])
story_db.pop('user_id', None)
story_db.pop('starred_date', None)
story_db.pop('id', None)
story_db.pop('user_tags', None)
now = datetime.datetime.now()
story_values = dict(starred_date=now, user_tags=user_tags, **story_db)
params = dict(story_guid=story.story_guid, user_id=request.user.pk)
starred_story = MStarredStory.objects(**params).limit(1)
created = False
removed_user_tags = []
if not starred_story:
params.update(story_values)
starred_story = MStarredStory.objects.create(**params)
created = True
MActivity.new_starred_story(user_id=request.user.pk,
story_title=story.story_title,
story_feed_id=feed_id,
story_id=starred_story.story_guid)
new_user_tags = user_tags
MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=1)
else:
starred_story = starred_story[0]
new_user_tags = list(set(user_tags) - set(starred_story.user_tags or []))
removed_user_tags = list(set(starred_story.user_tags or []) - set(user_tags))
starred_story.user_tags = user_tags
starred_story.save()
for tag in new_user_tags:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=1)
for tag in removed_user_tags:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
if random.random() < 0.01:
MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
starred_counts, starred_count = MStarredStoryCounts.user_counts(request.user.pk, include_total=True)
if not starred_count and len(starred_counts):
starred_count = MStarredStory.objects(user_id=request.user.pk).count()
if created:
logging.user(request, "~FCStarring: ~SB%s (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
else:
logging.user(request, "~FCUpdating starred:~SN~FC ~SB%s~SN (~FM~SB%s~FC~SN)" % (story.story_title[:32], starred_story.user_tags))
return {'code': code, 'message': message, 'starred_count': starred_count, 'starred_counts': starred_counts}
@required_params('story_id')
@ajax_login_required
@json.json_view
def mark_story_as_unstarred(request):
return _mark_story_as_unstarred(request)
@required_params('story_hash')
@ajax_login_required
@json.json_view
def mark_story_hash_as_unstarred(request):
return _mark_story_as_unstarred(request)
def _mark_story_as_unstarred(request):
code = 1
story_id = request.POST.get('story_id', None)
story_hash = request.REQUEST.get('story_hash', None)
starred_counts = None
starred_story = None
if story_id:
starred_story = MStarredStory.objects(user_id=request.user.pk, story_guid=story_id)
if not story_id or not starred_story:
starred_story = MStarredStory.objects(user_id=request.user.pk, story_hash=story_hash or story_id)
if starred_story:
starred_story = starred_story[0]
logging.user(request, "~FCUnstarring: ~SB%s" % (starred_story.story_title[:50]))
user_tags = starred_story.user_tags
feed_id = starred_story.story_feed_id
MActivity.remove_starred_story(user_id=request.user.pk,
story_feed_id=starred_story.story_feed_id,
story_id=starred_story.story_guid)
starred_story.user_id = 0
try:
starred_story.save()
except NotUniqueError:
starred_story.delete()
MStarredStoryCounts.adjust_count(request.user.pk, feed_id=feed_id, amount=-1)
for tag in user_tags:
try:
MStarredStoryCounts.adjust_count(request.user.pk, tag=tag, amount=-1)
except MStarredStoryCounts.DoesNotExist:
pass
# MStarredStoryCounts.schedule_count_tags_for_user(request.user.pk)
MStarredStoryCounts.count_for_user(request.user.pk, total_only=True)
starred_counts = MStarredStoryCounts.user_counts(request.user.pk)
else:
code = -1
return {'code': code, 'starred_counts': starred_counts}
@ajax_login_required
@json.json_view
def send_story_email(request):
code = 1
message = 'OK'
story_id = request.POST['story_id']
feed_id = request.POST['feed_id']
    to_addresses = request.POST.get('to', '').replace(',', ' ').replace('  ', ' ').strip().split(' ')
from_name = request.POST['from_name']
from_email = request.POST['from_email']
email_cc = is_true(request.POST.get('email_cc', 'true'))
comments = request.POST['comments']
comments = comments[:2048] # Separated due to PyLint
from_address = '[email protected]'
share_user_profile = MSocialProfile.get_user(request.user.pk)
if not to_addresses:
code = -1
message = 'Please provide at least one email address.'
elif not all(validate_email(to_address) for to_address in to_addresses if to_addresses):
code = -1
message = 'You need to send the email to a valid email address.'
elif not validate_email(from_email):
code = -1
message = 'You need to provide your email address.'
elif not from_name:
code = -1
message = 'You need to provide your name.'
else:
story, _ = MStory.find_story(feed_id, story_id)
story = Feed.format_story(story, feed_id, text=True)
feed = Feed.get_by_id(story['story_feed_id'])
params = {
"to_addresses": to_addresses,
"from_name": from_name,
"from_email": from_email,
"email_cc": email_cc,
"comments": comments,
"from_address": from_address,
"story": story,
"feed": feed,
"share_user_profile": share_user_profile,
}
text = render_to_string('mail/email_story.txt', params)
html = render_to_string('mail/email_story.xhtml', params)
subject = '%s' % (story['story_title'])
cc = None
if email_cc:
cc = ['%s <%s>' % (from_name, from_email)]
subject = subject.replace('\n', ' ')
msg = EmailMultiAlternatives(subject, text,
from_email='PyTune <%s>' % from_address,
to=to_addresses,
cc=cc,
headers={'Reply-To': '%s <%s>' % (from_name, from_email)})
msg.attach_alternative(html, "text/html")
try:
msg.send()
except boto.ses.connection.ResponseError, e:
code = -1
message = "Email error: %s" % str(e)
logging.user(request, '~BMSharing story by email to %s recipient%s: ~FY~SB%s~SN~BM~FY/~SB%s' %
(len(to_addresses), '' if len(to_addresses) == 1 else 's',
story['story_title'][:50], feed and feed.feed_title[:50]))
return {'code': code, 'message': message}
@json.json_view
def load_tutorial(request):
if request.REQUEST.get('finished'):
logging.user(request, '~BY~FW~SBFinishing Tutorial')
return {}
else:
pytune_feed = Feed.objects.filter(feed_address__icontains='blog.pytune.com').order_by('-pk')[0]
logging.user(request, '~BY~FW~SBLoading Tutorial')
return {
'pytune_feed': pytune_feed.canonical()
}
| mit | -6,293,551,005,867,734,000 | 42.350855 | 137 | 0.595985 | false |
clchiou/garage | py/garage/examples/asyncs/supervisors.py | 1 | 1115 | """Supervisor tree example."""
import logging
import curio
from garage.asyncs import TaskStack
from garage.asyncs.queues import Closed, Queue
async def supervisor():
print('supervisor start')
async with TaskStack() as stack:
queue = Queue()
        await stack.spawn(consumer(queue))
        await stack.spawn(producer(queue))
async for task in curio.TaskGroup(stack):
await task.join()
print('supervisor stop')
async def producer(queue):
print('producer start')
message = list('Hello world!')
while message:
await queue.put(message.pop(0))
queue.close()
print('producer stop')
async def consumer(queue):
print('consumer start')
try:
while True:
print('consume', repr(await queue.get()))
except Closed:
pass
finally:
print('consumer stop')
def main():
logging.basicConfig(level=logging.DEBUG)
print('main start')
try:
curio.run(supervisor())
except KeyboardInterrupt:
print('main quit')
print('main stop')
if __name__ == '__main__':
main()
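# Illustrative note (not part of the original example): a run of this script
# prints roughly the following, though the exact interleaving of the producer
# and consumer lines depends on how curio schedules the tasks:
#
#   main start
#   supervisor start
#   consumer start
#   producer start
#   consume 'H' ... consume '!'
#   producer stop
#   consumer stop
#   supervisor stop
#   main stop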
| mit | 3,136,778,452,220,943,400 | 20.037736 | 53 | 0.620628 | false |
PostRockFTW/ExcitingBike | src/screenscrolltest/__init__.py | 1 | 2317 | import sys
import pygame
from pygame.locals import *
def main():
pygame.init()
gameWidth = 460
gameHeight = 230
miniMapFactor = 8
mainSurface = pygame.display.set_mode((gameWidth, gameHeight))
mainClock = pygame.time.Clock()
FPS = 30
pygame.display.set_caption('Screen Scroll Test')
print "Move screen with left/right arrow keys"
print "Hold SHIFT to jump to edges"
backgroundSurface = pygame.image.load('background.png').convert()
miniMapSurface = pygame.Surface((backgroundSurface.get_width()/miniMapFactor, backgroundSurface.get_height()/miniMapFactor))
pygame.transform.scale(backgroundSurface, (miniMapSurface.get_width(), miniMapSurface.get_height()), miniMapSurface)
running = True
currOffset = 0
# Game loop
while running:
pygame.event.pump()
for event in pygame.event.get():
if ((event.type == QUIT) or
(event.type == KEYDOWN and event.key == K_ESCAPE)):
running = False
# Draw the current section of the background
mainSurface.blit(backgroundSurface, (-currOffset, 0))
miniMapLeft = mainSurface.get_width() - miniMapSurface.get_width()
mainSurface.blit(miniMapSurface, (miniMapLeft, 0))
miniMapBorderRect = pygame.Rect(
miniMapLeft + currOffset * (float(miniMapSurface.get_width()) / backgroundSurface.get_width()),
0,
miniMapSurface.get_width() * (float(mainSurface.get_width()) / backgroundSurface.get_width()),
miniMapSurface.get_height()
)
pygame.draw.rect(mainSurface, pygame.color.Color('white'), miniMapBorderRect, 2)
pressedKeys = pygame.key.get_pressed()
shiftPressed = pressedKeys[K_LSHIFT] or pressedKeys[K_RSHIFT]
if pressedKeys[K_RIGHT]:
currOffset += 10
rightMost = (backgroundSurface.get_width() - mainSurface.get_width())
if (currOffset > rightMost) or shiftPressed:
currOffset = rightMost
elif pressedKeys[K_LEFT]:
currOffset -= 10
if (currOffset < 0) or shiftPressed:
currOffset = 0
pygame.display.update()
mainClock.tick(FPS)
pygame.quit()
sys.exit()
if __name__ == "__main__":
main()
| gpl-2.0 | -1,759,312,278,505,376,500 | 30.739726 | 128 | 0.630988 | false |
Donkyhotay/MoonPy | zope/app/component/tests/factory.py | 1 | 1462 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Factory tests.
$Id: factory.py 26551 2004-07-15 07:06:37Z srichter $
"""
from zope.component.interfaces import IFactory
from zope.interface import Interface, implements, implementedBy
class IX(Interface):
"""the dummy interface which class X supposedly implements,
according to the factory"""
class IFoo(Interface):
"""an even more dummy interface just for testing """
class X(object):
implements(IX)
def __init__(self, *args, **kwargs):
self.args=args
self.kwargs=kwargs
class ClassFactoryWrapper(object):
implements(IFactory)
def __init__(self, klass):
self.__klass=klass
def __call__(self, *args, **kwargs):
return self.__klass(*args, **kwargs)
def getInterfaces(self):
return implementedBy(self.__klass)
f=ClassFactoryWrapper(X)
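# Illustrative usage of the wrapper above (not part of the original fixtures):
# calling f(1, 2, key='value') would return an X instance with those args and
# kwargs stored, and list(f.getInterfaces()) would include IX.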
| gpl-3.0 | 723,490,795,661,606,500 | 32.227273 | 78 | 0.633379 | false |
NaPs/Kolekto | kolekto/commands/edit.py | 1 | 1031 | import json
from kolekto.printer import printer
from kolekto.commands import Command
from kolekto.helpers import get_hash
class Edit(Command):
""" Edit a movie.
"""
help = 'edit a movie'
def prepare(self):
self.add_arg('input', metavar='movie-hash-or-file')
def run(self, args, config):
mdb = self.get_metadata_db(args.tree)
movie_hash = get_hash(args.input)
try:
movie = mdb.get(movie_hash)
except KeyError:
printer.p('Unknown movie hash.')
return
movie_json = json.dumps(movie, indent=4)
while True:
movie_json = printer.edit(movie_json)
try:
mdb.save(movie_hash, json.loads(movie_json))
except ValueError:
if printer.ask('Bad json data, would you like to try again?', default=True):
continue
else:
break
else:
printer.p('Saved.')
break
| mit | 4,217,550,169,612,711,400 | 22.976744 | 92 | 0.534433 | false |
synthesio/infra-ovh-ansible-module | plugins/modules/public_cloud_instance.py | 1 | 3577 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from ansible.module_utils.basic import AnsibleModule
__metaclass__ = type
DOCUMENTATION = '''
---
module: public_cloud_instance
short_description: Manage OVH API for public cloud instance creation
description:
    - This module manages the creation of an instance on OVH public Cloud
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
name:
required: true
description: The instance name to create
ssh_key_id:
required: false
description: The sshKey Id to add
flavor_id:
required: true
description: The id of the commercial name
image_id:
required: true
description: The id of the image/os to deploy on the instance
region:
required: true
description: The region where to deploy the instance
networks:
required: false
description: The network configuration.
Can be the full array of the network configuration
service_name:
required: true
description: The service_name
monthly_billing:
required: false
default: false
description: Enable or not the monthly billing
'''
EXAMPLES = '''
- name: run installation
synthesio.ovh.ovh_public_cloud_instance:
name: "{{ inventory_hostname }}"
ssh_key_id: "{{ sshKeyId }}"
service_name: "{{ service_name }}"
networks: "{{ networks }}"
flavor_id: "{{ flavor_id }}"
region: "{{ region }}"
image_id: "{{ image_id }}"
delegate_to: localhost
register: instance_metadata
'''
RETURN = ''' # '''
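# Illustrative extra example (not part of the original module documentation):
# the same task with monthly billing enabled and no SSH key; all values are
# placeholders.
#
# - name: create instance with monthly billing
#   synthesio.ovh.ovh_public_cloud_instance:
#     name: "my-instance"
#     service_name: "{{ service_name }}"
#     flavor_id: "{{ flavor_id }}"
#     image_id: "{{ image_id }}"
#     region: "{{ region }}"
#     monthly_billing: true
#   delegate_to: localhost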
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
name=dict(required=True),
flavor_id=dict(required=True),
image_id=dict(required=True),
service_name=dict(required=True),
ssh_key_id=dict(required=False, default=None),
region=dict(required=True),
networks=dict(required=False, default=[], type="list"),
monthly_billing=dict(required=False, default=False, type="bool")
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
name = module.params['name']
service_name = module.params['service_name']
flavor_id = module.params['flavor_id']
image_id = module.params['image_id']
service_name = module.params['service_name']
ssh_key_id = module.params['ssh_key_id']
region = module.params['region']
networks = module.params['networks']
monthly_billing = module.params['monthly_billing']
try:
result = client.post('/cloud/project/%s/instance' % service_name,
flavorId=flavor_id,
imageId=image_id,
monthlyBilling=monthly_billing,
name=name,
region=region,
networks=networks,
sshKeyId=ssh_key_id
)
module.exit_json(changed=True, **result)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error))
def main():
run_module()
if __name__ == '__main__':
main()
| mit | 4,153,331,261,659,755,000 | 27.846774 | 105 | 0.609729 | false |
smlacombe/sageo | app/model/filters/filter_host_state.py | 1 | 2263 | #
# Copyright (C) 2013 Savoir-Faire Linux Inc.
#
# This file is part of Sageo
#
# Sageo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sageo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sageo. If not, see <http://www.gnu.org/licenses/>
from .filter import Filter
from builtin import FILTER_HOST_STATE
from sqlalchemy import *
from sqlalchemy.orm import *
STATE_CODES = { FILTER_HOST_STATE + '_up': 0 , FILTER_HOST_STATE + '_down': 1 , FILTER_HOST_STATE + '_unreach': 2, FILTER_HOST_STATE + '_pending': 3 }
class FilterHostState(Filter):
def __init__(self, name, title, descr):
Filter.__init__(self, name, title, descr)
self.column_names = ['host_has_been_checked', 'host_state']
def filter(self, states):
"""
Filter host states.
states: dictionnary that contain states and is boolean value
"""
filter = "Filter: host_has_been_checked = 1\n"
state_code = 0
count = 0
for state, value in states.items():
if value:
state_code = STATE_CODES[state]
filter = filter + "Filter: host_state = " + str(state_code) + "\n"
count = count + 1
filter = filter + "Or: " + str(count) + "\n"
return filter
def get_col_def(self):
return [
Column(FILTER_HOST_STATE + '_up', Boolean, default=True, info={'label': 'UP'}),
Column(FILTER_HOST_STATE + '_down', Boolean, default=True, info={'label': 'DOWN'}),
Column(FILTER_HOST_STATE + '_unreach', Boolean, default=True, info={'label': 'UNREACHABLE'}),
Column(FILTER_HOST_STATE + '_pending', Boolean, default=True, info={'label': 'PENDING'})
]
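# Illustrative sketch (not part of the original class): with a states dict such
# as {FILTER_HOST_STATE + '_up': True, FILTER_HOST_STATE + '_down': True} and
# the remaining states False, filter() would return a livestatus-style query
# similar to the following (the Filter line order follows dict iteration order):
#
#   Filter: host_has_been_checked = 1
#   Filter: host_state = 0
#   Filter: host_state = 1
#   Or: 2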
| gpl-3.0 | -3,413,612,498,410,856,400 | 40.145455 | 151 | 0.604949 | false |
JDevlieghere/InTeXration | intexration/task.py | 1 | 4809 | import logging
import os
import shutil
import subprocess
import tempfile
from intexration.tools import create_dir, cd
from intexration.build import Identifier, Build
from intexration.document import Document
from intexration.parser import BuildParser
class Task():
def run(self):
pass
class CloneTask(Task):
def __init__(self, manager, request):
self.build_manager = manager
self.build_request = request
self.temp_directory = tempfile.mkdtemp()
self.clone_directory = self._create_dir()
def _create_dir(self):
return create_dir(os.path.join(self.temp_directory,
self.build_request.owner,
self.build_request.repository,
self.build_request.commit))
def _clone(self):
logging.info("Cloning to %s", self.clone_directory)
if subprocess.call(['git', 'clone', self.build_request.ssh_url(), self.clone_directory],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0:
return
if subprocess.call(['git', 'clone', self.build_request.https_url(), self.clone_directory],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) == 0:
return
raise RuntimeError("Clone failed.")
def _submit_builds(self):
builds = dict()
build_parser = BuildParser(self.clone_directory)
for name in build_parser.names():
identifier = Identifier(self.build_request.owner,
self.build_request.repository,
name)
src_path = os.path.join(self.clone_directory, build_parser.dir(name))
dst_path = os.path.join(self.temp_directory, name)
shutil.copytree(src_path, dst_path)
build = Build(dst_path,
build_parser.tex(name),
build_parser.idx(name),
build_parser.bib(name))
builds[identifier] = build
self.build_manager.submit_builds(builds)
def _clean(self):
shutil.rmtree(self.clone_directory)
def run(self):
try:
self._clone()
self._submit_builds()
self._clean()
except RuntimeError as e:
logging.error(e)
except RuntimeWarning as e:
logging.warning(e)
class CompileTask(Task):
MAKEINDEX = 'makeindex'
BIBTEX = 'bibtex'
PDFLATEX = 'pdflatex'
def __init__(self, manager, identifier, build):
self.build_manager = manager
self.identifier = identifier
self.build = build
self.document_directory = self._create_dir()
def _create_dir(self):
return create_dir(os.path.join(self.build_manager.output,
self.identifier.owner,
self.identifier.repository))
def _makeindex(self):
"""Make index."""
with cd(self.build.path):
if subprocess.call([self.MAKEINDEX, self.build.idx],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
logging.warning("%s Makeindex failed for %s", self.identifier, self.build.idx)
def _bibtex(self):
"""Compile bibtex."""
with cd(self.build.path):
if subprocess.call([self.BIBTEX, self.build.bib],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
logging.warning("%s Bibtex failed for %s", self.identifier, self.build.bib)
def _compile(self):
"""Compile with pdflatex."""
with cd(self.build.path):
if subprocess.call([self.PDFLATEX, '-interaction=nonstopmode', self.build.tex],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
logging.warning("%s Compilation finished with errors for %s", self.identifier, self.build.tex)
def _submit_documents(self):
document = Document(self.identifier.name, self.build.path)
document.move_to(self.document_directory)
self.build_manager.submit_document(self.identifier, document)
self.build.finish()
def run(self):
try:
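            # Standard multi-pass LaTeX build: the first pdflatex run writes the
            # .aux/.idx files, makeindex and bibtex consume them, and the final
            # pdflatex runs resolve cross-references and citations.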
self._compile()
self._makeindex()
self._bibtex()
self._compile()
self._compile()
self._submit_documents()
except RuntimeError as e:
logging.error(e)
except RuntimeWarning as e:
logging.warning(e)
| apache-2.0 | 7,176,475,637,325,700,000 | 35.157895 | 110 | 0.550842 | false |
maoy/zknova | nova/api/openstack/compute/contrib/extended_status.py | 1 | 4009 | # Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Status Admin API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'extended_status')
class ExtendedStatusController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedStatusController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _extend_server(self, server, instance):
for state in ['task_state', 'vm_state', 'power_state']:
key = "%s:%s" % (Extended_status.alias, state)
server[key] = instance[state]
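        # For example, with the 'OS-EXT-STS' alias this adds keys such as
        # 'OS-EXT-STS:task_state', 'OS-EXT-STS:vm_state' and
        # 'OS-EXT-STS:power_state' to the server dict.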
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
self._extend_server(server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'detail' method.
self._extend_server(server, db_instance)
class Extended_status(extensions.ExtensionDescriptor):
"""Extended Status support."""
name = "ExtendedStatus"
alias = "OS-EXT-STS"
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_status/api/v1.1")
updated = "2011-11-03T00:00:00+00:00"
def get_controller_extensions(self):
controller = ExtendedStatusController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def make_server(elem):
elem.set('{%s}task_state' % Extended_status.namespace,
'%s:task_state' % Extended_status.alias)
elem.set('{%s}power_state' % Extended_status.namespace,
'%s:power_state' % Extended_status.alias)
elem.set('{%s}vm_state' % Extended_status.namespace,
'%s:vm_state' % Extended_status.alias)
class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
class ExtendedStatusesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
| apache-2.0 | 3,888,264,689,838,659,000 | 38.693069 | 79 | 0.65777 | false |
projectatomic/atomic-reactor | atomic_reactor/plugins/exit_remove_worker_metadata.py | 1 | 1722 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.constants import PLUGIN_REMOVE_WORKER_METADATA_KEY
from osbs.exceptions import OsbsResponseException
from atomic_reactor.util import get_platform_config, BadConfigMapError
from atomic_reactor.plugins.build_orchestrate_build import get_worker_build_info
class RemoveWorkerMetadataPlugin(ExitPlugin):
"""
Remove worker metadata for each platform.
"""
key = PLUGIN_REMOVE_WORKER_METADATA_KEY
def run(self):
"""
Run the plugin.
"""
build_result = self.workflow.build_result
worker_builds = build_result.annotations['worker-builds']
for platform, build_annotations in worker_builds.items():
try:
if ('metadata_fragment' not in build_annotations or
'metadata_fragment_key' not in build_annotations):
continue
cm_key, _ = get_platform_config(platform, build_annotations)
except BadConfigMapError:
continue
build_info = get_worker_build_info(self.workflow, platform)
osbs = build_info.osbs
try:
osbs.delete_config_map(cm_key)
self.log.debug("ConfigMap %s on platform %s deleted", cm_key, platform)
except OsbsResponseException as ex:
self.log.warning("Failed to delete ConfigMap %s on platform %s: %s",
cm_key, platform, ex)
| bsd-3-clause | -3,733,896,536,567,704,000 | 33.44 | 87 | 0.641115 | false |
ccpgames/eve-metrics | web2py/cgihandler.py | 1 | 1527 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <[email protected]>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a CGI handler for Apache
Requires apache+[mod_cgi or mod_cgid].
In httpd.conf put something like:
LoadModule cgi_module modules/mod_cgi.so
ScriptAlias / /path/to/cgihandler.py/
Example of httpd.conf ------------
<VirtualHost *:80>
ServerName web2py.example.com
ScriptAlias / /users/www-data/web2py/cgihandler.py/
<Directory /users/www-data/web2py>
AllowOverride None
Order Allow,Deny
Deny from all
<Files cgihandler.py>
Allow from all
</Files>
</Directory>
AliasMatch ^/([^/]+)/static/(.*) \
/users/www-data/web2py/applications/$1/static/$2
<Directory /users/www-data/web2py/applications/*/static/>
Order Allow,Deny
Allow from all
</Directory>
<Location /admin>
Deny from all
</Location>
<LocationMatch ^/([^/]+)/appadmin>
Deny from all
</LocationMatch>
CustomLog /private/var/log/apache2/access.log common
ErrorLog /private/var/log/apache2/error.log
</VirtualHost>
----------------------------------
"""
import os
import sys
import wsgiref.handlers
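# Note (not part of the original handler): with mod_cgi the file referenced by
# ScriptAlias generally needs to be readable and executable by the Apache user
# (e.g. chmod +x cgihandler.py), otherwise Apache will refuse to run it.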
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path] + [p for p in sys.path if not p == path]
import gluon.main
wsgiref.handlers.CGIHandler().run(gluon.main.wsgibase)
| mit | 4,005,331,098,302,896,000 | 22.238095 | 59 | 0.646365 | false |
matt-gardner/deep_qa | tests/data/data_indexer_test.py | 1 | 5324 | # pylint: disable=no-self-use,invalid-name
import codecs
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.datasets import TextDataset
from deep_qa.data.instances.text_classification.text_classification_instance import TextClassificationInstance
from ..common.test_case import DeepQaTestCase
class TestDataIndexer(DeepQaTestCase):
def test_fit_word_dictionary_respects_min_count(self):
instance = TextClassificationInstance("a a a a b b c c c", True)
dataset = TextDataset([instance])
data_indexer = DataIndexer()
data_indexer.fit_word_dictionary(dataset, min_count=4)
assert 'a' in data_indexer.words_in_index()
assert 'b' not in data_indexer.words_in_index()
assert 'c' not in data_indexer.words_in_index()
data_indexer = DataIndexer()
data_indexer.fit_word_dictionary(dataset, min_count=1)
assert 'a' in data_indexer.words_in_index()
assert 'b' in data_indexer.words_in_index()
assert 'c' in data_indexer.words_in_index()
def test_add_word_to_index_gives_consistent_results(self):
data_indexer = DataIndexer()
initial_vocab_size = data_indexer.get_vocab_size()
word_index = data_indexer.add_word_to_index("word")
assert "word" in data_indexer.words_in_index()
assert data_indexer.get_word_index("word") == word_index
assert data_indexer.get_word_from_index(word_index) == "word"
assert data_indexer.get_vocab_size() == initial_vocab_size + 1
# Now add it again, and make sure nothing changes.
data_indexer.add_word_to_index("word")
assert "word" in data_indexer.words_in_index()
assert data_indexer.get_word_index("word") == word_index
assert data_indexer.get_word_from_index(word_index) == "word"
assert data_indexer.get_vocab_size() == initial_vocab_size + 1
def test_namespaces(self):
data_indexer = DataIndexer()
initial_vocab_size = data_indexer.get_vocab_size()
word_index = data_indexer.add_word_to_index("word", namespace='1')
assert "word" in data_indexer.words_in_index(namespace='1')
assert data_indexer.get_word_index("word", namespace='1') == word_index
assert data_indexer.get_word_from_index(word_index, namespace='1') == "word"
assert data_indexer.get_vocab_size(namespace='1') == initial_vocab_size + 1
# Now add it again, in a different namespace and a different word, and make sure it's like
# new.
word2_index = data_indexer.add_word_to_index("word2", namespace='2')
word_index = data_indexer.add_word_to_index("word", namespace='2')
assert "word" in data_indexer.words_in_index(namespace='2')
assert "word2" in data_indexer.words_in_index(namespace='2')
assert data_indexer.get_word_index("word", namespace='2') == word_index
assert data_indexer.get_word_index("word2", namespace='2') == word2_index
assert data_indexer.get_word_from_index(word_index, namespace='2') == "word"
assert data_indexer.get_word_from_index(word2_index, namespace='2') == "word2"
assert data_indexer.get_vocab_size(namespace='2') == initial_vocab_size + 2
def test_unknown_token(self):
# pylint: disable=protected-access
# We're putting this behavior in a test so that the behavior is documented. There is
# solver code that depends in a small way on how we treat the unknown token, so any
# breaking change to this behavior should break a test, so you know you've done something
# that needs more consideration.
data_indexer = DataIndexer()
oov_token = data_indexer._oov_token
oov_index = data_indexer.get_word_index(oov_token)
assert oov_index == 1
assert data_indexer.get_word_index("unseen word") == oov_index
def test_set_from_file(self):
# pylint: disable=protected-access
vocab_filename = self.TEST_DIR + 'vocab_file'
with codecs.open(vocab_filename, 'w', 'utf-8') as vocab_file:
vocab_file.write('<S>\n')
vocab_file.write('</S>\n')
vocab_file.write('<UNK>\n')
vocab_file.write('a\n')
vocab_file.write('word\n')
vocab_file.write('another\n')
data_indexer = DataIndexer()
data_indexer.set_from_file(vocab_filename, oov_token="<UNK>")
assert data_indexer._oov_token == "<UNK>"
assert data_indexer.get_word_index("random string") == 3
assert data_indexer.get_word_index("<S>") == 1
assert data_indexer.get_word_index("</S>") == 2
assert data_indexer.get_word_index("<UNK>") == 3
assert data_indexer.get_word_index("a") == 4
assert data_indexer.get_word_index("word") == 5
assert data_indexer.get_word_index("another") == 6
assert data_indexer.get_word_from_index(0) == data_indexer._padding_token
assert data_indexer.get_word_from_index(1) == "<S>"
assert data_indexer.get_word_from_index(2) == "</S>"
assert data_indexer.get_word_from_index(3) == "<UNK>"
assert data_indexer.get_word_from_index(4) == "a"
assert data_indexer.get_word_from_index(5) == "word"
assert data_indexer.get_word_from_index(6) == "another"
| apache-2.0 | 5,252,858,860,208,307,000 | 51.712871 | 110 | 0.64707 | false |
JNRowe/versionah | tests/test_date.py | 1 | 1244 | #
"""test_date - Date tests"""
# Copyright © 2012-2018 James Rowe <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of versionah.
#
# versionah is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# versionah is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# versionah. If not, see <http://www.gnu.org/licenses/>.
from datetime import date
from pytest import mark
from versionah.cmdline import CliVersion
@mark.requires_write
@mark.parametrize('v, file', [
('0.1.0', 'test_wr_a'),
('1.0.0', 'test_wr_b'),
('2.1.3', 'test_wr_c'),
])
def test_date_metadata(v, file, tmpdir):
file_loc = tmpdir.join(file).strpath
CliVersion(v).write(file_loc, 'text')
read = CliVersion.read(file_loc)
assert read.as_date() == date.today().isoformat()
| gpl-3.0 | 5,703,269,001,026,856,000 | 31.710526 | 79 | 0.711183 | false |
Makeystreet/makeystreet | woot/apps/catalog/migrations/0020_auto__add_image__chg_field_documentation_url.py | 1 | 21689 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Image'
db.create_table(u'catalog_image', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('large_url', self.gf('django.db.models.fields.URLField')(max_length=1000)),
('small_url', self.gf('django.db.models.fields.URLField')(max_length=1000, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='images', null=True, to=orm['django_facebook.FacebookCustomUser'])),
('added_time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'catalog', ['Image'])
# Changing field 'Documentation.url'
db.alter_column(u'catalog_documentation', 'url', self.gf('django.db.models.fields.URLField')(max_length=1000))
# Adding M2M table for field images on 'Makey'
m2m_table_name = db.shorten_name(u'catalog_makey_images')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('makey', models.ForeignKey(orm[u'catalog.makey'], null=False)),
('image', models.ForeignKey(orm[u'catalog.image'], null=False))
))
db.create_unique(m2m_table_name, ['makey_id', 'image_id'])
def backwards(self, orm):
# Deleting model 'Image'
db.delete_table(u'catalog_image')
# Changing field 'Documentation.url'
db.alter_column(u'catalog_documentation', 'url', self.gf('django.db.models.fields.URLField')(max_length=200))
# Removing M2M table for field images on 'Makey'
db.delete_table(db.shorten_name(u'catalog_makey_images'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.liketutorial': {
'Meta': {'object_name': 'LikeTutorial'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"})
},
u'catalog.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeydocumentations'", 'symmetrical': 'False', 'to': u"orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeyimages'", 'symmetrical': 'False', 'to': u"orm['catalog.Image']"}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeylikes'", 'to': u"orm['django_facebook.FacebookCustomUser']", 'through': u"orm['catalog.LikeMakey']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'makeynotes'", 'symmetrical': 'False', 'to': u"orm['catalog.Note']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': u"orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
u'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog'] | apache-2.0 | 7,494,500,861,876,699,000 | 74.051903 | 260 | 0.55867 | false |
pawelmhm/scrapy | scrapy/core/http2/protocol.py | 3 | 16522 | import ipaddress
import itertools
import logging
from collections import deque
from ipaddress import IPv4Address, IPv6Address
from typing import Dict, List, Optional, Union
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.errors import ErrorCodes
from h2.events import (
Event, ConnectionTerminated, DataReceived, ResponseReceived,
SettingsAcknowledged, StreamEnded, StreamReset, UnknownFrameReceived,
WindowUpdated
)
from h2.exceptions import FrameTooLargeError, H2Error
from twisted.internet.defer import Deferred
from twisted.internet.error import TimeoutError
from twisted.internet.interfaces import IHandshakeListener, IProtocolNegotiationFactory
from twisted.internet.protocol import connectionDone, Factory, Protocol
from twisted.internet.ssl import Certificate
from twisted.protocols.policies import TimeoutMixin
from twisted.python.failure import Failure
from twisted.web.client import URI
from zope.interface import implementer
from scrapy.core.http2.stream import Stream, StreamCloseReason
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy.spiders import Spider
logger = logging.getLogger(__name__)
PROTOCOL_NAME = b"h2"
class InvalidNegotiatedProtocol(H2Error):
def __init__(self, negotiated_protocol: bytes) -> None:
self.negotiated_protocol = negotiated_protocol
def __str__(self) -> str:
return (f"Expected {PROTOCOL_NAME!r}, received {self.negotiated_protocol!r}")
class RemoteTerminatedConnection(H2Error):
def __init__(
self,
remote_ip_address: Optional[Union[IPv4Address, IPv6Address]],
event: ConnectionTerminated,
) -> None:
self.remote_ip_address = remote_ip_address
self.terminate_event = event
def __str__(self) -> str:
return f'Received GOAWAY frame from {self.remote_ip_address!r}'
class MethodNotAllowed405(H2Error):
def __init__(self, remote_ip_address: Optional[Union[IPv4Address, IPv6Address]]) -> None:
self.remote_ip_address = remote_ip_address
def __str__(self) -> str:
return f"Received 'HTTP/2.0 405 Method Not Allowed' from {self.remote_ip_address!r}"
@implementer(IHandshakeListener)
class H2ClientProtocol(Protocol, TimeoutMixin):
IDLE_TIMEOUT = 240
def __init__(self, uri: URI, settings: Settings, conn_lost_deferred: Deferred) -> None:
"""
Arguments:
uri -- URI of the base url to which HTTP/2 Connection will be made.
uri is used to verify that incoming client requests have correct
base URL.
settings -- Scrapy project settings
conn_lost_deferred -- Deferred fires with the reason: Failure to notify
that connection was lost
"""
self._conn_lost_deferred = conn_lost_deferred
config = H2Configuration(client_side=True, header_encoding='utf-8')
self.conn = H2Connection(config=config)
# ID of the next request stream
# Following the convention - 'Streams initiated by a client MUST
# use odd-numbered stream identifiers' (RFC 7540 - Section 5.1.1)
self._stream_id_generator = itertools.count(start=1, step=2)
# Streams are stored in a dictionary keyed off their stream IDs
self.streams: Dict[int, Stream] = {}
# If requests are received before connection is made we keep
# all requests in a pool and send them as the connection is made
self._pending_request_stream_pool: deque = deque()
# Save an instance of errors raised which lead to losing the connection
# We pass these instances to the streams ResponseFailed() failure
self._conn_lost_errors: List[BaseException] = []
# Some meta data of this connection
# initialized when connection is successfully made
self.metadata: Dict = {
# Peer certificate instance
'certificate': None,
# Address of the server we are connected to which
# is updated when HTTP/2 connection is made successfully
'ip_address': None,
# URI of the peer HTTP/2 connection is made
'uri': uri,
# Both ip_address and uri are used by the Stream before
# initiating the request to verify that the base address
# Variables taken from Project Settings
'default_download_maxsize': settings.getint('DOWNLOAD_MAXSIZE'),
'default_download_warnsize': settings.getint('DOWNLOAD_WARNSIZE'),
# Counter to keep track of opened streams. This counter
# is used to make sure that not more than MAX_CONCURRENT_STREAMS
# streams are opened which leads to ProtocolError
# We use simple FIFO policy to handle pending requests
'active_streams': 0,
# Flag to keep track if settings were acknowledged by the remote
# This ensures that we have established a HTTP/2 connection
'settings_acknowledged': False,
}
@property
def h2_connected(self) -> bool:
"""Boolean to keep track of the connection status.
This is used while initiating pending streams to make sure
that we initiate stream only during active HTTP/2 Connection
"""
return bool(self.transport.connected) and self.metadata['settings_acknowledged']
@property
def allowed_max_concurrent_streams(self) -> int:
"""We keep total two streams for client (sending data) and
server side (receiving data) for a single request. To be safe
we choose the minimum. Since this value can change in event
RemoteSettingsChanged we make variable a property.
"""
return min(
self.conn.local_settings.max_concurrent_streams,
self.conn.remote_settings.max_concurrent_streams
)
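    # Illustrative example (not part of the original code): if our local
    # SETTINGS advertise max_concurrent_streams=100 while the remote peer
    # advertises 128, the property above evaluates to min(100, 128) == 100.
    # A later RemoteSettingsChanged event lowering the remote value to 50 is
    # picked up automatically on the next access, which is why this is a
    # property rather than a value cached when the connection is made.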
def _send_pending_requests(self) -> None:
"""Initiate all pending requests from the deque following FIFO
We make sure that at any time {allowed_max_concurrent_streams}
streams are active.
"""
while (
self._pending_request_stream_pool
and self.metadata['active_streams'] < self.allowed_max_concurrent_streams
and self.h2_connected
):
self.metadata['active_streams'] += 1
stream = self._pending_request_stream_pool.popleft()
stream.initiate_request()
self._write_to_transport()
def pop_stream(self, stream_id: int) -> Stream:
"""Perform cleanup when a stream is closed
"""
stream = self.streams.pop(stream_id)
self.metadata['active_streams'] -= 1
self._send_pending_requests()
return stream
def _new_stream(self, request: Request, spider: Spider) -> Stream:
"""Instantiates a new Stream object
"""
stream = Stream(
stream_id=next(self._stream_id_generator),
request=request,
protocol=self,
download_maxsize=getattr(spider, 'download_maxsize', self.metadata['default_download_maxsize']),
download_warnsize=getattr(spider, 'download_warnsize', self.metadata['default_download_warnsize']),
)
self.streams[stream.stream_id] = stream
return stream
def _write_to_transport(self) -> None:
""" Write data to the underlying transport connection
from the HTTP2 connection instance if any
"""
# Reset the idle timeout as connection is still actively sending data
self.resetTimeout()
data = self.conn.data_to_send()
self.transport.write(data)
def request(self, request: Request, spider: Spider) -> Deferred:
if not isinstance(request, Request):
raise TypeError(f'Expected scrapy.http.Request, received {request.__class__.__qualname__}')
stream = self._new_stream(request, spider)
d = stream.get_response()
# Add the stream to the request pool
self._pending_request_stream_pool.append(stream)
# If we receive a request when connection is idle
# We need to initiate pending requests
self._send_pending_requests()
return d
def connectionMade(self) -> None:
"""Called by Twisted when the connection is established. We can start
sending some data now: we should open with the connection preamble.
"""
# Initialize the timeout
self.setTimeout(self.IDLE_TIMEOUT)
destination = self.transport.getPeer()
self.metadata['ip_address'] = ipaddress.ip_address(destination.host)
# Initiate H2 Connection
self.conn.initiate_connection()
self._write_to_transport()
def _lose_connection_with_error(self, errors: List[BaseException]) -> None:
"""Helper function to lose the connection with the error sent as a
reason"""
self._conn_lost_errors += errors
self.transport.loseConnection()
def handshakeCompleted(self) -> None:
"""
Close the connection if it's not made via the expected protocol
"""
if self.transport.negotiatedProtocol is not None and self.transport.negotiatedProtocol != PROTOCOL_NAME:
# we have not initiated the connection yet, no need to send a GOAWAY frame to the remote peer
self._lose_connection_with_error([InvalidNegotiatedProtocol(self.transport.negotiatedProtocol)])
def _check_received_data(self, data: bytes) -> None:
"""Checks for edge cases where the connection to remote fails
without raising an appropriate H2Error
Arguments:
data -- Data received from the remote
"""
if data.startswith(b'HTTP/2.0 405 Method Not Allowed'):
raise MethodNotAllowed405(self.metadata['ip_address'])
def dataReceived(self, data: bytes) -> None:
# Reset the idle timeout as connection is still actively receiving data
self.resetTimeout()
try:
self._check_received_data(data)
events = self.conn.receive_data(data)
self._handle_events(events)
except H2Error as e:
if isinstance(e, FrameTooLargeError):
# hyper-h2 does not drop the connection in this scenario, we
# need to abort the connection manually.
self._conn_lost_errors += [e]
self.transport.abortConnection()
return
# Save this error as ultimately the connection will be dropped
# internally by hyper-h2. Saved error will be passed to all the streams
# closed with the connection.
self._lose_connection_with_error([e])
finally:
self._write_to_transport()
def timeoutConnection(self) -> None:
"""Called when the connection times out.
We lose the connection with TimeoutError"""
# Check whether there are open streams. If there are, we're going to
# want to use the error code PROTOCOL_ERROR. If there aren't, use
# NO_ERROR.
if (
self.conn.open_outbound_streams > 0
or self.conn.open_inbound_streams > 0
or self.metadata['active_streams'] > 0
):
error_code = ErrorCodes.PROTOCOL_ERROR
else:
error_code = ErrorCodes.NO_ERROR
self.conn.close_connection(error_code=error_code)
self._write_to_transport()
self._lose_connection_with_error([
TimeoutError(f"Connection was IDLE for more than {self.IDLE_TIMEOUT}s")
])
def connectionLost(self, reason: Failure = connectionDone) -> None:
"""Called by Twisted when the transport connection is lost.
No need to write anything to transport here.
"""
# Cancel the timeout if not done yet
self.setTimeout(None)
# Notify the connection pool instance such that no new requests are
# sent over current connection
if not reason.check(connectionDone):
self._conn_lost_errors.append(reason)
self._conn_lost_deferred.callback(self._conn_lost_errors)
for stream in self.streams.values():
if stream.metadata['request_sent']:
close_reason = StreamCloseReason.CONNECTION_LOST
else:
close_reason = StreamCloseReason.INACTIVE
stream.close(close_reason, self._conn_lost_errors, from_protocol=True)
self.metadata['active_streams'] -= len(self.streams)
self.streams.clear()
self._pending_request_stream_pool.clear()
self.conn.close_connection()
def _handle_events(self, events: List[Event]) -> None:
"""Private method which acts as a bridge between the events
received from the HTTP/2 data and IH2EventsHandler
Arguments:
events -- A list of events that the remote peer triggered by sending data
"""
for event in events:
if isinstance(event, ConnectionTerminated):
self.connection_terminated(event)
elif isinstance(event, DataReceived):
self.data_received(event)
elif isinstance(event, ResponseReceived):
self.response_received(event)
elif isinstance(event, StreamEnded):
self.stream_ended(event)
elif isinstance(event, StreamReset):
self.stream_reset(event)
elif isinstance(event, WindowUpdated):
self.window_updated(event)
elif isinstance(event, SettingsAcknowledged):
self.settings_acknowledged(event)
elif isinstance(event, UnknownFrameReceived):
logger.warning('Unknown frame received: %s', event.frame)
    # Event handler functions start here
def connection_terminated(self, event: ConnectionTerminated) -> None:
self._lose_connection_with_error([
RemoteTerminatedConnection(self.metadata['ip_address'], event)
])
def data_received(self, event: DataReceived) -> None:
try:
stream = self.streams[event.stream_id]
except KeyError:
pass # We ignore server-initiated events
else:
stream.receive_data(event.data, event.flow_controlled_length)
def response_received(self, event: ResponseReceived) -> None:
try:
stream = self.streams[event.stream_id]
except KeyError:
pass # We ignore server-initiated events
else:
stream.receive_headers(event.headers)
def settings_acknowledged(self, event: SettingsAcknowledged) -> None:
self.metadata['settings_acknowledged'] = True
# Send off all the pending requests as now we have
# established a proper HTTP/2 connection
self._send_pending_requests()
# Update certificate when our HTTP/2 connection is established
self.metadata['certificate'] = Certificate(self.transport.getPeerCertificate())
def stream_ended(self, event: StreamEnded) -> None:
try:
stream = self.pop_stream(event.stream_id)
except KeyError:
pass # We ignore server-initiated events
else:
stream.close(StreamCloseReason.ENDED, from_protocol=True)
def stream_reset(self, event: StreamReset) -> None:
try:
stream = self.pop_stream(event.stream_id)
except KeyError:
pass # We ignore server-initiated events
else:
stream.close(StreamCloseReason.RESET, from_protocol=True)
def window_updated(self, event: WindowUpdated) -> None:
if event.stream_id != 0:
self.streams[event.stream_id].receive_window_update()
else:
# Send leftover data for all the streams
for stream in self.streams.values():
stream.receive_window_update()
@implementer(IProtocolNegotiationFactory)
class H2ClientFactory(Factory):
def __init__(self, uri: URI, settings: Settings, conn_lost_deferred: Deferred) -> None:
self.uri = uri
self.settings = settings
self.conn_lost_deferred = conn_lost_deferred
def buildProtocol(self, addr) -> H2ClientProtocol:
return H2ClientProtocol(self.uri, self.settings, self.conn_lost_deferred)
def acceptableProtocols(self) -> List[bytes]:
return [PROTOCOL_NAME]
| bsd-3-clause | -3,718,572,256,000,780,300 | 38.526316 | 112 | 0.646169 | false |
afaheem88/tempest | tempest/thirdparty/boto/base.py | 1 | 14745 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging as orig_logging
import re
import boto
from boto import exception
from boto import s3
from oslo_log import log as logging
import six
from six.moves.urllib import parse as urlparse
from tempest_lib import exceptions as lib_exc
import tempest.clients
from tempest.common.utils import file_utils
from tempest import config
from tempest import exceptions
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
def decision_maker():
S3_CAN_CONNECT_ERROR = None
secret_matcher = re.compile("[A-Za-z0-9+/]{32,}") # 40 in other system
id_matcher = re.compile("[A-Za-z0-9]{20,}")
def all_read(*args):
return all(map(file_utils.have_effective_read_access, args))
boto_logger = logging.getLogger('boto')
level = boto_logger.logger.level
# suppress logging for boto
boto_logger.logger.setLevel(orig_logging.CRITICAL)
def _cred_sub_check(connection_data):
if not id_matcher.match(connection_data["aws_access_key_id"]):
raise Exception("Invalid AWS access Key")
if not secret_matcher.match(connection_data["aws_secret_access_key"]):
raise Exception("Invalid AWS secret Key")
raise Exception("Unknown (Authentication?) Error")
# NOTE(andreaf) Setting up an extra manager here is redundant,
# and should be removed.
openstack = tempest.clients.Manager()
try:
if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
raise Exception("Failed to get hostname from the s3_url")
s3client = openstack.s3_client
try:
s3client.get_bucket("^INVALID*#()@INVALID.")
except exception.BotoServerError as exc:
if exc.status == 403:
_cred_sub_check(s3client.connection_data)
except Exception as exc:
S3_CAN_CONNECT_ERROR = str(exc)
except lib_exc.Unauthorized:
S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
" failed to get them even by keystoneclient"
boto_logger.logger.setLevel(level)
return {'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR}
class BotoExceptionMatcher(object):
STATUS_RE = r'[45]\d\d'
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
""":returns: Returns with an error string if it does not match,
returns with None when it matches.
"""
if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
if re.match(self.STATUS_RE, str(exc.status)) is None:
return ("Status code (%s) does not match"
"the expected re pattern \"%s\""
% (exc.status, self.STATUS_RE))
if re.match(self.CODE_RE, str(exc.error_code)) is None:
return ("Error code (%s) does not match" +
"the expected re pattern \"%s\"") %\
(exc.error_code, self.CODE_RE)
return None
class ClientError(BotoExceptionMatcher):
STATUS_RE = r'4\d\d'
class ServerError(BotoExceptionMatcher):
STATUS_RE = r'5\d\d'
def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
"""
    Usable for adding ExceptionMatcher(s) into the exception tree.
    Non-leaf elements do a wildcard match.
    """
    # only literal characters and '.' are expected in error_code
if not isinstance(error_data, six.string_types):
(error_code, status_code) = map(str, error_data)
else:
status_code = None
error_code = error_data
parts = error_code.split('.')
basematch = ""
num_parts = len(parts)
max_index = num_parts - 1
add_cls = error_cls
for i_part in six.moves.xrange(num_parts):
part = parts[i_part]
leaf = i_part == max_index
if not leaf:
match = basematch + part + "[.].*"
else:
match = basematch + part
basematch += part + "[.]"
if not hasattr(add_cls, part):
cls_dict = {"CODE_RE": match}
if leaf and status_code is not None:
cls_dict["STATUS_RE"] = status_code
cls = type(part, (base, ), cls_dict)
setattr(add_cls, part, cls())
add_cls = cls
elif leaf:
raise LookupError("Tries to redefine an error code \"%s\"" % part)
else:
add_cls = getattr(add_cls, part)
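# Illustrative sketch (not part of the original module): given the tuples
# registered at the bottom of this file, _add_matcher_class attaches matcher
# instances to BotoTestCase.s3_error_code. For example, the entry
# ('NoSuchBucket', 404) results in roughly the equivalent of:
#
#     class NoSuchBucket(ClientError):
#         CODE_RE = "NoSuchBucket"
#         STATUS_RE = "404"
#     BotoTestCase.s3_error_code.client.NoSuchBucket = NoSuchBucket()
#
# A dotted code such as 'Foo.Bar' (a hypothetical example, none appear in the
# S3 lists below) would additionally create a non-leaf 'Foo' matcher whose
# CODE_RE of "Foo[.].*" wildcard-matches any 'Foo.*' error code.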
# TODO(afazekas): classmethod handling
def friendly_function_name_simple(call_able):
name = ""
if hasattr(call_able, "im_class"):
name += call_able.im_class.__name__ + "."
name += call_able.__name__
return name
def friendly_function_call_str(call_able, *args, **kwargs):
string = friendly_function_name_simple(call_able)
string += "(" + ", ".join(map(str, args))
if len(kwargs):
if len(args):
string += ", "
string += ", ".join("=".join(map(str, (key, value)))
for (key, value) in kwargs.items())
return string + ")"
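# Illustrative example (not part of the original module): with made-up names
# "conn", "demo" and "force" used purely for illustration,
#
#     friendly_function_call_str(conn.delete_bucket, "demo", force=True)
#
# returns the string "delete_bucket(demo, force=True)"; on Python 2, where
# bound methods expose im_class, the class name is prefixed as well, e.g.
# "S3Connection.delete_bucket(demo, force=True)".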
class BotoTestCase(tempest.test.BaseTestCase):
"""Recommended to use as base class for boto related test."""
credentials = ['primary']
@classmethod
def skip_checks(cls):
super(BotoTestCase, cls).skip_checks()
if not CONF.compute_feature_enabled.ec2_api:
raise cls.skipException("The EC2 API is not available")
if not CONF.identity_feature_enabled.api_v2 or \
not CONF.identity.auth_version == 'v2':
raise cls.skipException("Identity v2 is not available")
@classmethod
def resource_setup(cls):
super(BotoTestCase, cls).resource_setup()
cls.conclusion = decision_maker()
        # The trash contains cleanup functions and parameters in tuples
# (function, *args, **kwargs)
cls._resource_trash_bin = {}
cls._sequence = -1
if (hasattr(cls, "S3") and
cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
raise cls.skipException("S3 " + cls.__name__ + ": " +
cls.conclusion['S3_CAN_CONNECT_ERROR'])
@classmethod
def addResourceCleanUp(cls, function, *args, **kwargs):
"""Adds CleanUp callable, used by tearDownClass.
Recommended to a use (deep)copy on the mutable args.
"""
cls._sequence = cls._sequence + 1
cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
return cls._sequence
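    # Illustrative usage (not part of the original class): a test's
    # resource_setup might register the cleanup of a bucket it creates, e.g.
    #
    #     cls.addResourceCleanUp(cls.destroy_bucket,
    #                            cls.s3_client.connection_data,
    #                            bucket_name)
    #
    # ("cls.s3_client" and "bucket_name" are hypothetical names). The returned
    # key can later be passed to cancelResourceCleanUp() if the test already
    # released the resource itself.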
@classmethod
def cancelResourceCleanUp(cls, key):
"""Cancel Clean up request."""
del cls._resource_trash_bin[key]
# TODO(afazekas): Add "with" context handling
def assertBotoError(self, excMatcher, callableObj,
*args, **kwargs):
"""Example usage:
self.assertBotoError(self.ec2_error_code.client.
InvalidKeyPair.Duplicate,
self.client.create_keypair,
key_name)
"""
try:
callableObj(*args, **kwargs)
except exception.BotoServerError as exc:
error_msg = excMatcher.match(exc)
if error_msg is not None:
raise self.failureException(error_msg)
else:
raise self.failureException("BotoServerError not raised")
@classmethod
def resource_cleanup(cls):
"""Calls the callables added by addResourceCleanUp,
when you overwrite this function don't forget to call this too.
"""
fail_count = 0
trash_keys = sorted(cls._resource_trash_bin, reverse=True)
for key in trash_keys:
(function, pos_args, kw_args) = cls._resource_trash_bin[key]
try:
func_name = friendly_function_call_str(function, *pos_args,
**kw_args)
LOG.debug("Cleaning up: %s" % func_name)
function(*pos_args, **kw_args)
except BaseException:
fail_count += 1
LOG.exception("Cleanup failed %s" % func_name)
finally:
del cls._resource_trash_bin[key]
super(BotoTestCase, cls).resource_cleanup()
        # NOTE(afazekas): let the super be called even on exceptions.
        # The real exceptions are already logged; if the super throws
        # another, it does not cause hidden issues
if fail_count:
raise exceptions.TearDownException(num=fail_count)
s3_error_code = BotoExceptionMatcher()
s3_error_code.server = ServerError()
s3_error_code.client = ClientError()
gone_set = set(('_GONE',))
def assertReSearch(self, regexp, string):
if re.search(regexp, string) is None:
raise self.failureException("regexp: '%s' not found in '%s'" %
(regexp, string))
def assertNotReSearch(self, regexp, string):
if re.search(regexp, string) is not None:
raise self.failureException("regexp: '%s' found in '%s'" %
(regexp, string))
def assertReMatch(self, regexp, string):
if re.match(regexp, string) is None:
raise self.failureException("regexp: '%s' not matches on '%s'" %
(regexp, string))
def assertNotReMatch(self, regexp, string):
if re.match(regexp, string) is not None:
raise self.failureException("regexp: '%s' matches on '%s'" %
(regexp, string))
@classmethod
def destroy_bucket(cls, connection_data, bucket):
"""Destroys the bucket and its content, just for teardown."""
exc_num = 0
try:
with contextlib.closing(
boto.connect_s3(**connection_data)) as conn:
                if isinstance(bucket, six.string_types):
bucket = conn.lookup(bucket)
assert isinstance(bucket, s3.bucket.Bucket)
for obj in bucket.list():
try:
bucket.delete_key(obj.key)
obj.close()
except BaseException:
LOG.exception("Failed to delete key %s " % obj.key)
exc_num += 1
conn.delete_bucket(bucket)
except BaseException:
LOG.exception("Failed to destroy bucket %s " % bucket)
exc_num += 1
if exc_num:
raise exceptions.TearDownException(num=exc_num)
# you can specify tuples if you want to specify the status pattern
for code in (('AccessDenied', 403),
('AccountProblem', 403),
('AmbiguousGrantByEmailAddress', 400),
('BadDigest', 400),
('BucketAlreadyExists', 409),
('BucketAlreadyOwnedByYou', 409),
('BucketNotEmpty', 409),
('CredentialsNotSupported', 400),
('CrossLocationLoggingProhibited', 403),
('EntityTooSmall', 400),
('EntityTooLarge', 400),
('ExpiredToken', 400),
('IllegalVersioningConfigurationException', 400),
('IncompleteBody', 400),
('IncorrectNumberOfFilesInPostRequest', 400),
('InlineDataTooLarge', 400),
('InvalidAccessKeyId', 403),
'InvalidAddressingHeader',
('InvalidArgument', 400),
('InvalidBucketName', 400),
('InvalidBucketState', 409),
('InvalidDigest', 400),
('InvalidLocationConstraint', 400),
('InvalidPart', 400),
('InvalidPartOrder', 400),
('InvalidPayer', 403),
('InvalidPolicyDocument', 400),
('InvalidRange', 416),
('InvalidRequest', 400),
('InvalidSecurity', 403),
('InvalidSOAPRequest', 400),
('InvalidStorageClass', 400),
('InvalidTargetBucketForLogging', 400),
('InvalidToken', 400),
('InvalidURI', 400),
('KeyTooLong', 400),
('MalformedACLError', 400),
('MalformedPOSTRequest', 400),
('MalformedXML', 400),
('MaxMessageLengthExceeded', 400),
('MaxPostPreDataLengthExceededError', 400),
('MetadataTooLarge', 400),
('MethodNotAllowed', 405),
             'MissingAttachment',
('MissingContentLength', 411),
('MissingRequestBodyError', 400),
('MissingSecurityElement', 400),
('MissingSecurityHeader', 400),
('NoLoggingStatusForKey', 400),
('NoSuchBucket', 404),
('NoSuchKey', 404),
('NoSuchLifecycleConfiguration', 404),
('NoSuchUpload', 404),
('NoSuchVersion', 404),
('NotSignedUp', 403),
('NotSuchBucketPolicy', 404),
('OperationAborted', 409),
('PermanentRedirect', 301),
('PreconditionFailed', 412),
('Redirect', 307),
('RequestIsNotMultiPartContent', 400),
('RequestTimeout', 400),
('RequestTimeTooSkewed', 403),
('RequestTorrentOfBucketError', 400),
('SignatureDoesNotMatch', 403),
('TemporaryRedirect', 307),
('TokenRefreshRequired', 400),
('TooManyBuckets', 400),
('UnexpectedContent', 400),
('UnresolvableGrantByEmailAddress', 400),
('UserKeyMustBeSpecified', 400)):
_add_matcher_class(BotoTestCase.s3_error_code.client,
code, base=ClientError)
for code in (('InternalError', 500),
('NotImplemented', 501),
('ServiceUnavailable', 503),
('SlowDown', 503)):
_add_matcher_class(BotoTestCase.s3_error_code.server,
code, base=ServerError)
| apache-2.0 | -9,187,748,402,305,624,000 | 37.498695 | 78 | 0.571448 | false |
pberkes/persistent_locals | test_deco.py | 1 | 2351 | import unittest
import deco
class _TestException(Exception):
pass
@deco.persistent_locals
def _globalfunc(x):
z = 2*x
return z
_a = 2
@deco.persistent_locals
def _globaldependent(x):
z = x + _a
return z
@deco.persistent_locals
def _toberemoved(x):
z = 2*x
return z
class TestPersistLocals(unittest.TestCase):
def test_outer_scope(self):
_globalfunc(2)
self.assertEqual(_globalfunc.locals['x'], 2)
self.assertEqual(_globalfunc.locals['z'], 4)
def test_global_name_removed(self):
global _toberemoved
f = _toberemoved
f(2) # should pass
del _toberemoved
f(2) # might fail if 'f' looks for a global name '_toberemoved'
def test_globals_are_flexible(self):
global _a
self.assertEqual(_globaldependent(2), 4)
_a = 3
self.assertEqual(_globaldependent(2), 5)
def test_inner_scope(self):
@deco.persistent_locals
def is_sum_lt_prod(a,b,c):
sum = a+b+c
prod = a*b*c
return sum<prod
self.assertEqual(is_sum_lt_prod.locals, {})
is_sum_lt_prod(2,3,4)
self.assertEqual(set(is_sum_lt_prod.locals.keys()),
set(['a','b','c','sum','prod']))
self.assertEqual(is_sum_lt_prod.locals['sum'], 2+3+4)
self.assertEqual(is_sum_lt_prod.locals['prod'], 2*3*4)
def test_args(self):
@deco.persistent_locals
def f(x, *args):
return x, args
x, args = f(2,3,4)
self.assertEqual(x, 2)
self.assertEqual(args, (3,4))
self.assertEqual(f.locals['x'], 2)
self.assertEqual(f.locals['args'], (3,4))
def test_exception(self):
@deco.persistent_locals
def f(x):
y = 3
raise _TestException
z = 4 # this local variable is never initialized
self.assertRaises(_TestException, f, 0)
self.assertEqual(f.locals, {'x': 0, 'y': 3})
def test_late_return(self):
def g(a):
return a
@deco.persistent_locals
def f(x):
try:
return x
finally:
g(1)
f(0)
self.assertEqual(f.locals, {'x': 0, 'g': g})
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,609,870,817,239,272,400 | 24.27957 | 71 | 0.535517 | false |
Ecotrust/TEKDB | TEKDB/TEKDB/settings.py | 1 | 5770 | """
Django settings for TEKDB project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lbgg^obk_vnj1o%s-u)vy+6@%=)uk4011d!!vub_5s40(^+mzp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
'localhost',
u'demo-tekdb.herokuapp.com',
]
# Application definition
INSTALLED_APPS = [
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
# 'registration',
'leaflet',
'nested_admin',
'ckeditor',
'explore',
'login',
'TEKDB',
'Lookup',
'Accounts',
'Relationships',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TEKDB.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'login/templates'),
os.path.join(BASE_DIR, 'explore/templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TEKDB.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'tekdb',
'USER': 'postgres',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
#Registration
ACCOUNT_ACTIVATION_DAYS = 14
REGISTRATION_OPEN = False
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, "explore", "static"),
# os.path.join(BASE_DIR, "TEKDB", "static"),
# ]
### DJANGO-REGISTRATION SETTINGS ###
REGISTRATION_OPEN = True
SEARCH_CATEGORIES = [
'all',
'places',
'resources',
'activities',
'citations',
'media',
]
#Locality? People?
AUTH_USER_MODEL = 'Accounts.Users'
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'Full',
},
'custom': {
'toolbar': 'Custom',
'toolbar_Custom': [
['Format'],
['Bold', 'Italic', 'Underline','Strike','Subscript','Superscript'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock'],
['Link', 'Unlink'],
['Image','Table','HorizontalRule','SpecialChar'],
[ 'TextColor','BGColor' ],
['Undo','Redo'],
['RemoveFormat', 'Source']
]
}
}
RECORD_ICONS = {
'activity': '/static/explore/img/activity.png',
'citation': '/static/explore/img/citation.png',
'place': '/static/explore/img/place.png',
'media': '/static/explore/img/media.png',
'event': '/static/explore/img/activity.png',
'resource': '/static/explore/img/resource.png',
}
# Set this in local_settings.py
DATABASE_GEOGRAPHY = {
'default_lon': -11131949.08,
'default_lat': 4865942.28,
'default_zoom': 3,
'map_template': 'gis/admin/ol2osm.html'
}
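# A minimal sketch of what TEKDB/local_settings.py (imported near the bottom
# of this file) might contain to override the values above (a hypothetical
# example, not part of this repository):
#
#     DEBUG = True
#     SECRET_KEY = 'replace-me-with-a-real-secret'
#     ALLOWED_HOSTS = ['tekdb.example.org']
#     DATABASE_GEOGRAPHY = {
#         'default_lon': -13750000.0,
#         'default_lat': 5750000.0,
#         'default_zoom': 8,
#         'map_template': 'gis/admin/ol2osm.html',
#     }
#
# Because of "from TEKDB.local_settings import *", any name defined there
# replaces the default defined in this file.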
ADMIN_SITE_HEADER = 'TEK DB Admin'
from TEKDB.local_settings import *
### HEROKU SETTINGS (NOT FOR PRODUCTION!!!)
### Update database configuration with $DATABASE_URL.
#
# import dj_database_url
# db_from_env = dj_database_url.config(conn_max_age=500)
# DATABASES['default'].update(db_from_env)
#
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| mit | -8,495,687,464,848,662,000 | 24.418502 | 140 | 0.64922 | false |
gautelinga/BERNAISE | problems/porous.py | 1 | 10258 | import dolfin as df
import os
from . import *
from common.io import mpi_is_root, load_mesh
from common.bcs import Fixed, Pressure, Charged
from ufl import sign
import numpy as np
__author__ = "Gaute Linga"
class PeriodicBoundary(df.SubDomain):
# Left boundary is target domain
def __init__(self, Ly, grid_spacing):
self.Ly = Ly
self.grid_spacing = grid_spacing
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[1], -self.Ly/2) and on_boundary)
def map(self, x, y):
y[0] = x[0]
y[1] = x[1] - self.Ly
class Left(df.SubDomain):
def __init__(self, Lx):
self.Lx = Lx
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[0], -self.Lx/2) and on_boundary)
class Right(df.SubDomain):
def __init__(self, Lx):
self.Lx = Lx
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
return bool(df.near(x[0], self.Lx/2) and on_boundary)
class Obstacles(df.SubDomain):
def __init__(self, Lx, centroids, rad, grid_spacing):
self.Lx = Lx
self.centroids = centroids
self.rad = rad
self.grid_spacing = grid_spacing
df.SubDomain.__init__(self)
def inside(self, x, on_boundary):
dx = self.centroids - np.outer(np.ones(len(self.centroids)), x)
dist = np.sqrt(dx[:, 0]**2 + dx[:, 1]**2)
return bool(on_boundary
and any(dist < self.rad + 0.1*self.grid_spacing))
def problem():
info_cyan("Intrusion of one fluid into another in a porous medium.")
# Define solutes
# Format: name, valency, diffusivity in phase 1, diffusivity in phase
# 2, beta in phase 1, beta in phase 2
solutes = [["c_p", 1, 1e-4, 1e-2, 4., 1.],
["c_m", -1, 1e-4, 1e-2, 4., 1.]]
# Format: name : (family, degree, is_vector)
base_elements = dict(u=["Lagrange", 2, True],
p=["Lagrange", 1, False],
phi=["Lagrange", 1, False],
g=["Lagrange", 1, False],
c=["Lagrange", 1, False],
V=["Lagrange", 1, False])
factor = 1./2.
sigma_e = -1.
# Default parameters to be loaded unless starting from checkpoint.
parameters = dict(
solver="basic",
folder="results_porous",
restart_folder=False,
enable_NS=True,
enable_PF=True,
enable_EC=True,
save_intv=5,
stats_intv=5,
checkpoint_intv=50,
tstep=0,
dt=0.08,
t_0=0.,
T=20.,
grid_spacing=0.05,
interface_thickness=factor*0.060,
solutes=solutes,
base_elements=base_elements,
Lx=4.,
Ly=3.,
rad_init=0.25,
#
surface_tension=24.5,
grav_const=0.0,
# inlet_velocity=0.1,
pressure_left=1000.,
pressure_right=0.,
V_left=0.,
V_right=0.,
surface_charge=sigma_e,
concentration_init=1.,
        front_position_init=0.1,  # fraction of the domain "filled" initially
solutes_in_oil=False,
#
pf_mobility_coeff=factor*0.000040,
density=[1000., 1000.],
viscosity=[100., 10.],
permittivity=[1., 1.],
#
initial_interface="flat",
#
use_iterative_solvers=False,
use_pressure_stabilization=False
)
return parameters
def constrained_domain(Ly, grid_spacing, **namespace):
return PeriodicBoundary(Ly, grid_spacing)
def mesh(Lx=4., Ly=3., grid_spacing=0.04, **namespace):
return load_mesh("meshes/periodic_porous_dx" + str(grid_spacing) + ".h5")
def initialize(Lx, Ly, rad_init,
interface_thickness, solutes, restart_folder,
field_to_subspace, solutes_in_oil, # inlet_velocity,
front_position_init, concentration_init,
pressure_left, pressure_right,
enable_NS, enable_PF, enable_EC, initial_interface,
**namespace):
""" Create the initial state.
The initial states are specified in a dict indexed by field. The format
should be
w_init_field[field] = 'df.Function(...)'.
The work dicts w_ and w_1 are automatically initialized from these
functions elsewhere in the code.
Note: You only need to specify the initial states that are nonzero.
"""
w_init_field = dict()
if not restart_folder:
# if enable_NS:
# try:
# subspace = field_to_subspace["u"].collapse()
# except:
# subspace = field_to_subspace["u"]
# w_init_field["u"] = initial_velocity(0.,
# subspace)
# Phase field
x_0 = -Lx/2 + Lx*front_position_init
if enable_PF:
w_init_field["phi"] = initial_phasefield(
x_0, Ly/2, rad_init, interface_thickness,
field_to_subspace["phi"].collapse(), shape=initial_interface)
if enable_EC:
for solute in solutes:
c_init = initial_phasefield(
x_0, Ly/2, rad_init, interface_thickness,
field_to_subspace[solute[0]].collapse(),
shape=initial_interface)
# Only have ions in phase 1 (phi=1)
if solutes_in_oil:
                    if bool(solutes[0][4] != solutes[1][4] or
                            solutes[0][5] != solutes[1][5]):
info_red("Warning! The beta values of the two "
"ions are different; not supported for "
"initialization")
exp_beta = np.exp(-solutes[0][4] + solutes[0][5])
c_init.vector().set_local(
concentration_init*((1-exp_beta)*0.5*(
1. - c_init.vector().get_local()) + exp_beta))
w_init_field[solute[0]] = c_init
else:
c_init.vector().set_local(
concentration_init*0.5*(
1.-c_init.vector().get_local()))
w_init_field[solute[0]] = c_init
return w_init_field
def create_bcs(Lx, Ly, grid_spacing, # inlet_velocity,
concentration_init, solutes,
surface_charge, V_left, V_right,
pressure_left, pressure_right,
enable_NS, enable_PF, enable_EC, **namespace):
""" The boundaries and boundary conditions are defined here. """
data = np.loadtxt("meshes/periodic_porous_dx" + str(grid_spacing) + ".dat")
centroids = data[:, :2]
rad = data[:, 2]
boundaries = dict(
right=[Right(Lx)],
left=[Left(Lx)],
obstacles=[Obstacles(Lx, centroids, rad, grid_spacing)]
)
# Allocating the boundary dicts
bcs = dict()
bcs_pointwise = dict()
for boundary in boundaries:
bcs[boundary] = dict()
# u_inlet = Fixed((inlet_velocity, 0.))
noslip = Fixed((0., 0.))
p_inlet = Pressure(pressure_left)
p_outlet = Pressure(pressure_right)
phi_inlet = Fixed(-1.0)
phi_outlet = Fixed(1.0)
if enable_NS:
# bcs["left"]["u"] = u_inlet
bcs["obstacles"]["u"] = noslip
bcs["right"]["p"] = p_outlet
bcs["left"]["p"] = p_inlet
# bcs_pointwise["p"] = (0., "x[0] < -{Lx}/2+DOLFIN_EPS && x[1] > {Ly}/2-DOLFIN_EPS".format(Lx=Lx, Ly=Ly))
if enable_PF:
bcs["left"]["phi"] = phi_inlet
bcs["right"]["phi"] = phi_outlet
if enable_EC:
for solute in solutes:
bcs["left"][solute[0]] = Fixed(concentration_init)
# bcs["right"][solute[0]] = Fixed(0.)
bcs["left"]["V"] = Fixed(V_left)
bcs["right"]["V"] = Fixed(V_right)
bcs["obstacles"]["V"] = Charged(surface_charge)
return boundaries, bcs, bcs_pointwise
def initial_phasefield(x0, y0, rad, eps, function_space, shape="flat"):
if shape == "flat":
expr_str = "tanh((x[0]-x0)/(sqrt(2)*eps))"
elif shape == "sine":
expr_str = "tanh((x[0]-x0-eps*sin(2*x[1]*pi))/(sqrt(2)*eps))"
elif shape == "circle":
expr_str = ("tanh(sqrt(2)*(sqrt(pow(x[0]-x0,2)" +
"+pow(x[1]-y0,2))-rad)/eps)")
else:
info_red("Unrecognized shape: " + shape)
exit()
phi_init_expr = df.Expression(expr_str, x0=x0, y0=y0, rad=rad,
eps=eps, degree=2)
phi_init = df.interpolate(phi_init_expr, function_space)
return phi_init
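# Illustrative usage (not part of the original module): with "V" standing in
# for a scalar dolfin FunctionSpace on the mesh (a hypothetical name), the
# helper above could seed a circular interface of radius 0.25, e.g.
#
#     phi_0 = initial_phasefield(x0=0.0, y0=0.0, rad=0.25, eps=0.06,
#                                function_space=V, shape="circle")
#
# The supported shapes are "flat", "sine" and "circle"; anything else prints
# an error and exits.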
def initial_velocity(inlet_velocity, function_space):
#u_init_expr = df.Constant((inlet_velocity, 0.))
u_init_expr = df.Constant((0., 0.))
u_init = df.interpolate(u_init_expr, function_space)
return u_init
def tstep_hook(t, tstep, stats_intv, statsfile, field_to_subspace,
field_to_subproblem, subproblems, w_,
enable_PF,
**namespace):
info_blue("Timestep = {}".format(tstep))
if enable_PF and stats_intv and tstep % stats_intv == 0:
# GL: Seems like a rather awkward way of doing this,
        # but any other way seems to break the simulation.
# Anyhow, a better idea could be to move some of this to a post-processing stage.
# GL: Move into common/utilities at a certain point.
subproblem_name, subproblem_i = field_to_subproblem["phi"]
phi = w_[subproblem_name].split(deepcopy=True)[subproblem_i]
bubble = 0.5*(1.-sign(phi))
mass = df.assemble(bubble*df.dx)
massy = df.assemble(
bubble*df.Expression("x[1]", degree=1)*df.dx)
if mpi_is_root():
with open(statsfile, "a") as outfile:
outfile.write("{} {} {} \n".format(t, mass, massy))
def pf_mobility(phi, gamma):
""" Phase field mobility function. """
# return gamma * (phi**2-1.)**2
# func = 1.-phi**2
# return 0.75 * gamma * 0.5 * (1. + df.sign(func)) * func
return gamma
def start_hook(newfolder, **namespace):
statsfile = os.path.join(newfolder, "Statistics/stats.dat")
return dict(statsfile=statsfile)
| mit | 7,689,344,559,994,583,000 | 32.854785 | 113 | 0.540456 | false |
cbertinato/pandas | pandas/tests/test_window.py | 1 | 158948 | from collections import OrderedDict
from datetime import datetime, timedelta
from itertools import product
import warnings
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame, Index, Series, Timestamp, bdate_range, concat, isna, notna)
from pandas.core.base import SpecificationError
from pandas.core.sorting import safe_sort
import pandas.core.window as rwindow
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
N, K = 100, 10
def assert_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else:
tm.assert_frame_equal(left, right)
@pytest.fixture(params=[True, False])
def raw(request):
return request.param
@pytest.fixture(params=['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann'])
def win_types(request):
return request.param
@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian',
'exponential'])
def win_types_special(request):
return request.param
class Base:
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestApi(Base):
def setup_method(self, method):
self._create_data()
def test_getitem(self):
r = self.frame.rolling(window=5)
tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
r = self.frame.rolling(window=5)[1]
assert r._selected_obj.name == self.frame.columns[1]
# technically this is allowed
r = self.frame.rolling(window=5)[1, 3]
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[1, 3]])
r = self.frame.rolling(window=5)[[1, 3]]
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[1, 3]])
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.rolling(window=5)
with pytest.raises(KeyError, match="Columns not found: 'C'"):
g[['C']]
with pytest.raises(KeyError, match='^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'C']]
def test_attribute_access(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
r = df.rolling(window=5)
tm.assert_series_equal(r.A.sum(), r['A'].sum())
msg = "'Rolling' object has no attribute 'F'"
with pytest.raises(AttributeError, match=msg):
r.F
def tests_skip_nuisance(self):
df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
r = df.rolling(window=3)
result = r[['A', 'B']].sum()
expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9],
'B': [np.nan, np.nan, 18, 21, 24]},
columns=list('AB'))
tm.assert_frame_equal(result, expected)
def test_skip_sum_object_raises(self):
df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
r = df.rolling(window=3)
with pytest.raises(TypeError, match='cannot handle this type'):
r.sum()
def test_agg(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
a_mean = r['A'].mean()
a_std = r['A'].std()
a_sum = r['A'].sum()
b_mean = r['B'].mean()
b_std = r['B'].std()
b_sum = r['B'].sum()
result = r.aggregate([np.mean, np.std])
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([['A', 'B'], ['mean',
'std']])
tm.assert_frame_equal(result, expected)
result = r.aggregate({'A': np.mean, 'B': np.std})
expected = concat([a_mean, b_std], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({'A': ['mean', 'std']})
expected = concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A',
'std')])
tm.assert_frame_equal(result, expected)
result = r['A'].aggregate(['mean', 'sum'])
expected = concat([a_mean, a_sum], axis=1)
expected.columns = ['mean', 'sum']
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
# using a dict with renaming
warnings.simplefilter("ignore", FutureWarning)
result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
expected = concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r.aggregate({'A': {'mean': 'mean',
'sum': 'sum'},
'B': {'mean2': 'mean',
'sum2': 'sum'}})
expected = concat([a_mean, a_sum, b_mean, b_sum], axis=1)
exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({'A': ['mean', 'std'], 'B': ['mean', 'std']})
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
exp_cols = [('A', 'mean'), ('A', 'std'), ('B', 'mean'), ('B', 'std')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_apply(self, raw):
# passed lambda
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
a_sum = r['A'].sum()
result = r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
rcustom = r['B'].apply(lambda x: np.std(x, ddof=1), raw=raw)
expected = concat([a_sum, rcustom], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_consistency(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
result = r.agg([np.sum, np.mean]).columns
expected = pd.MultiIndex.from_product([list('AB'), ['sum', 'mean']])
tm.assert_index_equal(result, expected)
result = r['A'].agg([np.sum, np.mean]).columns
expected = Index(['sum', 'mean'])
tm.assert_index_equal(result, expected)
result = r.agg({'A': [np.sum, np.mean]}).columns
expected = pd.MultiIndex.from_tuples([('A', 'sum'), ('A', 'mean')])
tm.assert_index_equal(result, expected)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
msg = r"cannot perform renaming for (r1|r2) with a nested dictionary"
with pytest.raises(SpecificationError, match=msg):
r.aggregate({'r1': {'A': ['mean', 'sum']},
'r2': {'B': ['mean', 'sum']}})
expected = concat([r['A'].mean(), r['A'].std(),
r['B'].mean(), r['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
warnings.simplefilter("ignore", FutureWarning)
result = r.agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), (
'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
def test_count_nonnumeric_types(self):
# GH12541
cols = ['int', 'float', 'string', 'datetime', 'timedelta', 'periods',
'fl_inf', 'fl_nan', 'str_nan', 'dt_nat', 'periods_nat']
df = DataFrame(
{'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'datetime': pd.date_range('20170101', periods=3),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s'),
'periods': [pd.Period('2012-01'), pd.Period('2012-02'),
pd.Period('2012-03')],
'fl_inf': [1., 2., np.Inf],
'fl_nan': [1., 2., np.NaN],
'str_nan': ['aa', 'bb', np.NaN],
'dt_nat': [Timestamp('20170101'), Timestamp('20170203'),
Timestamp(None)],
'periods_nat': [pd.Period('2012-01'), pd.Period('2012-02'),
pd.Period(None)]},
columns=cols)
expected = DataFrame(
{'int': [1., 2., 2.],
'float': [1., 2., 2.],
'string': [1., 2., 2.],
'datetime': [1., 2., 2.],
'timedelta': [1., 2., 2.],
'periods': [1., 2., 2.],
'fl_inf': [1., 2., 2.],
'fl_nan': [1., 2., 1.],
'str_nan': [1., 2., 1.],
'dt_nat': [1., 2., 1.],
'periods_nat': [1., 2., 1.]},
columns=cols)
result = df.rolling(window=2).count()
tm.assert_frame_equal(result, expected)
result = df.rolling(1).count()
expected = df.notna().astype(float)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_window_with_args(self):
        # make sure that we are aggregating window functions correctly with args
r = Series(np.random.randn(100)).rolling(window=10, min_periods=1,
win_type='gaussian')
expected = concat([r.mean(std=10), r.mean(std=.01)], axis=1)
expected.columns = ['<lambda>', '<lambda>']
result = r.aggregate([lambda x: x.mean(std=10),
lambda x: x.mean(std=.01)])
tm.assert_frame_equal(result, expected)
def a(x):
return x.mean(std=10)
def b(x):
return x.mean(std=0.01)
expected = concat([r.mean(std=10), r.mean(std=.01)], axis=1)
expected.columns = ['a', 'b']
result = r.aggregate([a, b])
tm.assert_frame_equal(result, expected)
def test_preserve_metadata(self):
# GH 10565
s = Series(np.arange(100), name='foo')
s2 = s.rolling(30).sum()
s3 = s.rolling(20).sum()
assert s2.name == 'foo'
assert s3.name == 'foo'
@pytest.mark.parametrize("func,window_size,expected_vals", [
('rolling', 2, [[np.nan, np.nan, np.nan, np.nan],
[15., 20., 25., 20.],
[25., 30., 35., 30.],
[np.nan, np.nan, np.nan, np.nan],
[20., 30., 35., 30.],
[35., 40., 60., 40.],
[60., 80., 85., 80]]),
('expanding', None, [[10., 10., 20., 20.],
[15., 20., 25., 20.],
[20., 30., 30., 20.],
[10., 10., 30., 30.],
[20., 30., 35., 30.],
[26.666667, 40., 50., 30.],
[40., 80., 60., 30.]])])
def test_multiple_agg_funcs(self, func, window_size, expected_vals):
# GH 15072
df = pd.DataFrame([
['A', 10, 20],
['A', 20, 30],
['A', 30, 40],
['B', 10, 30],
['B', 30, 40],
['B', 40, 80],
['B', 80, 90]], columns=['stock', 'low', 'high'])
f = getattr(df.groupby('stock'), func)
if window_size:
window = f(window_size)
else:
window = f()
index = pd.MultiIndex.from_tuples([
('A', 0), ('A', 1), ('A', 2),
('B', 3), ('B', 4), ('B', 5), ('B', 6)], names=['stock', None])
columns = pd.MultiIndex.from_tuples([
('low', 'mean'), ('low', 'max'), ('high', 'mean'),
('high', 'min')])
expected = pd.DataFrame(expected_vals, index=index, columns=columns)
result = window.agg(OrderedDict((
('low', ['mean', 'max']),
('high', ['mean', 'min']),
)))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestWindow(Base):
def setup_method(self, method):
self._create_data()
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.rolling
# valid
c(win_type='boxcar', window=2, min_periods=1)
c(win_type='boxcar', window=2, min_periods=1, center=True)
c(win_type='boxcar', window=2, min_periods=1, center=False)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(win_type='boxcar', window=2, min_periods=w)
with pytest.raises(ValueError):
c(win_type='boxcar', window=2, min_periods=1, center=w)
for wt in ['foobar', 1]:
with pytest.raises(ValueError):
c(win_type=wt, window=2)
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor_with_win_type(self, which, win_types):
# GH 12669
o = getattr(self, which)
c = o.rolling
c(win_type=win_types, window=2)
@pytest.mark.parametrize(
'method', ['sum', 'mean'])
def test_numpy_compat(self, method):
# see gh-12811
w = rwindow.Window(Series([2, 4, 6]), window=[0, 2])
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(w, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(w, method)(dtype=np.float64)
class TestRolling(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.rolling(2).sum()
df.rolling(2, min_periods=1).sum()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.rolling
# valid
c(window=2)
c(window=2, min_periods=1)
c(window=2, min_periods=1, center=True)
c(window=2, min_periods=1, center=False)
# GH 13383
with pytest.raises(ValueError):
c(0)
c(-1)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(window=w)
with pytest.raises(ValueError):
c(window=2, min_periods=w)
with pytest.raises(ValueError):
c(window=2, min_periods=1, center=w)
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor_with_win_type(self, which):
# GH 13383
o = getattr(self, which)
c = o.rolling
with pytest.raises(ValueError):
c(-1, win_type='boxcar')
@pytest.mark.parametrize(
'window', [timedelta(days=3), pd.Timedelta(days=3)])
def test_constructor_with_timedelta_window(self, window):
# GH 15440
n = 10
df = DataFrame({'value': np.arange(n)},
index=pd.date_range('2015-12-24', periods=n, freq="D"))
expected_data = np.append([0., 1.], np.arange(3., 27., 3))
result = df.rolling(window=window).sum()
expected = DataFrame({'value': expected_data},
index=pd.date_range('2015-12-24', periods=n,
freq="D"))
tm.assert_frame_equal(result, expected)
expected = df.rolling('3D').sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'window', [timedelta(days=3), pd.Timedelta(days=3), '3D'])
def test_constructor_timedelta_window_and_minperiods(self, window, raw):
# GH 15305
n = 10
df = DataFrame({'value': np.arange(n)},
index=pd.date_range('2017-08-08', periods=n, freq="D"))
expected = DataFrame(
{'value': np.append([np.NaN, 1.], np.arange(3., 27., 3))},
index=pd.date_range('2017-08-08', periods=n, freq="D"))
result_roll_sum = df.rolling(window=window, min_periods=2).sum()
result_roll_generic = df.rolling(window=window,
min_periods=2).apply(sum, raw=raw)
tm.assert_frame_equal(result_roll_sum, expected)
tm.assert_frame_equal(result_roll_generic, expected)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'sum', 'max', 'min', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
r = rwindow.Rolling(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(r, method)(dtype=np.float64)
def test_closed(self):
df = DataFrame({'A': [0, 1, 2, 3, 4]})
# closed only allowed for datetimelike
with pytest.raises(ValueError):
df.rolling(window=3, closed='neither')
@pytest.mark.parametrize("func", ['min', 'max'])
def test_closed_one_entry(self, func):
# GH24718
ser = pd.Series(data=[2], index=pd.date_range('2000', periods=1))
result = getattr(ser.rolling('10D', closed='left'), func)()
tm.assert_series_equal(result, pd.Series([np.nan], index=ser.index))
@pytest.mark.parametrize("func", ['min', 'max'])
def test_closed_one_entry_groupby(self, func):
# GH24718
ser = pd.DataFrame(data={'A': [1, 1, 2], 'B': [3, 2, 1]},
index=pd.date_range('2000', periods=3))
result = getattr(
ser.groupby('A', sort=False)['B'].rolling('10D', closed='left'),
func)()
exp_idx = pd.MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index],
names=('A', None))
expected = pd.Series(data=[np.nan, 3, np.nan], index=exp_idx, name='B')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("input_dtype", ['int', 'float'])
@pytest.mark.parametrize("func,closed,expected", [
('min', 'right', [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
('min', 'both', [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),
('min', 'neither', [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),
('min', 'left', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),
('max', 'right', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
('max', 'both', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
('max', 'neither', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),
('max', 'left', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8])
])
def test_closed_min_max_datetime(self, input_dtype,
func, closed,
expected):
# see gh-21704
ser = pd.Series(data=np.arange(10).astype(input_dtype),
index=pd.date_range('2000', periods=10))
result = getattr(ser.rolling('3D', closed=closed), func)()
expected = pd.Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
def test_closed_uneven(self):
# see gh-21704
ser = pd.Series(data=np.arange(10),
index=pd.date_range('2000', periods=10))
# uneven
ser = ser.drop(index=ser.index[[1, 5]])
result = ser.rolling('3D', closed='left').min()
expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6],
index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func,closed,expected", [
('min', 'right', [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),
('min', 'both', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),
('min', 'neither', [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),
('min', 'left', [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]),
('max', 'right', [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),
('max', 'both', [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),
('max', 'neither', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),
('max', 'left', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan])
])
def test_closed_min_max_minp(self, func, closed, expected):
# see gh-21704
ser = pd.Series(data=np.arange(10),
index=pd.date_range('2000', periods=10))
ser[ser.index[-3:]] = np.nan
result = getattr(ser.rolling('3D', min_periods=2, closed=closed),
func)()
expected = pd.Series(expected, index=ser.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("closed,expected", [
('right', [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),
('both', [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),
('neither', [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),
('left', [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7])
])
def test_closed_median_quantile(self, closed, expected):
# GH 26005
ser = pd.Series(data=np.arange(10),
index=pd.date_range('2000', periods=10))
roll = ser.rolling('3D', closed=closed)
expected = pd.Series(expected, index=ser.index)
result = roll.median()
tm.assert_series_equal(result, expected)
result = roll.quantile(0.5)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('roller', ['1s', 1])
def tests_empty_df_rolling(self, roller):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
# empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum()
tm.assert_frame_equal(result, expected)
def test_empty_window_median_quantile(self):
# GH 26005
expected = pd.Series([np.nan, np.nan, np.nan])
roll = pd.Series(np.arange(3)).rolling(0)
result = roll.median()
tm.assert_series_equal(result, expected)
result = roll.quantile(0.1)
tm.assert_series_equal(result, expected)
def test_missing_minp_zero(self):
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.rolling(1, min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.rolling(1, min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
def test_missing_minp_zero_variable(self):
# https://github.com/pandas-dev/pandas/pull/18921
x = pd.Series([np.nan] * 4,
index=pd.DatetimeIndex(['2017-01-01', '2017-01-04',
'2017-01-06', '2017-01-07']))
result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum()
expected = pd.Series(0.0, index=x.index)
tm.assert_series_equal(result, expected)
def test_multi_index_names(self):
# GH 16789, 16825
cols = pd.MultiIndex.from_product([['A', 'B'], ['C', 'D', 'E']],
names=['1', '2'])
df = DataFrame(np.ones((10, 6)), columns=cols)
result = df.rolling(3).cov()
tm.assert_index_equal(result.columns, df.columns)
assert result.index.names == [None, '1', '2']
@pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame])
def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
with pytest.raises(NotImplementedError):
iter(obj.rolling(2))
def test_rolling_axis_sum(self, axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame({
i: [np.nan] * 2 + [3.0] * 8
for i in range(20)
})
else:
# axis == 1
expected = DataFrame([
[np.nan] * 2 + [3.0] * 18
] * 10)
result = df.rolling(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
def test_rolling_axis_count(self, axis_frame):
# see gh-26055
df = DataFrame({'x': range(3), 'y': range(3)})
axis = df._get_axis_number(axis_frame)
if axis in [0, 'index']:
expected = DataFrame({'x': [1.0, 2.0, 2.0], 'y': [1.0, 2.0, 2.0]})
else:
expected = DataFrame({'x': [1.0, 1.0, 1.0], 'y': [2.0, 2.0, 2.0]})
result = df.rolling(2, axis=axis_frame).count()
tm.assert_frame_equal(result, expected)
class TestExpanding(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(min_periods=w)
with pytest.raises(ValueError):
c(min_periods=1, center=w)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'sum', 'max', 'min', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
e = rwindow.Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
@pytest.mark.parametrize(
'expander',
        [1, pytest.param('1s', marks=pytest.mark.xfail(
reason='GH#16425 expanding with '
'offset not supported'))])
def test_empty_df_expanding(self, expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(
index=pd.DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero(self):
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame])
def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
with pytest.raises(NotImplementedError):
iter(obj.expanding(2))
def test_expanding_axis(self, axis_frame):
# see gh-23372.
df = DataFrame(np.ones((10, 20)))
axis = df._get_axis_number(axis_frame)
if axis == 0:
expected = DataFrame({
i: [np.nan] * 2 + [float(j) for j in range(3, 11)]
for i in range(20)
})
else:
# axis == 1
expected = DataFrame([
[np.nan] * 2 + [float(i) for i in range(3, 21)]
] * 10)
result = df.expanding(3, axis=axis_frame).sum()
tm.assert_frame_equal(result, expected)
class TestEWM(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
o = getattr(self, which)
c = o.ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
with pytest.raises(ValueError):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError):
c(alpha=0.5, span=1.5)
# not valid: com < 0
with pytest.raises(ValueError):
c(com=-0.5)
# not valid: span < 1
with pytest.raises(ValueError):
c(span=0.5)
# not valid: halflife <= 0
with pytest.raises(ValueError):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError):
c(alpha=alpha)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions work for different dtypes
#
# NOTE that these were originally yield tests, which is why _create_data
# is called explicitly.
#
# further note that we are only checking rolling for full dtype
# compliance (though both expanding and ewm inherit)
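# The Dtype base class below builds per-dtype inputs (two Series and a
# DataFrame) via _create_dtype_data and compares window=2 rolling results
# against the hard-coded expectations returned by get_expects.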
class Dtype:
window = 2
funcs = {
'count': lambda v: v.count(),
'max': lambda v: v.max(),
'min': lambda v: v.min(),
'sum': lambda v: v.sum(),
'mean': lambda v: v.mean(),
'std': lambda v: v.std(),
'var': lambda v: v.var(),
'median': lambda v: v.median()
}
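    # hard-coded expected outputs for window=2 rolling over
    # sr1 = arange(5), sr2 = arange(10, 0, -2) and
    # df = arange(10).reshape((5, 2)); see _create_dtype_data below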
def get_expects(self):
expects = {
'sr1': {
'count': Series([1, 2, 2, 2, 2], dtype='float64'),
'max': Series([np.nan, 1, 2, 3, 4], dtype='float64'),
'min': Series([np.nan, 0, 1, 2, 3], dtype='float64'),
'sum': Series([np.nan, 1, 3, 5, 7], dtype='float64'),
'mean': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64'),
'std': Series([np.nan] + [np.sqrt(.5)] * 4, dtype='float64'),
'var': Series([np.nan, .5, .5, .5, .5], dtype='float64'),
'median': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64')
},
'sr2': {
'count': Series([1, 2, 2, 2, 2], dtype='float64'),
'max': Series([np.nan, 10, 8, 6, 4], dtype='float64'),
'min': Series([np.nan, 8, 6, 4, 2], dtype='float64'),
'sum': Series([np.nan, 18, 14, 10, 6], dtype='float64'),
'mean': Series([np.nan, 9, 7, 5, 3], dtype='float64'),
'std': Series([np.nan] + [np.sqrt(2)] * 4, dtype='float64'),
'var': Series([np.nan, 2, 2, 2, 2], dtype='float64'),
'median': Series([np.nan, 9, 7, 5, 3], dtype='float64')
},
'df': {
'count': DataFrame({0: Series([1, 2, 2, 2, 2]),
1: Series([1, 2, 2, 2, 2])},
dtype='float64'),
'max': DataFrame({0: Series([np.nan, 2, 4, 6, 8]),
1: Series([np.nan, 3, 5, 7, 9])},
dtype='float64'),
'min': DataFrame({0: Series([np.nan, 0, 2, 4, 6]),
1: Series([np.nan, 1, 3, 5, 7])},
dtype='float64'),
'sum': DataFrame({0: Series([np.nan, 2, 6, 10, 14]),
1: Series([np.nan, 4, 8, 12, 16])},
dtype='float64'),
'mean': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
1: Series([np.nan, 2, 4, 6, 8])},
dtype='float64'),
'std': DataFrame({0: Series([np.nan] + [np.sqrt(2)] * 4),
1: Series([np.nan] + [np.sqrt(2)] * 4)},
dtype='float64'),
'var': DataFrame({0: Series([np.nan, 2, 2, 2, 2]),
1: Series([np.nan, 2, 2, 2, 2])},
dtype='float64'),
'median': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
1: Series([np.nan, 2, 4, 6, 8])},
dtype='float64'),
}
}
return expects
def _create_dtype_data(self, dtype):
sr1 = Series(np.arange(5), dtype=dtype)
sr2 = Series(np.arange(10, 0, -2), dtype=dtype)
df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)
data = {
'sr1': sr1,
'sr2': sr2,
'df': df
}
return data
def _create_data(self):
self.data = self._create_dtype_data(self.dtype)
self.expects = self.get_expects()
def test_dtypes(self):
self._create_data()
for f_name, d_name in product(self.funcs.keys(), self.data.keys()):
f = self.funcs[f_name]
d = self.data[d_name]
exp = self.expects[d_name][f_name]
self.check_dtypes(f, f_name, d, d_name, exp)
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
result = f(roll)
tm.assert_almost_equal(result, exp)
class TestDtype_object(Dtype):
dtype = object
class Dtype_integer(Dtype):
pass
class TestDtype_int8(Dtype_integer):
dtype = np.int8
class TestDtype_int16(Dtype_integer):
dtype = np.int16
class TestDtype_int32(Dtype_integer):
dtype = np.int32
class TestDtype_int64(Dtype_integer):
dtype = np.int64
class Dtype_uinteger(Dtype):
pass
class TestDtype_uint8(Dtype_uinteger):
dtype = np.uint8
class TestDtype_uint16(Dtype_uinteger):
dtype = np.uint16
class TestDtype_uint32(Dtype_uinteger):
dtype = np.uint32
class TestDtype_uint64(Dtype_uinteger):
dtype = np.uint64
class Dtype_float(Dtype):
pass
class TestDtype_float16(Dtype_float):
dtype = np.float16
class TestDtype_float32(Dtype_float):
dtype = np.float32
class TestDtype_float64(Dtype_float):
dtype = np.float64
class TestDtype_category(Dtype):
dtype = 'category'
include_df = False
def _create_dtype_data(self, dtype):
sr1 = Series(range(5), dtype=dtype)
sr2 = Series(range(10, 0, -2), dtype=dtype)
data = {
'sr1': sr1,
'sr2': sr2
}
return data
class DatetimeLike(Dtype):
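    # datetime-like dtypes only support rolling().count(); check_dtypes
    # below expects every other aggregation to raise NotImplementedError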
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
if f_name == 'count':
result = f(roll)
tm.assert_almost_equal(result, exp)
else:
            # other methods are not implemented ATM
with pytest.raises(NotImplementedError):
f(roll)
class TestDtype_timedelta(DatetimeLike):
dtype = np.dtype('m8[ns]')
class TestDtype_datetime(DatetimeLike):
dtype = np.dtype('M8[ns]')
class TestDtype_datetime64UTC(DatetimeLike):
dtype = 'datetime64[ns, UTC]'
def _create_data(self):
pytest.skip("direct creation of extension dtype "
"datetime64[ns, UTC] is not supported ATM")
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10)))
.rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self):
self._check_moment_func(np.nansum, name='sum',
zero_min_periods_equal=False)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(counter, name='count', has_min_periods=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(np.mean, name='mean')
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
result = Series(vals).rolling(5, center=True).mean()
expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
result = Series(vals).rolling(5, win_type='boxcar', center=True).mean()
expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type='boxcar').mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([])
result = vals.rolling(5, center=True, win_type='boxcar').mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type='boxcar').mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
def test_cmov_window_frame(self):
        # GH 8238
vals = np.array([[12.18, 3.64], [10.18, 9.16], [13.24, 14.61],
[4.51, 8.11], [6.15, 11.44], [9.14, 6.21],
                         [11.31, 10.67], [2.94, 6.51], [9.42, 8.39],
                         [12.44, 7.34]])
xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [9.252, 9.392],
[8.644, 9.906], [8.87, 10.208], [6.81, 8.588],
                       [7.792, 8.644], [9.05, 7.824], [np.nan, np.nan],
                       [np.nan, np.nan]])
# DataFrame
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).mean()
tm.assert_frame_equal(DataFrame(xp), rs)
# invalid method
with pytest.raises(AttributeError):
(DataFrame(vals).rolling(5, win_type='boxcar', center=True)
.std())
# sum
xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [46.26, 46.96],
[43.22, 49.53], [44.35, 51.04], [34.05, 42.94],
                       [38.96, 43.22], [45.25, 39.12], [np.nan, np.nan],
                       [np.nan, np.nan]])
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).sum()
tm.assert_frame_equal(DataFrame(xp), rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type='boxcar', min_periods=4,
center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009, 14.03687,
13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556, 13.33889,
13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559, 14.17267,
14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671, 14.03559,
15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607, 14.20036,
14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan]
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
        vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan,
10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345, 9.17869,
12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599, 9.1764,
12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384, 9.56348,
12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618, 9.16786,
13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667, 10.34667,
12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098, 13.65509]
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
'general_gaussian': {'power': 2., 'width': 2.},
'exponential': {'tau': 10}}
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763, 13.89053,
13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161,
13.08516, 12.95111, 12.74577, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129,
12.90702, 12.83757, np.nan, np.nan],
'exponential': [np.nan, np.nan, 9.83364, 11.10472, 11.64551,
12.66138, 12.92379, 12.83770, np.nan, np.nan],
}
xp = Series(xps[win_types_special])
rs = Series(vals).rolling(
5, win_type=win_types_special, center=True).mean(
**kwds[win_types_special])
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
'general_gaussian': {'power': 2., 'width': 2.},
'slepian': {'width': 0.5},
'exponential': {'tau': 10}}
        vals = np.array(range(10), dtype=np.float64)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(
5, win_type=win_types_special, center=True).mean(
**kwds[win_types_special])
tm.assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(np.median, name='median')
def test_rolling_min(self):
self._check_moment_func(np.min, name='min')
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self):
self._check_moment_func(np.max, name='max')
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize('q', [0.0, .1, .5, .9, 1.0])
def test_rolling_quantile(self, q):
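        # reference implementation: linear interpolation between the two
        # nearest order statistics, which matches np.percentile's default
        # 'linear' interpolation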
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1. * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name='quantile',
quantile=q)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range('20100101', periods=row, freq='B')
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize('quantile', [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize('interpolation', ['linear', 'lower', 'higher',
'nearest', 'midpoint'])
@pytest.mark.parametrize('data', [[1., 2., 3., 4., 5., 6., 7.],
[8., 1., 3., 4., 5., 2., 6., 7.],
[0., np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5], [np.nan, 0.7, 0.6]])
def test_rolling_quantile_interpolation_options(self, quantile,
interpolation, data):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(
quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
with pytest.raises(ValueError, match="Interpolation 'invalid'"
" is not supported"):
s.rolling(len(data), min_periods=1).quantile(
0.5, interpolation='invalid')
def test_rolling_quantile_param(self):
ser = Series([0.0, .1, .5, .9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile('foo')
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
def f(x):
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name='apply', func=f, raw=raw)
expected = Series([])
result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
s = Series([None, None, None])
result = s.rolling(2, min_periods=0).apply(lambda x: len(x), raw=raw)
expected = Series([1., 2., 2.])
tm.assert_series_equal(result, expected)
result = s.rolling(2, min_periods=0).apply(len, raw=raw)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('klass', [Series, DataFrame])
@pytest.mark.parametrize(
'method', [lambda x: x.rolling(window=2), lambda x: x.expanding()])
def test_apply_future_warning(self, klass, method):
# gh-5071
s = klass(np.arange(3))
with tm.assert_produces_warning(FutureWarning):
method(s).apply(lambda x: len(x))
def test_rolling_apply_out_of_bounds(self, raw):
# gh-1850
vals = pd.Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, raw=raw)
expected = pd.Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('window', [2, '2s'])
def test_rolling_apply_with_pandas_objects(self, window):
# 5071
df = pd.DataFrame({'A': np.random.randn(5),
'B': np.random.randint(0, 10, size=5)},
index=pd.date_range('20130101', periods=5, freq='s'))
        # we have an equally spaced timeseries index
# so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with pytest.raises(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_std(self):
self._check_moment_func(lambda x: np.std(x, ddof=1),
name='std')
self._check_moment_func(lambda x: np.std(x, ddof=0),
name='std', ddof=0)
def test_rolling_std_1obs(self):
vals = pd.Series([1., 2., 3., 4., 5.])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.] * 5)
tm.assert_series_equal(result, expected)
result = (pd.Series([np.nan, np.nan, 3, 4, 5])
.rolling(3, min_periods=2).std())
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series([0.0011448196318903589, 0.00028718669878572767,
0.00028718669878572767, 0.00028718669878572767,
0.00028718669878572767])
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self):
self._check_moment_func(lambda x: np.var(x, ddof=1),
name='var')
self._check_moment_func(lambda x: np.var(x, ddof=0),
name='var', ddof=0)
@td.skip_if_no_scipy
def test_rolling_skew(self):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name='skew')
@td.skip_if_no_scipy
def test_rolling_kurt(self):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False),
name='kurt')
def _check_moment_func(self, static_comp, name, has_min_periods=True,
has_center=True, has_time_rule=True,
fill_value=None, zero_min_periods_equal=True,
**kwargs):
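        # shared checker: compares the rolling aggregation `name` against the
        # reference function `static_comp` over trailing windows, then
        # exercises the time rule, NaN exclusion, min_periods, oversized
        # windows and center=True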
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods,
center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1],
static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample('B').mean()
frame = self.frame[::2].resample('B').mean()
if has_min_periods:
series_result = get_result(series, window=win,
min_periods=minp)
frame_result = get_result(frame, window=win,
min_periods=minp)
else:
series_result = get_result(series, window=win)
frame_result = get_result(frame, window=win)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1],
static_comp(trunc_series))
tm.assert_series_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1,
min_periods=minp)
expected = get_result(self.series, len(self.series),
min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask],
expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1)
expected = get_result(self.series, len(self.series))
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20,
min_periods=15)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]),
20)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
        # shifted index
s = ['x%d' % x for x in range(12)]
if has_min_periods:
minp = 10
series_xp = get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp).shift(-12).reindex(self.series.index)
frame_xp = get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = get_result(self.series, window=25,
min_periods=minp, center=True)
frame_rs = get_result(self.frame, window=25, min_periods=minp,
center=True)
else:
series_xp = get_result(
self.series.reindex(list(self.series.index) + s),
window=25).shift(-12).reindex(self.series.index)
frame_xp = get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25).shift(-12).reindex(self.frame.index)
series_rs = get_result(self.series, window=25, center=True)
frame_rs = get_result(self.frame, window=25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(name='mean')
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize('adjust', [True, False])
@pytest.mark.parametrize('ignore_na', [True, False])
def test_ewma_cases(self, adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.] * len(s)))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.] * 4))
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
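        # reference: a weighted cumulative mean whose weights w are listed
        # per case below; simple_wma(s, w) is expected to reproduce
        # s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()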
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1. - alpha), 1.]),
(s0, True, True, [np.nan, (1. - alpha), 1.]),
(s0, False, False, [np.nan, (1. - alpha), alpha]),
(s0, False, True, [np.nan, (1. - alpha), alpha]),
(s1, True, False, [(1. - alpha) ** 2, np.nan, 1.]),
(s1, True, True, [(1. - alpha), np.nan, 1.]),
(s1, False, False, [(1. - alpha) ** 2, np.nan, alpha]),
(s1, False, True, [(1. - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1. - alpha) **
3, np.nan, np.nan, 1., np.nan]),
(s2, True, True, [np.nan, (1. - alpha),
np.nan, np.nan, 1., np.nan]),
(s2, False, False, [np.nan, (1. - alpha) **
3, np.nan, np.nan, alpha, np.nan]),
(s2, False, True, [np.nan, (1. - alpha),
np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1. - alpha) **
3, np.nan, (1. - alpha), 1.]),
(s3, True, True, [(1. - alpha) **
2, np.nan, (1. - alpha), 1.]),
(s3, False, False, [(1. - alpha) ** 3, np.nan,
(1. - alpha) * alpha,
alpha * ((1. - alpha) ** 2 + alpha)]),
(s3, False, True, [(1. - alpha) ** 2,
np.nan, (1. - alpha) * alpha, alpha])]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=com, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(name='var')
def test_ewmvol(self):
self._check_ew(name='vol')
def test_ewma_span_com_args(self):
A = self.series.ewm(com=9.5).mean()
B = self.series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20)
with pytest.raises(ValueError):
self.series.ewm().mean()
def test_ewma_halflife_arg(self):
A = self.series.ewm(com=13.932726172912965).mean()
B = self.series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm()
def test_ewm_alpha(self):
# GH 10789
s = Series(self.arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(self):
# GH 10789
s = self.series
with pytest.raises(ValueError):
s.ewm()
with pytest.raises(ValueError):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(self):
# GH 12492
s = Series(self.arr)
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
s.ewm(com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(span=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.0)
with pytest.raises(ValueError, match=msg):
s.ewm(span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(halflife=0.0)
s.ewm(halflife=0.1)
msg = "alpha must satisfy: 0 < alpha <= 1"
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=-0.1)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
with pytest.raises(ValueError, match=msg):
s.ewm(alpha=1.1)
@pytest.mark.parametrize('method', ['mean', 'vol', 'var'])
def test_ew_empty_series(self, method):
vals = pd.Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
def _check_ew(self, name=None, preserve_nan=False):
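        # shared checker for ewm mean/var/vol: result types, NaN exclusion,
        # the GH 7898 min_periods behaviour and degenerate inputs
        # (length 0/1 series, integer data)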
series_result = getattr(self.series.ewm(com=10), name)()
assert isinstance(series_result, Series)
frame_result = getattr(self.frame.ewm(com=10), name)()
assert type(frame_result) == DataFrame
result = getattr(self.series.ewm(com=10), name)()
if preserve_nan:
assert result[self._nan_locs].isna().all()
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
for min_periods in (0, 1):
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == 'mean':
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(Series().ewm(com=50, min_periods=min_periods),
name)()
tm.assert_series_equal(result, Series())
# check series of length 1
result = getattr(Series([1.]).ewm(50, min_periods=min_periods),
name)()
if name == 'mean':
tm.assert_series_equal(result, Series([1.]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
class TestPairwise:
# GH 7738
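    # the same values under duplicated, reordered and mixed-type column
    # labels; pairwise results should be consistent across all of them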
df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
columns=['C', 'C']),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
columns=[1, 0.]),
DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
columns=[0, 1.]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
columns=[1., 'X']), ]
df2 = DataFrame([[None, 1, 1], [None, 1, 2],
[None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
s = Series([1, 1, 3, 8])
def compare(self, result, expected):
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, f):
# DataFrame methods (which do not call _flex_binary_moment())
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.columns)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True)])
def test_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
# in a non-monotonic way, so compare accordingly
results = []
for i, df in enumerate(self.df1s):
result = f(df)
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
safe_sort(df.columns.unique()))
tm.assert_index_equal(result.columns, df.columns)
            results.append(result)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x: x.expanding().cov(pairwise=False),
lambda x: x.expanding().corr(pairwise=False),
lambda x: x.rolling(window=3).cov(pairwise=False),
lambda x: x.rolling(window=3).corr(pairwise=False),
lambda x: x.ewm(com=3).cov(pairwise=False),
lambda x: x.ewm(com=3).corr(pairwise=False), ])
def test_no_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=False
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.index)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ])
def test_pairwise_with_other(self, f):
# DataFrame with another DataFrame, pairwise=True
results = [f(df, self.df2) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
safe_sort(self.df2.columns.unique()))
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y, pairwise=False),
lambda x, y: x.expanding().corr(y, pairwise=False),
lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ])
def test_no_pairwise_with_other(self, f):
# DataFrame with another DataFrame, pairwise=False
results = [f(df, self.df2) if df.columns.is_unique else None
for df in self.df1s]
for (df, result) in zip(self.df1s, results):
if result is not None:
with catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
# we can have int and str columns
expected_index = df.index.union(self.df2.index)
expected_columns = df.columns.union(self.df2.columns)
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
with pytest.raises(ValueError,
match="'arg1' columns are not unique"):
f(df, self.df2)
with pytest.raises(ValueError,
match="'arg2' columns are not unique"):
f(self.df2, df)
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y),
lambda x, y: x.expanding().corr(y),
lambda x, y: x.rolling(window=3).cov(y),
lambda x, y: x.rolling(window=3).corr(y),
lambda x, y: x.ewm(com=3).cov(y),
lambda x, y: x.ewm(com=3).corr(y), ])
def test_pairwise_with_series(self, f):
# DataFrame with a Series
results = ([f(df, self.s) for df in self.df1s] +
[f(self.s, df) for df in self.df1s])
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.index)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
# create the data only once at module level, since the tests never modify it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan,
np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5.,
np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5.,
np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)), ]
def create_dataframes():
return ([DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)),
columns=['a', 'b', 99, 'd', 'd'])] +
[DataFrame(s) for s in create_series()])
def is_constant(x):
values = x.values.ravel()
return len(set(values[notna(values)])) == 1
def no_nans(x):
return x.notna().all().all()
    # each entry is a tuple of (object, is_constant, no_nans)
data = create_series() + create_dataframes()
return [(x, is_constant(x), no_nans(x)) for x in data]
_consistency_data = _create_consistency_data()
def _rolling_consistency_cases():
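    # yield every (window, min_periods, center) combination exercised by
    # test_rolling_consistency, skipping min_periods > window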
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
class TestMomentsConsistency(Base):
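    # (reference function, minimum min_periods it needs, method name)
    # triples used to cross-check rolling/expanding aggregations against
    # Series and NumPy reference implementations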
base_functions = [
(lambda v: Series(v).count(), None, 'count'),
(lambda v: Series(v).max(), None, 'max'),
(lambda v: Series(v).min(), None, 'min'),
(lambda v: Series(v).sum(), None, 'sum'),
(lambda v: Series(v).mean(), None, 'mean'),
(lambda v: Series(v).std(), 1, 'std'),
(lambda v: Series(v).cov(Series(v)), None, 'cov'),
(lambda v: Series(v).corr(Series(v)), None, 'corr'),
(lambda v: Series(v).var(), 1, 'var'),
# restore once GH 8086 is fixed
        # (lambda v: Series(v).skew(), 3, 'skew'),
# (lambda v: Series(v).kurt(), 4, 'kurt'),
# restore once GH 8084 is fixed
        # (lambda v: Series(v).quantile(0.3), None, 'quantile'),
(lambda v: Series(v).median(), None, 'median'),
(np.nanmax, 1, 'max'),
(np.nanmin, 1, 'min'),
(np.nansum, 1, 'sum'),
(np.nanmean, 1, 'mean'),
(lambda v: np.nanstd(v, ddof=1), 1, 'std'),
(lambda v: np.nanvar(v, ddof=1), 1, 'var'),
(np.nanmedian, 1, 'median'),
]
no_nan_functions = [
(np.max, None, 'max'),
(np.min, None, 'min'),
(np.sum, None, 'sum'),
(np.mean, None, 'mean'),
(lambda v: np.std(v, ddof=1), 1, 'std'),
(lambda v: np.var(v, ddof=1), 1, 'var'),
(np.median, None, 'median'),
]
def _create_data(self):
super()._create_data()
self.data = _consistency_data
def setup_method(self, method):
self._create_data()
def _test_moments_consistency(self, min_periods, count, mean, mock_mean,
corr, var_unbiased=None, std_unbiased=None,
cov_unbiased=None, var_biased=None,
std_biased=None, cov_biased=None,
var_debiasing_factors=None):
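        # identities checked for every (x, is_constant, no_nans) dataset:
        # mean vs. mock_mean, corr(x, x), constant-series behaviour, the
        # var/std/cov relationships and the unbiased/biased debiasing factor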
def _non_null_values(x):
values = x.values.ravel()
return set(values[notna(values)].tolist())
for (x, is_constant, no_nans) in self.data:
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected.astype('float64'))
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# assert _non_null_values(corr_x_x).issubset(set([1.]))
# restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x *
var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)
]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
if cov:
cov_x_x = cov(x, x)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isna().equals(y.isna()):
# can only easily test two Series with similar
# structure
continue
                    # check that corr(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 *
(var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y -
(mean_x * mean_y))
@pytest.mark.slow
@pytest.mark.parametrize('min_periods', [0, 1, 2, 3, 4])
@pytest.mark.parametrize('adjust', [True, False])
@pytest.mark.parametrize('ignore_na', [True, False])
def test_ewm_consistency(self, min_periods, adjust, ignore_na):
def _weights(s, com, adjust, ignore_na):
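            # per-observation weights for the mock EWMA below; the test
            # asserts that (s * w).cumsum() / w.cumsum() reproduces
            # s.ewm(com, adjust, ignore_na).mean(), where alpha = 1 / (1 + com)
            # and, for adjust=False, y_t = (1 - alpha) * y_{t-1} + alpha * x_t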
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([
_weights(s.iloc[:, i], com=com, adjust=adjust,
ignore_na=ignore_na)
for i, _ in enumerate(s.columns)], axis=1)
w.index = s.index
w.columns = s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notna()] = _weights(s[s.notna()], com=com,
adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha,
i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
def _variance_debiasing_factors(s, com, adjust, ignore_na):
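            # reliability-weights bias correction applied cumulatively:
            #   factor = (sum w)**2 / ((sum w)**2 - sum(w**2))
            # non-positive denominators are mapped to NaN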
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
def _ewma(s, com, min_periods, adjust, ignore_na):
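            # mock EWMA: weighted cumulative mean of s, forward-filled over
            # NaNs and masked until min_periods non-NaN values have been seen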
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum(
)).fillna(method='ffill')
result[s.expanding().count() < (max(min_periods, 1) if min_periods
else 1)] = np.nan
return result
com = 3.
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: x.expanding().count(),
mean=lambda x: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).mean(),
mock_mean=lambda x: _ewma(x, com=com,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na),
corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).corr(y),
var_unbiased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).var(bias=False)),
std_unbiased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.std(bias=False)),
cov_unbiased=lambda x, y: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.cov(y, bias=False)),
var_biased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.var(bias=True)),
std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).std(bias=True),
cov_biased=lambda x, y: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.cov(y, bias=True)),
var_debiasing_factors=lambda x: (
_variance_debiasing_factors(x, com=com, adjust=adjust,
ignore_na=ignore_na)))
@pytest.mark.slow
@pytest.mark.parametrize(
'min_periods', [0, 1, 2, 3, 4])
def test_expanding_consistency(self, min_periods):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: x.expanding().count(),
mean=lambda x: x.expanding(
min_periods=min_periods).mean(),
mock_mean=lambda x: x.expanding(
min_periods=min_periods).sum() / x.expanding().count(),
corr=lambda x, y: x.expanding(
min_periods=min_periods).corr(y),
var_unbiased=lambda x: x.expanding(
min_periods=min_periods).var(),
std_unbiased=lambda x: x.expanding(
min_periods=min_periods).std(),
cov_unbiased=lambda x, y: x.expanding(
min_periods=min_periods).cov(y),
var_biased=lambda x: x.expanding(
min_periods=min_periods).var(ddof=0),
std_biased=lambda x: x.expanding(
min_periods=min_periods).std(ddof=0),
cov_biased=lambda x, y: x.expanding(
min_periods=min_periods).cov(y, ddof=0),
var_debiasing_factors=lambda x: (
x.expanding().count() /
(x.expanding().count() - 1.)
.replace(0., np.nan)))
# test consistency between expanding_xyz() and either (a)
# expanding_apply of Series.xyz(), or (b) expanding_apply of
# np.nanxyz()
for (x, is_constant, no_nans) in self.data:
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
expanding_f = getattr(
x.expanding(min_periods=min_periods), name)
if (require_min_periods and
(min_periods is not None) and
(min_periods < require_min_periods)):
continue
if name == 'count':
expanding_f_result = expanding_f()
expanding_apply_f_result = x.expanding(
min_periods=0).apply(func=f, raw=True)
else:
if name in ['cov', 'corr']:
expanding_f_result = expanding_f(
pairwise=False)
else:
expanding_f_result = expanding_f()
expanding_apply_f_result = x.expanding(
min_periods=min_periods).apply(func=f, raw=True)
# GH 9422
if name in ['sum', 'prod']:
assert_equal(expanding_f_result,
expanding_apply_f_result)
@pytest.mark.slow
@pytest.mark.parametrize(
'window,min_periods,center', list(_rolling_consistency_cases()))
def test_rolling_consistency(self, window, min_periods, center):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: (
x.rolling(window=window, center=center)
.count()),
mean=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).mean()),
mock_mean=lambda x: (
x.rolling(window=window,
min_periods=min_periods,
center=center).sum()
.divide(x.rolling(window=window,
min_periods=min_periods,
center=center).count())),
corr=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).corr(y)),
var_unbiased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).var()),
std_unbiased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).std()),
cov_unbiased=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).cov(y)),
var_biased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).var(ddof=0)),
std_biased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).std(ddof=0)),
cov_biased=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).cov(y, ddof=0)),
var_debiasing_factors=lambda x: (
x.rolling(window=window, center=center).count()
.divide((x.rolling(window=window, center=center)
.count() - 1.)
.replace(0., np.nan))))
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
for (x, is_constant, no_nans) in self.data:
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center,
min_periods=min_periods), name)
if require_min_periods and (
min_periods is not None) and (
min_periods < require_min_periods):
continue
if name == 'count':
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=0,
center=center).apply(func=f, raw=True)
else:
if name in ['cov', 'corr']:
rolling_f_result = rolling_f(
pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods,
center=center).apply(func=f, raw=True)
# GH 9422
if name in ['sum', 'prod']:
assert_equal(rolling_f_result,
rolling_apply_f_result)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment('rolling', 'cov', window=10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment('rolling', 'corr', window=10,
min_periods=5)
@pytest.mark.parametrize('window', range(7))
def test_rolling_corr_with_zero_variance(self, window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def _check_pairwise_moment(self, dispatch, name, **kwargs):
def get_result(obj, obj2=None):
return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
result = get_result(self.frame)
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = get_result(self.frame[1], self.frame[5])
tm.assert_series_equal(result, expected, check_names=False)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
msg = ("arguments to moment function must be of type"
" np.ndarray/Series/DataFrame")
with pytest.raises(TypeError, match=msg):
rwindow._flex_binary_moment(5, 6, None)
def test_corr_sanity(self):
# GH 3155
df = DataFrame(np.array(
[[0.87024726, 0.18505595], [0.64355431, 0.3091617],
[0.92372966, 0.50552513], [0.00203756, 0.04520709],
[0.84780328, 0.33394331], [0.78369152, 0.63919667]]))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
            try:
                assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
            except AssertionError:
                # show the offending values, then re-raise so the fuzzing
                # check actually fails instead of being silently swallowed
                print(res)
                raise
@pytest.mark.parametrize('method', ['corr', 'cov'])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(
series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame({k: getattr(self.frame[k].rolling(
window=10), method)(frame2[k]) for k in self.frame})
tm.assert_frame_equal(res3, exp)
def test_ewmcov(self):
self._check_binary_ew('cov')
def test_ewmcov_pairwise(self):
self._check_pairwise_moment('ewm', 'cov', span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew('corr')
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment('ewm', 'corr', span=10, min_periods=5)
def _check_binary_ew(self, name):
def func(A, B, com, **kwargs):
return getattr(A.ewm(com, **kwargs), name)(B)
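        # A and B overlap only on part of the index and each carries a block
        # of NaNs, so the assertions below pin down where the first valid
        # ewm cov/corr value appears for the various min_periods settings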
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([]))
# check series of length 1
result = func(
Series([1.]), Series([1.]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([np.NaN]))
msg = "Input arrays must be of the same type!"
# exception raised is Exception
with pytest.raises(Exception, match=msg):
func(A, randn(50), 20, min_periods=5)
def test_expanding_apply_args_kwargs(self, raw):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = df.expanding().apply(np.mean, raw=raw) + 20.
result = df.expanding().apply(mean_w_arg,
raw=raw,
args=(20, ))
tm.assert_frame_equal(result, expected)
result = df.expanding().apply(mean_w_arg,
raw=raw,
kwargs={'const': 20})
tm.assert_frame_equal(result, expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = self.series.expanding().count()
tm.assert_almost_equal(result, self.series.rolling(
window=len(self.series)).count())
def test_expanding_quantile(self):
result = self.series.expanding().quantile(0.5)
rolling_result = self.series.rolling(window=len(self.series),
min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.])
tm.assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'f',
[
lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=False)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=False)),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(
window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(win_type='boxcar',
window=10, min_periods=5).mean()])
def test_rolling_functions_window_non_shrinkage(self, f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
except (ImportError):
# scipy needed for rolling_window
pytest.skip("scipy not available")
def test_rolling_functions_window_non_shrinkage_binary(self):
# corr/cov return a MI DataFrame
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(['A', 'B'], name='foo'),
index=Index(range(4), name='bar'))
df_expected = DataFrame(
columns=Index(['A', 'B'], name='foo'),
index=pd.MultiIndex.from_product([df.index, df.columns],
names=['bar', 'foo']),
dtype='float64')
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True))]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=['a'])
df2['a'] = df2['a'].astype('float64')
df2_expected = df2
functions = [lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(
x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(
x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(
sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(window=10).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(
x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(
x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(
window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(win_type='boxcar',
window=10, min_periods=5).mean(),
]
for f in functions:
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
def test_moment_functions_zero_length_pairwise(self):
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=Index(['a'], name='foo'),
index=Index([], name='bar'))
df2['a'] = df2['a'].astype('float64')
df1_expected = DataFrame(
index=pd.MultiIndex.from_product([df1.index, df1.columns]),
columns=Index([]))
df2_expected = DataFrame(
index=pd.MultiIndex.from_product([df2.index, df2.columns],
names=['bar', 'foo']),
columns=Index(['a'], name='foo'),
dtype='float64')
functions = [lambda x: (x.expanding(min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5)
.corr(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True)),
]
for f in functions:
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]],
columns=Index(['A', 'B'], name='foo'))
df1a = DataFrame([[1, 5], [3, 9]],
index=[0, 2],
columns=Index(['A', 'B'], name='foo'))
df2 = DataFrame([[5, 6], [None, None], [2, 1]],
columns=Index(['X', 'Y'], name='foo'))
df2a = DataFrame([[5, 6], [2, 1]],
index=[0, 2],
columns=Index(['X', 'Y'], name='foo'))
# TODO: xref gh-15826
# .loc is not preserving the names
        result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
expected = DataFrame([[-3.0, -6.0], [-5.0, -10.0]],
columns=Index(['A', 'B'], name='foo'),
index=Index(['X', 'Y'], name='foo'))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 2], [3, 2], [3, 4]],
columns=['A', 'B'],
index=Index(range(3), name='bar'))
df1a = DataFrame([[1, 2], [3, 4]],
index=Index([0, 2], name='bar'),
columns=['A', 'B'])
df2 = DataFrame([[5, 6], [None, None], [2, 1]],
columns=['X', 'Y'],
index=Index(range(3), name='bar'))
df2a = DataFrame([[5, 6], [2, 1]],
index=Index([0, 2], name='bar'),
columns=['X', 'Y'])
result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],
columns=['A', 'B'],
index=Index(['X', 'Y']))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401
])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401
])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr(self):
        # GH 18804: rolling skew over all-equal values should return NaN
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr(self):
        # GH 18804: rolling kurt over all-equal values should return NaN
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
@pytest.mark.parametrize('func,static_comp', [('sum', np.sum),
('mean', np.mean),
('max', np.max),
('min', np.min)],
ids=['sum', 'mean', 'max', 'min'])
def test_expanding_func(self, func, static_comp):
def expanding_func(x, min_periods=1, center=False, axis=0):
exp = x.expanding(min_periods=min_periods,
center=center, axis=axis)
return getattr(exp, func)()
self._check_expanding(expanding_func, static_comp, preserve_nan=False)
def test_expanding_apply(self, raw):
def expanding_mean(x, min_periods=1):
exp = x.expanding(min_periods=min_periods)
result = exp.apply(lambda x: x.mean(), raw=raw)
return result
# TODO(jreback), needed to add preserve_nan=False
# here to make this pass
self._check_expanding(expanding_mean, np.mean, preserve_nan=False)
ser = Series([])
tm.assert_series_equal(ser, ser.expanding().apply(
lambda x: x.mean(), raw=raw))
# GH 8080
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw)
expected = Series([1., 2., 3.])
tm.assert_series_equal(result, expected)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
series_result = func(self.series)
assert isinstance(series_result, Series)
frame_result = func(self.frame)
assert isinstance(frame_result, DataFrame)
result = func(self.series)
tm.assert_almost_equal(result[10], static_comp(self.series[:11]))
if preserve_nan:
assert result.iloc[self._nan_locs].isna().all()
ser = Series(randn(50))
if has_min_periods:
result = func(ser, min_periods=30)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = func(ser, min_periods=15)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(randn(20))
result = func(ser2, min_periods=5)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = func(ser, min_periods=0)
result1 = func(ser, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = func(ser)
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
r = series.resample('D').min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error(self):
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types(self):
# GH12373
types_test = [np.dtype("f{}".format(width)) for width in [4, 8]]
types_test.extend([np.dtype("{}{}".format(sign, width))
for width in [1, 2, 4, 8] for sign in "ui"])
for data_type in types_test:
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = (DataFrame(np.arange(20, dtype=data_type))
.rolling(window=5).max())
assert result.dtypes[0] == np.dtype("f8")
result = (DataFrame(np.arange(20, dtype=data_type))
.rolling(window=5).min())
assert result.dtypes[0] == np.dtype("f8")
class TestGrouperGrouping:
def setup_method(self, method):
self.series = Series(np.arange(10))
self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.arange(40)})
def test_mutated(self):
msg = r"group\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
self.frame.groupby('A', foo=1)
g = self.frame.groupby('A')
assert not g.mutated
g = self.frame.groupby('A', mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby('A')
g_mutated = self.frame.groupby('A', mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby('A')
r = g.rolling(2)
g_mutated = self.frame.groupby('A', mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
result = r.B.count()
tm.assert_series_equal(result, expected)
def test_rolling(self):
g = self.frame.groupby('A')
r = g.rolling(window=4)
for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
tm.assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.rolling(4).quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_rolling_corr_cov(self):
g = self.frame.groupby('A')
r = g.rolling(window=4)
for f in ['corr', 'cov']:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby('A')
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(
lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = pd.DataFrame({'A': ['foo'] * 3 + ['bar'] * 3, 'B': [1] * 6})
g = df.groupby('A')
mi = pd.MultiIndex.from_tuples([('bar', 3), ('bar', 4), ('bar', 5),
('foo', 0), ('foo', 1), ('foo', 2)])
mi.names = ['A', None]
# Grouped column should not be a part of the output
expected = pd.DataFrame([np.nan, 2., 2.] * 2, columns=['B'], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
def test_expanding(self):
g = self.frame.groupby('A')
r = g.expanding()
for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.expanding(), f)())
tm.assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=0)
expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.expanding().quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_expanding_corr_cov(self):
g = self.frame.groupby('A')
r = g.expanding()
for f in ['corr', 'cov']:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.expanding(), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.expanding(), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
g = self.frame.groupby('A')
r = g.expanding()
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(
lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
class TestRollingTS:
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': range(5)}).set_index('A')
self.ragged = DataFrame({'B': range(5)})
self.ragged.index = [Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')]
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]},
index=[Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')])
df
df.rolling('2s').sum()
def test_valid(self):
df = self.regular
# not a valid freq
with pytest.raises(ValueError):
df.rolling(window='foobar')
# not a datetimelike index
with pytest.raises(ValueError):
df.reset_index().rolling(window='foobar')
# non-fixed freqs
for freq in ['2MS', pd.offsets.MonthBegin(2)]:
with pytest.raises(ValueError):
df.rolling(window=freq)
for freq in ['1D', pd.offsets.Day(2), '2ms']:
df.rolling(window=freq)
# non-integer min_periods
for minp in [1.0, 'foo', np.array([1, 2, 3])]:
with pytest.raises(ValueError):
df.rolling(window='1D', min_periods=minp)
# center is not implemented
with pytest.raises(NotImplementedError):
df.rolling(window='1D', center=True)
def test_on(self):
df = self.regular
# not a valid column
with pytest.raises(ValueError):
df.rolling(window='2s', on='foobar')
# column is valid
df = df.copy()
df['C'] = pd.date_range('20130101', periods=len(df))
df.rolling(window='2d', on='C').sum()
# invalid columns
with pytest.raises(ValueError):
df.rolling(window='2d', on='B')
        # ok even though the 'on' column is not part of the selection
df.rolling(window='2d', on='C').B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': range(5)})
assert df.A.is_monotonic
df.rolling('2s', on='A').sum()
df = df.set_index('A')
assert df.index.is_monotonic
df.rolling('2s').sum()
# non-monotonic
df.index = reversed(df.index.tolist())
assert not df.index.is_monotonic
with pytest.raises(ValueError):
df.rolling('2s').sum()
df = df.reset_index()
with pytest.raises(ValueError):
df.rolling('2s', on='A').sum()
def test_frame_on(self):
df = DataFrame({'B': range(5),
'C': pd.date_range('20130101 09:00:00',
periods=5,
freq='3s')})
df['A'] = [Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')]
        # we simulate a time-based index by using 'on'
expected = (df.set_index('A')
.rolling('2s')
.B
.sum()
.reset_index(drop=True)
)
result = (df.rolling('2s', on='A')
.B
.sum()
)
tm.assert_series_equal(result, expected)
# test as a frame
# we should be ignoring the 'on' as an aggregation column
# note that the expected is setting, computing, and resetting
# so the columns need to be switched compared
# to the actual result where they are ordered as in the
# original
expected = (df.set_index('A')
.rolling('2s')[['B']]
.sum()
.reset_index()[['B', 'A']]
)
result = (df.rolling('2s', on='A')[['B']]
.sum()
)
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame({'A': [0, 1, 2, 3, 4],
'B': [0, 1, 2, np.nan, 4],
'C': Index([Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')])},
columns=['A', 'C', 'B'])
expected1 = DataFrame({'A': [0., 1, 3, 3, 7],
'B': [0, 1, 3, np.nan, 4],
'C': df['C']},
columns=['A', 'C', 'B'])
result = df.rolling('2s', on='C').sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name='B')
result = df.rolling('2s', on='C').B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[['A', 'B', 'C']]
result = df.rolling('2s', on='C')[['A', 'B', 'C']].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = pd.date_range('20130101', periods=5, freq='D')
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='1D').sum()
tm.assert_frame_equal(result, expected)
df.index = pd.date_range('20130101', periods=5, freq='2D')
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='2D', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='2D', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window='2D').sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
        # these specs are slightly different but should give the same result
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling('2s').sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling('2s', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame({'A': [1] * 5},
index=[Timestamp('20130101 09:00:01'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:04'),
Timestamp('20130101 09:00:06')])
# closed must be 'right', 'left', 'both', 'neither'
with pytest.raises(ValueError):
self.regular.rolling(window='2s', closed="blabla")
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling('2s', closed='right').sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling('2s').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling('2s', closed='both').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling('2s', closed='left').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling('2s', closed='neither').sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=2).sum()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s').sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='4s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='4s', min_periods=3).sum()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).mean()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).mean()
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).median()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).median()
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).quantile(0.5)
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).quantile(0.5)
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).std(ddof=0)
expected = df.copy()
expected['B'] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s', min_periods=1).std(ddof=1)
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).std(ddof=0)
expected = df.copy()
expected['B'] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).std(ddof=1)
expected = df.copy()
expected['B'] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).var(ddof=0)
expected = df.copy()
expected['B'] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s', min_periods=1).var(ddof=1)
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).var(ddof=0)
expected = df.copy()
expected['B'] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).var(ddof=1)
expected = df.copy()
expected['B'] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window='3s', min_periods=1).skew()
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).skew()
expected = df.copy()
expected['B'] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window='3s', min_periods=1).kurt()
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).kurt()
expected = df.copy()
expected['B'] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).count()
expected = df.copy()
expected['B'] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = self.ragged
result = df.rolling(window='1s').count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).count()
expected = df.copy()
expected['B'] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=2).count()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': [0.0, 1, 2, 3, 4]}).set_index('A')
result = df.rolling('1s').min()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': [5, 4, 3, 4, 5]}).set_index('A')
tm.assert_frame_equal(result, expected)
result = df.rolling('2s').min()
expected = df.copy()
expected['B'] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling('5s').min()
expected = df.copy()
expected['B'] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_perf_min(self):
N = 10000
dfp = DataFrame({'B': np.random.randn(N)},
index=pd.date_range('20130101',
periods=N,
freq='s'))
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling('2s').min()
assert ((result - expected) < 0.01).all().bool()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling('200s').min()
assert ((result - expected) < 0.01).all().bool()
def test_ragged_max(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_ragged_apply(self, raw):
df = self.ragged
f = lambda x: 1
result = df.rolling(window='1s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
def test_all(self):
# simple comparison of integer vs time-based windowing
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window='1s')
for f in ['sum', 'mean', 'count', 'median', 'std',
'var', 'kurt', 'skew', 'min', 'max']:
result = getattr(r, f)()
expected = getattr(er, f)()
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = er.quantile(0.5)
tm.assert_frame_equal(result, expected)
def test_all_apply(self, raw):
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window='1s')
result = r.apply(lambda x: 1, raw=raw)
expected = er.apply(lambda x: 1, raw=raw)
tm.assert_frame_equal(result, expected)
def test_all2(self):
# more sophisticated comparison of integer vs.
# time-based windowing
df = DataFrame({'B': np.arange(50)},
index=pd.date_range('20130101',
periods=50, freq='H')
)
# in-range data
dft = df.between_time("09:00", "16:00")
r = dft.rolling(window='5H')
for f in ['sum', 'mean', 'count', 'median', 'std',
'var', 'kurt', 'skew', 'min', 'max']:
result = getattr(r, f)()
# we need to roll the days separately
# to compare with a time-based roll
# finally groupby-apply will return a multi-index
# so we need to drop the day
def agg_by_day(x):
x = x.between_time("09:00", "16:00")
return getattr(x.rolling(5, min_periods=1), f)()
expected = df.groupby(df.index.day).apply(
agg_by_day).reset_index(level=0, drop=True)
tm.assert_frame_equal(result, expected)
def test_groupby_monotonic(self):
# GH 15130
# we don't need to validate monotonicity when grouping
data = [
['David', '1/1/2015', 100], ['David', '1/5/2015', 500],
['David', '5/30/2015', 50], ['David', '7/25/2015', 50],
['Ryan', '1/4/2014', 100], ['Ryan', '1/19/2015', 500],
['Ryan', '3/31/2016', 50], ['Joe', '7/1/2015', 100],
['Joe', '9/9/2015', 500], ['Joe', '10/15/2015', 50]]
df = DataFrame(data=data, columns=['name', 'date', 'amount'])
df['date'] = pd.to_datetime(df['date'])
expected = df.set_index('date').groupby('name').apply(
lambda x: x.rolling('180D')['amount'].sum())
result = df.groupby('name').rolling('180D', on='date')['amount'].sum()
tm.assert_series_equal(result, expected)
def test_non_monotonic(self):
# GH 13966 (similar to #15130, closed by #15175)
dates = pd.date_range(start='2016-01-01 09:30:00',
periods=20, freq='s')
df = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.concatenate((dates, dates)),
'C': np.arange(40)})
result = df.groupby('A').rolling('4s', on='B').C.mean()
expected = df.set_index('B').groupby('A').apply(
lambda x: x.rolling('4s')['C'].mean())
tm.assert_series_equal(result, expected)
df2 = df.sort_values('B')
result = df2.groupby('A').rolling('4s', on='B').C.mean()
tm.assert_series_equal(result, expected)
def test_rolling_cov_offset(self):
# GH16058
idx = pd.date_range('2017-01-01', periods=24, freq='1h')
ss = Series(np.arange(len(idx)), index=idx)
result = ss.rolling('2h').cov()
expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(2, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
result = ss.rolling('3h').cov()
expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(3, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
| bsd-3-clause | 4,312,994,284,276,125,700 | 37.402513 | 79 | 0.505008 | false |
passy/glashammer-rdrei | glashammer/bundles/csrf.py | 1 | 3865 | # -*- coding: utf-8 -*-
"""
glashammer.bundles.middleware.csrf_protection
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides a simple middleware to secure against Cross Site Remote Forgery
attacks by setting cookies on every request and validate them on post
requests.
:copyright: 2010, The Glashammer Authors
:license: MIT
"""
from hashlib import sha1
from functools import wraps
from time import time
from glashammer.utils.wrappers import Request
from werkzeug.exceptions import Forbidden
import logging
log = logging.getLogger('glashammer.bundles.csrf')
class CSRFProtectionMiddleware(object):
"""
Middleware that sets a random string to a cookie. This can be used
    to validate that the request comes from the expected origin.
Use :func:`setup_csrf_protection` and don't use this directly.
"""
def __init__(self, app, cookie_name):
self.app = app
self.cookie_name = cookie_name
app.connect_event('response-start', self.set_cookie)
def set_cookie(self, response):
"""Sets a unique string to the cookie."""
if not hasattr(response, 'no_csrf_cookie'):
response.set_cookie(self.cookie_name, self._generate_token())
def _generate_token(self):
"""Generate a new random string based on time and secret set in the
config."""
return sha1("%s#%s" % (time(),
self.app.cfg['sessions/secret'])).hexdigest()
def setup_csrf_protection(app, cookie_name='glashammer_csrf'):
"""Sets up the csrf protection middleware.
:param cookie_name: Cookie to store the secret key in. Remember that you
have to create a new ``require_csrf_token`` decorator, if you change this
value.
"""
# In case the session bundle is not activated.
app.add_config_var('sessions/secret', str, 'glashammer_secret')
middleware = CSRFProtectionMiddleware(app, cookie_name)
def require_csrf_token_factory(form_var='_csrf_token',
cookie_var='glashammer_csrf',
exception_type=Forbidden):
"""Create a new ``require_csrf_token`` decorator based on the options
submitted."""
def get_request(args):
"""Tries to retrieve the request object from a list of arguments.
Returns the first argument in the list that looks like a request
object.
This is used to make function-style views and method-style controllers
both work.
"""
for arg in args:
if isinstance(arg, Request):
return arg
raise TypeError("No request object found in function call!")
def require_csrf_token(func):
"""Raises a Forbidden by default if posted '_csrf_token' does
not match the cookie value."""
@wraps(func)
def decorator(*args, **kwargs):
req = get_request(args)
if form_var not in req.form or \
cookie_var not in req.cookies:
log.info("CSRF-Protection failed. Either cookie or post "
"value not found!")
raise exception_type("CSRF protection validation failed! "
"Form data missing!")
elif req.form[form_var] != req.cookies[cookie_var]:
log.info("CSRF-Protection failed. Expected %s, got %s.",
req.cookies[cookie_var], req.form[form_var])
raise exception_type("CSRF protection validation failed! "
"Form data invalid!")
else:
return func(*args, **kwargs)
return decorator
return require_csrf_token
# Default decorators.
require_csrf_token = require_csrf_token_factory()
__all__ = ('setup_csrf_protection', 'require_csrf_token',
'require_csrf_token_factory')
| mit | 3,107,017,994,892,241,400 | 32.034188 | 78 | 0.615783 | false |
coderb0t/CouchPotatoServer | couchpotato/core/media/movie/providers/info/couchpotatoapi.py | 1 | 4212 | import base64
import time
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode, ss
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from couchpotato.environment import Env
log = CPLog(__name__)
autoload = 'CouchPotatoApi'
class CouchPotatoApi(MovieProvider):
urls = {
'validate': 'https://api.couchpota.to/validate/%s/',
'search': 'https://api.couchpota.to/search/%s/',
'info': 'https://api.couchpota.to/info/%s/',
'is_movie': 'https://api.couchpota.to/ismovie/%s/',
'eta': 'https://api.couchpota.to/eta/%s/',
'suggest': 'https://api.couchpota.to/suggest/',
'updater': 'https://raw.githubusercontent.com/%s/%s/%s/updater.json',
'messages': 'https://api.couchpota.to/messages/?%s',
}
http_time_between_calls = 0
api_version = 1
def __init__(self):
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info.release_date', self.getReleaseDate)
addEvent('info.search', self.search, priority = 2)
addEvent('movie.search', self.search, priority = 2)
addEvent('movie.suggest', self.getSuggestions)
addEvent('movie.is_movie', self.isMovie)
addEvent('release.validate', self.validate)
addEvent('cp.api_call', self.call)
addEvent('cp.source_url', self.getSourceUrl)
addEvent('cp.messages', self.getMessages)
def call(self, url, **kwargs):
return self.getJsonData(url, headers = self.getRequestHeaders(), **kwargs)
def getMessages(self, last_check = 0):
data = self.getJsonData(self.urls['messages'] % tryUrlencode({
'last_check': last_check,
}), headers = self.getRequestHeaders(), cache_timeout = 10)
return data
def getSourceUrl(self, repo = None, repo_name = None, branch = None):
return self.getJsonData(self.urls['updater'] % (repo, repo_name, branch), headers = self.getRequestHeaders())
def search(self, q, limit = 5):
return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = self.getRequestHeaders())
def validate(self, name = None):
if not name:
return
name_enc = base64.b64encode(ss(name))
return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders())
def isMovie(self, identifier = None, adding = False):
if not identifier:
return
url = self.urls['is_movie'] % identifier
url += '?adding=1' if adding else ''
data = self.getJsonData(url, headers = self.getRequestHeaders())
if data:
return data.get('is_movie', True)
return True
def getInfo(self, identifier = None, **kwargs):
if not identifier:
return
result = self.getJsonData(self.urls['info'] % identifier, headers = self.getRequestHeaders())
if result:
return dict((k, v) for k, v in result.items() if v)
return {}
def getReleaseDate(self, identifier = None):
if identifier is None: return {}
dates = self.getJsonData(self.urls['eta'] % identifier, headers = self.getRequestHeaders())
log.debug('Found ETA for %s: %s', (identifier, dates))
return dates
def getSuggestions(self, movies = None, ignore = None):
if not ignore: ignore = []
if not movies: movies = []
suggestions = self.getJsonData(self.urls['suggest'], data = {
'movies': ','.join(movies),
'ignore': ','.join(ignore),
}, headers = self.getRequestHeaders())
log.info('Found suggestions for %s movies, %s ignored', (len(movies), len(ignore)))
return suggestions
def getRequestHeaders(self):
return {
'X-CP-Version': fireEvent('app.version', single = True),
'X-CP-API': self.api_version,
'X-CP-Time': time.time(),
'X-CP-Identifier': '+%s' % Env.setting('api_key', 'core')[:10], # Use first 10 as identifier, so we don't need to use IP address in api stats
}
| gpl-3.0 | 2,660,003,703,913,347,000 | 32.967742 | 154 | 0.616809 | false |
timothyclemansinsea/smc | src/scripts/gce/pricing.py | 1 | 1971 | # https://cloud.google.com/compute/pricing
# all storage prices are per GB per month.
PRICING = {
'gcs-standard' : 0.026,
'gcs-reduced' : 0.02,
'gcs-nearline' : 0.01,
'snapshot' : 0.026,
'local-ssd' : 0.218,
'pd-ssd' : 0.17,
'pd-standard' : 0.04,
    'n1-standard-hour' : 0.05, # price of n1-standard-1, so multiply by the number of CPUs (the suffix)
'n1-standard-hour-pre' : 0.015,
'n1-standard-month': 0.035*30.5*24, # price for sustained use for a month
'n1-standard-ram' : 3.75, # amount in GB of base machine
'n1-highmem-hour' : 0.096/2,
'n1-highmem-hour-pre' : 0.035/2,
'n1-highmem-month' : 0.088*30.5*24/2,
'n1-highmem-ram' : 6.5,
'n1-highcpu-hour' : 0.076/2,
'n1-highcpu-hour-pre' : 0.02/2,
'n1-highcpu-month' : 0.053*30.5*24/2,
'n1-highcpu-ram' : 0.9,
'g1-small-hour' : 0.021,
'g1-small-hour-pre': 0.01,
'g1-small-month' : 0.019*30.5*24,
'g1-small-ram' : 1.7,
'f1-micro-hour' : 0.008,
'f1-micro-hour-pre': 0.005,
'f1-micro-month' : 0.0056*30.5*24,
'f1-micro-ram' : 0.60,
'europe' : 1.096,
'asia' : 1.096,
'us' : 1,
'egress' : 0.12,
'egress-china' : 0.23,
'egress-australia' : 0.19,
}
def cpu_cost(size='n1-standard-1', preemptible=False, region='us'):
if size.count('-') == 2:
i = size.rfind('-')
m = int(size[i+1:])
else:
i = len(size)
m = 1
if preemptible:
x = PRICING[size[:i] + '-hour-pre']*24*30.5*m
return [x, x]
else:
return [m*PRICING[size[:i] + '-month'], m*PRICING[size[:i] + '-hour']*24*30.5]
def disk_cost(disk_size=10, disk_type='pd-standard'):
x = PRICING[disk_type] * disk_size
return [x, x]
import locale
locale.setlocale( locale.LC_ALL, '' )
def money(s):
return locale.currency(s)
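# Hedged usage sketch (added for illustration; the machine type and disk size are
# arbitrary examples). cpu_cost() returns [sustained-use monthly price, hourly
# price extrapolated to a month] and disk_cost() returns the same value twice,
# so matching elements can be summed for a rough monthly estimate.
if __name__ == '__main__':
    cpu_sustained, _ = cpu_cost('n1-highmem-4', preemptible=False)
    disk_monthly, _ = disk_cost(disk_size=100, disk_type='pd-ssd')
    print("estimated monthly cost: %s" % money(cpu_sustained + disk_monthly))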
| gpl-3.0 | 4,755,622,627,190,812,000 | 27.565217 | 107 | 0.515982 | false |
rdevon/cortex | demos/demo_classifier.py | 1 | 2354 | '''Simple classifier model
'''
from cortex.main import run
from cortex.plugins import ModelPlugin
import torch
import torch.nn as nn
import torch.nn.functional as F
from cortex.built_ins.models.utils import update_encoder_args
class MyClassifier(ModelPlugin):
'''Basic image classifier.
Classifies images using standard convnets.
'''
defaults = dict(
data=dict(batch_size=128, inputs=dict(inputs='images')),
optimizer=dict(optimizer='Adam', learning_rate=1e-3),
train=dict(epochs=200, save_on_best='losses.classifier'))
def build(self, classifier_type='convnet',
classifier_args=dict(dropout=0.2)):
'''Builds a simple image classifier.
Args:
classifier_type (str): Network type for the classifier.
classifier_args: Classifier arguments. Can include dropout,
batch_norm, layer_norm, etc.
'''
classifier_args = classifier_args or {}
shape = self.get_dims('x', 'y', 'c')
dim_l = self.get_dims('labels')
Encoder, args = update_encoder_args(
shape, model_type=classifier_type, encoder_args=classifier_args)
args.update(**classifier_args)
classifier = Encoder(shape, dim_out=dim_l, **args)
self.nets.classifier = classifier
def routine(self, inputs, targets, criterion=nn.CrossEntropyLoss()):
'''
Args:
criterion: Classifier criterion.
'''
classifier = self.nets.classifier
outputs = classifier(inputs)
predicted = torch.max(F.log_softmax(outputs, dim=1).data, 1)[1]
loss = criterion(outputs, targets)
correct = 100. * predicted.eq(
targets.data).cpu().sum() / targets.size(0)
self.losses.classifier = loss
self.results.accuracy = correct
def predict(self, inputs):
classifier = self.nets.classifier
outputs = classifier(inputs)
predicted = torch.max(F.log_softmax(outputs, dim=1).data, 1)[1]
return predicted
def visualize(self, images, inputs, targets):
predicted = self.predict(inputs)
self.add_image(images.data, labels=(targets.data, predicted.data),
name='gt_pred')
if __name__ == '__main__':
classifier = MyClassifier()
run(model=classifier)
| bsd-3-clause | -8,868,400,450,535,550,000 | 26.057471 | 76 | 0.623195 | false |
phborba/dsgtoolsop | auxiliar/geopy/geocoders/dot_us.py | 1 | 5485 | """
:class:`GeocoderDotUS` geocoder.
"""
import csv
from base64 import b64encode
from geopy.compat import urlencode, py3k, Request
from geopy.geocoders.base import (
Geocoder,
DEFAULT_FORMAT_STRING,
DEFAULT_TIMEOUT,
)
from geopy.location import Location
from geopy.exc import ConfigurationError
from geopy.util import logger, join_filter
__all__ = ("GeocoderDotUS", )
class GeocoderDotUS(Geocoder): # pylint: disable=W0223
"""
GeocoderDotUS geocoder, documentation at:
http://geocoder.us/
Note that GeocoderDotUS does not support SSL.
"""
def __init__(
self,
username=None,
password=None,
format_string=DEFAULT_FORMAT_STRING,
timeout=DEFAULT_TIMEOUT,
proxies=None,
user_agent=None,
): # pylint: disable=R0913
"""
:param str username:
:param str password:
:param str format_string: String containing '%s' where the
string to geocode should be interpolated before querying the
geocoder. For example: '%s, Mountain View, CA'. The default
is just '%s'.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising an :class:`geopy.exc.GeocoderTimedOut`
exception.
.. versionadded:: 0.97
:param dict proxies: If specified, routes this geocoder's requests
through the specified proxy. E.g., {"https": "192.0.2.0"}. For
more information, see documentation on
:class:`urllib2.ProxyHandler`.
.. versionadded:: 0.96
:param str user_agent: Use a custom User-Agent header.
.. versionadded:: 1.12.0
"""
super(GeocoderDotUS, self).__init__(
format_string=format_string, timeout=timeout, proxies=proxies, user_agent=user_agent
)
if username or password:
if not (username and password):
raise ConfigurationError(
"Username and password must both specified"
)
self.authenticated = True
self.api = "http://geocoder.us/member/service/namedcsv"
else:
self.authenticated = False
self.api = "http://geocoder.us/service/namedcsv"
self.username = username
self.password = password
def geocode(self, query, exactly_one=True, timeout=None):
"""
Geocode a location query.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
.. versionadded:: 0.97
"""
query_str = self.format_string % query
url = "?".join((self.api, urlencode({'address':query_str})))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
url = Request(url, headers=self._get_headers())
page = self._call_geocoder(url, timeout=timeout, raw=True)
content = page.read().decode("utf-8") if py3k else page.read() # pylint: disable=E1101,E1103
places = [
r for r in csv.reader(
[content, ] if not isinstance(content, list)
else content
)
]
if not len(places):
return None
if exactly_one:
return self._parse_result(places[0])
else:
result = [self._parse_result(res) for res in places]
if None in result: # todo
return None
return result
@staticmethod
def _parse_result(result):
"""
        Parse a single geocoder.us result row into a Location, returning None
        when the row is an error or lacks coordinates.
"""
# turn x=y pairs ("lat=47.6", "long=-117.426")
# into dict key/value pairs:
place = dict(
[x.split('=') for x in result if len(x.split('=')) > 1]
)
if 'error' in place:
if "couldn't find" in place['error']:
return None
address = [
place.get('number', None),
place.get('prefix', None),
place.get('street', None),
place.get('type', None),
place.get('suffix', None)
]
city = place.get('city', None)
state = place.get('state', None)
zip_code = place.get('zip', None)
name = join_filter(", ", [
join_filter(" ", address),
city,
join_filter(" ", [state, zip_code])
])
latitude = place.get('lat', None)
longitude = place.get('long', None)
if latitude and longitude:
latlon = float(latitude), float(longitude)
else:
return None
return Location(name, latlon, place)
def _get_headers(self):
headers = {}
if self.authenticated:
username_password = ":".join((self.username, self.password))
auth = " ".join((
"Basic",
b64encode(username_password.encode('utf-8')).decode('utf-8')
))
headers["Authorization"] = auth
return headers
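# Hedged usage sketch (added for illustration; the address is made up). The class
# exposes the standard geopy ``geocode`` call defined above:
#
#   geocoder = GeocoderDotUS()   # or GeocoderDotUS(username='...', password='...')
#   location = geocoder.geocode("1600 Pennsylvania Ave NW, Washington DC")
#   if location is not None:
#       print(location.address, location.latitude, location.longitude)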
| gpl-2.0 | 2,699,461,029,512,830,000 | 31.64881 | 100 | 0.558067 | false |
anergictcell/SonosBar | sonosBar.py | 1 | 13759 | #!/usr/bin/env python -W ignore
# -*- coding: utf-8 -*-
"""
Control you Sonos system from you Mac Menu Bar
"""
# <bitbar.title>SonosBar</bitbar.title>
# <bitbar.version>v1.0</bitbar.version>
# <bitbar.author>Jonas Marcello</bitbar.author>
# <bitbar.author.github>anergictcell</bitbar.author.github>
# <bitbar.desc>Control you Sonos system from you Mac Menu Bar.</bitbar.desc>
# <bitbar.image>https://raw.githubusercontent.com/anergictcell/SonosBar/master/resources/SonosBar.png</bitbar.image>
# <bitbar.dependencies>python,SoCo</bitbar.dependencies>
# <bitbar.abouturl>https://github.com/anergictcell/SonosBar/</bitbar.abouturl>
import argparse
import socket
import os
import sys
try:
import soco
from soco.music_services import MusicService
from soco.data_structures import DidlItem, to_didl_string
except ImportError:
print("Error")
print("---")
print("You need to istall >>soco<< | href=https://github.com/SoCo/SoCo")
sys.exit(0)
def parse_ip(ip_string):
"""Parsing the user supplied IP address to use on the local subnet"""
host_ip = socket.gethostbyname(socket.gethostname())
subnets = host_ip.split(".")
sonos_subnets = ip_string.split(".")
new_ip = subnets[0:(4-len(sonos_subnets))] + sonos_subnets
return ".".join(new_ip)
def parse_cli_arguments():
"""Main function that parses command line arguments"""
parser = argparse.ArgumentParser(description='Control your Sonos')
player_args = parser.add_mutually_exclusive_group()
player_args.add_argument(
"-p", "--player",
metavar="SPEAKER_NAME",
type=str,
# default="Living Room",
help="The name of the player/zone")
player_args.add_argument(
"-i", "--ip",
metavar="IP_ADDRESS",
type=str,
help="The IP address of the player/zone")
control_args = parser.add_mutually_exclusive_group()
control_args.add_argument(
"-l", "--playlist",
metavar="PLAYLIST_NAME",
type=str,
help="The name of the playlist to play")
control_args.add_argument(
"-r", "--radio",
metavar="RADIO_STATION",
type=str,
help="The name of the radio station to play")
control_args.add_argument(
"-v", "--vol",
metavar="VOLUME",
type=int,
choices=range(0, 101),
help="0-100")
control_args.add_argument(
"-j", "--join",
metavar="SPEAKER_NAME",
type=str,
help="Name of the speaker to join")
control_args.add_argument(
"-k", "--ipjoin",
metavar="SPEAKER_IP",
type=str,
help="IP of the speaker to join")
control_args.add_argument(
"-u", "--unjoin",
action='store_const',
const=True,
help="Unjoin the player from all groups")
control_args.add_argument(
'action',
metavar='action',
nargs="?",
choices=["play", "pause", "next", "previous", "shuffle", "normal"],
help="""Action to take if non is set via flags.
Can be either: play, pause, next, previous, shuffle, normal""")
parser.add_argument(
"-g", "--group",
action='store_const',
const=True,
help="Apply the action to the whole group")
output = parser.add_mutually_exclusive_group()
output.add_argument(
"-o", "--verbose",
action='store_const',
const=True,
help="Display feedback about current actions")
output.add_argument(
"-b", "--bitbar",
action='store_const',
const=True,
help="Display bitbar controls")
args = parser.parse_args()
if args.ip:
args.ip = parse_ip(args.ip)
if args.ipjoin:
args.ipjoin = parse_ip(args.ipjoin)
return args
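# A few illustrative invocations of this parser; the zone and playlist names
# below are made-up examples:
#   sonosBar.py -p "Living Room" play           # resume playback on one zone
#   sonosBar.py -i 5.17 -g -l "Morning Mix"     # whole group plays a playlist
#   sonosBar.py -p Kitchen -j "Living Room"     # join Kitchen to Living Room's group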
def output_for_bitbar(zones):
"""Prints the topology display"""
print("🔊Sonos")
print("---")
for zone in zones:
print_zone(zone)
def print_zone(zone):
"""Prints basic info about the zone and calls functions to
print more detailed info"""
print("---")
print("Zone:")
print("{0}: {1}".format(zone["kind"], zone["master"].player_name))
if zone["kind"] == "P":
print_single_player(zone["master"])
else:
print_group(zone["master"])
def print_single_player(player):
"""Controls printing of control elements for a single-player zone"""
print_music_controls(player, "--")
print_player_controls(player, "--")
print_top_level_controls(player, "")
def print_group(master):
"""Controls printing of control elements for a multi-player zone"""
print_music_controls(master, "--")
print_top_level_controls(master, "")
for player in master.group.members:
print("➤ {0}".format(player.player_name))
print_player_controls(player, "--")
print("--Volume")
print_volume_controls(player, "--")
def create_command(player, *params):
"""Creates the Bitbar specific command"""
string = "bash={0} param1=-i param2={1}"
i = 3
for param in params:
string += " param{0}={1}".format(i, param)
i += 1
string += " terminal=false refresh=true"
return string.format(PATH_TO_SCRIPT, player.ip_address)
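# Example of the Bitbar line this helper builds (script path, IP address and
# volume value are placeholders; the real result is a single line, wrapped
# here only for readability):
#   bash=/path/to/sonosBar.py param1=-i param2=192.168.0.23 param3=--vol
#       param4=50 terminal=false refresh=true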
def print_player_controls(player, indent):
"""Prints Player controls for Bitbar"""
print("{0}Join".format(indent))
for single_player in player.all_zones:
if single_player != player:
print("{0}--{1} | ".format(indent, single_player.player_name) +
create_command(player, "--ipjoin", single_player.ip_address)
)
print("{0}Unjoin | ".format(indent) +
create_command(player, "--unjoin")
)
def print_music_controls(player, indent):
"""Prints Music controls for Bitbar"""
print("{0}Playlists".format(indent))
for playlist in player.get_sonos_playlists():
print("{0}--{1} | ".format(indent, playlist.title) +
create_command(player, "-gl", '"' + playlist.title + '"')
)
print("{0}Radios".format(indent))
for station in player.get_favorite_radio_stations()["favorites"]:
print("{0}--{1} | ".format(indent, station["title"]) +
create_command(player, "-gr", '"' + station["uri"] + '"')
)
def print_top_level_controls(player, indent):
"""Prints the controls that are displayed on the base level for each
player / group"""
playing = player.get_current_transport_info()["current_transport_state"]
if playing == "PLAYING":
print("{0}├ Pause | ".format(indent) +
create_command(player, "pause", "-g"))
print("{0}├ Next | ".format(indent) +
create_command(player, "next", "-g"))
else:
print("{0}├ Play | ".format(indent) +
create_command(player, "play", "-g"))
print("{0}└ Volume | ".format(indent))
print_volume_controls(player, indent)
def print_volume_controls(player, indent):
"""Prints controls to adjust the volume"""
for vol in range(0, 11):
if (vol-1) * 10 < player.volume and vol*10 >= player.volume:
# print checkmark
print(("{0}--{1}{2}").format(indent, u'\u2713'.encode("utf-8"), vol))
else:
print("{0}--{1} | ".format(indent, vol) +
create_command(player, "--vol", vol*10)
)
PATH_TO_SCRIPT = os.path.realpath(__file__)
ARGUMENTS = parse_cli_arguments()
GROUP = ARGUMENTS.group
def get_player_by_name(name):
"""Returns a SoCo object for the given name (if it exists)"""
for device in soco.discover():
if device.player_name == name:
return device
def define_player(ip_address, name):
"""Returning a SoCo object of the chosen player"""
player = None
if ip_address:
player = soco.SoCo(ip_address)
if name:
player = get_player_by_name(name)
if player and GROUP:
# Change player to be the coordinator of the group
player = player.group.coordinator
return player
def find_random_player():
"""Searches the network for Sonos zones and picks one randomly"""
zones = soco.discover()
if zones:
# picking a random player
player = next(iter(zones))
return player
return None
def parse_zone_groups(player):
"""Creates a list of all Zones with attrbute
whether they are a group or a single player"""
all_zones = []
for group in player.all_groups:
if len(group.members) > 1:
all_zones.append({"kind":"G", "master":group.coordinator})
else:
all_zones.append({"kind":"P", "master":group.coordinator})
return all_zones
def verbose_output(string):
"""Printing the passed commands to stdout"""
if ARGUMENTS.verbose:
print("{0}: {1}".format(
("Group " if GROUP else "Player "), string))
def group_coordinate(function):
"""Wrapper function to ensure unjoining for single players"""
def inner_function(*arguments):
"""Inner function"""
if GROUP:
function(*arguments)
else:
# First argument always has to be the player SoCo object
arguments[0].unjoin()
function(*arguments)
return inner_function
def get_songs_from_playlist(player, playlist_name):
"""Returns a list of songs from the given playlist"""
lists = player.get_sonos_playlists()
for playlist in lists:
if playlist.title == playlist_name:
return player.music_library.browse(playlist)
@group_coordinate
def play_playlist(player, playlist_name):
"""Replaces the queue with the selected playlist"""
verbose_output("Play playlist {0}".format(playlist_name))
songs = get_songs_from_playlist(player, playlist_name)
player.clear_queue()
for song in songs:
player.add_to_queue(song)
player.play_from_queue(0)
@group_coordinate
def play_radio_station(player, uri):
"""Plays the selected radio station. The URI must be in the
format as it is currently returned from soco:
x-sonosapi-stream:s25111?sid=254&flags=32
"""
verbose_output("Switching to radio station {0}".format(uri))
service = MusicService('TuneIn')
didl = DidlItem(
title="DUMMY", parent_id="DUMMY", item_id="DUMMY", desc=service.desc)
meta = to_didl_string(didl)
player.avTransport.SetAVTransportURI(
[('InstanceID', 0), ('CurrentURI', uri), ('CurrentURIMetaData', meta)])
player.play()
@group_coordinate
def play(player):
"""Play the selected song"""
verbose_output("Play")
player.play()
@group_coordinate
def pause(player):
"""Pause the current playback"""
verbose_output("Pause")
player.pause()
@group_coordinate
def next_track(player):
"""Play the next track"""
verbose_output("Next track")
player.next()
@group_coordinate
def previous_track(player):
"""Play the previous track"""
verbose_output("Previous track")
player.previous()
@group_coordinate
def turn_on_shuffle(player):
"""Turn on shuffle"""
verbose_output("Shuffle ON")
player.play_mode = "SHUFFLE_NOREPEAT"
@group_coordinate
def turn_off_shuffle(player):
"""Turn off shuffle"""
verbose_output("Shuffle OFF")
player.play_mode = "NORMAL"
def set_volume(player, volume):
"""Sets the volume"""
verbose_output("Setting the volume to {0}".format(volume))
player.volume = volume
def join(source, target):
"""Joining another group"""
if target is None:
return invalid_command("Target to join is not known")
if GROUP:
for single_player in source.group.members:
single_player.join(target.group.coordinator)
else:
source.join(target.group.coordinator)
def invalid_command(err):
"""Handles errors and prints error messages"""
print("ERROR: {0}".format(err))
return
def main(args):
"""Main function"""
player = define_player(args.ip, args.player)
if player is None or args.bitbar:
player = player or find_random_player()
print_bitbar_controls(player)
return
if GROUP:
# Change player to the coordinator of the group
player = player.group.coordinator
if args.playlist:
return play_playlist(player, args.playlist)
if args.radio:
return play_radio_station(player, args.radio)
if args.vol is not None:
return set_volume(player, args.vol)
if args.join:
verbose_output("Joining {0}".format(args.join))
to_join = define_player(None, args.join)
return join(player, to_join)
if args.ipjoin:
verbose_output("Joining {0}".format(args.ipjoin))
to_join = define_player(args.ipjoin, None)
return join(player, to_join)
if args.unjoin:
verbose_output("Unjoin")
player.unjoin()
return
if args.action is None:
return
if args.action.lower() == "play":
play(player)
return
if args.action.lower() == "pause":
pause(player)
return
if args.action.lower() == "next":
next_track(player)
return
if args.action.lower() == "previous":
previous_track(player)
return
if args.action.lower() == "shuffle":
turn_on_shuffle(player)
return
if args.action.lower() == "normal":
turn_off_shuffle(player)
return
def print_bitbar_controls(player):
"""Prints the lines used for Bitbar to stdout"""
if player is None:
print("🔇 Sonos")
print("---")
print("No Sonos Zone present")
else:
output_for_bitbar(parse_zone_groups(player))
if __name__ == "__main__":
main(ARGUMENTS)
| mit | -7,581,023,862,845,307,000 | 28.941176 | 116 | 0.612821 | false |
ray-project/ray | python/ray/util/collective/tests/distributed_gpu_tests/test_distributed_allgather.py | 1 | 5383 | """Test the allgather API on a distributed Ray cluster."""
import pytest
import ray
import cupy as cp
import torch
from ray.util.collective.tests.util import create_collective_workers, \
init_tensors_for_gather_scatter
@pytest.mark.parametrize("tensor_backend", ["cupy", "torch"])
@pytest.mark.parametrize("array_size",
[2, 2**5, 2**10, 2**15, 2**20, [2, 2], [5, 5, 5]])
def test_allgather_different_array_size(ray_start_distributed_2_nodes_4_gpus,
array_size, tensor_backend):
world_size = 4
actors, _ = create_collective_workers(world_size)
init_tensors_for_gather_scatter(
actors, array_size=array_size, tensor_backend=tensor_backend)
results = ray.get([a.do_allgather.remote() for a in actors])
for i in range(world_size):
for j in range(world_size):
if tensor_backend == "cupy":
assert (results[i][j] == cp.ones(array_size, dtype=cp.float32)
* (j + 1)).all()
else:
assert (results[i][j] == torch.ones(
array_size, dtype=torch.float32).cuda() * (j + 1)).all()
@pytest.mark.parametrize("dtype",
[cp.uint8, cp.float16, cp.float32, cp.float64])
def test_allgather_different_dtype(ray_start_distributed_2_nodes_4_gpus,
dtype):
world_size = 4
actors, _ = create_collective_workers(world_size)
init_tensors_for_gather_scatter(actors, dtype=dtype)
results = ray.get([a.do_allgather.remote() for a in actors])
for i in range(world_size):
for j in range(world_size):
assert (results[i][j] == cp.ones(10, dtype=dtype) * (j + 1)).all()
@pytest.mark.parametrize("length", [0, 1, 3, 4, 7, 8])
def test_unmatched_tensor_list_length(ray_start_distributed_2_nodes_4_gpus,
length):
world_size = 4
actors, _ = create_collective_workers(world_size)
list_buffer = [cp.ones(10, dtype=cp.float32) for _ in range(length)]
ray.wait([a.set_list_buffer.remote(list_buffer) for a in actors])
if length != world_size:
with pytest.raises(RuntimeError):
ray.get([a.do_allgather.remote() for a in actors])
else:
ray.get([a.do_allgather.remote() for a in actors])
@pytest.mark.parametrize("shape", [10, 20, [4, 5], [1, 3, 5, 7]])
def test_unmatched_tensor_shape(ray_start_distributed_2_nodes_4_gpus, shape):
world_size = 4
actors, _ = create_collective_workers(world_size)
init_tensors_for_gather_scatter(actors, array_size=10)
list_buffer = [cp.ones(shape, dtype=cp.float32) for _ in range(world_size)]
ray.get([a.set_list_buffer.remote(list_buffer) for a in actors])
if shape != 10:
with pytest.raises(RuntimeError):
ray.get([a.do_allgather.remote() for a in actors])
else:
ray.get([a.do_allgather.remote() for a in actors])
def test_allgather_torch_cupy(ray_start_distributed_2_nodes_4_gpus):
world_size = 4
shape = [10, 10]
actors, _ = create_collective_workers(world_size)
# tensor is pytorch, list is cupy
for i, a in enumerate(actors):
t = torch.ones(shape, dtype=torch.float32).cuda() * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = [
cp.ones(shape, dtype=cp.float32) for _ in range(world_size)
]
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_allgather.remote() for a in actors])
for i in range(world_size):
for j in range(world_size):
assert (results[i][j] == cp.ones(shape, dtype=cp.float32) *
(j + 1)).all()
# tensor is cupy, list is pytorch
for i, a in enumerate(actors):
t = cp.ones(shape, dtype=cp.float32) * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = [
torch.ones(shape, dtype=torch.float32).cuda()
for _ in range(world_size)
]
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_allgather.remote() for a in actors])
for i in range(world_size):
for j in range(world_size):
assert (results[i][j] == torch.ones(
shape, dtype=torch.float32).cuda() * (j + 1)).all()
# some tensors in the list are pytorch, some are cupy
for i, a in enumerate(actors):
t = cp.ones(shape, dtype=cp.float32) * (i + 1)
ray.wait([a.set_buffer.remote(t)])
list_buffer = []
for j in range(world_size):
if j % 2 == 0:
list_buffer.append(
torch.ones(shape, dtype=torch.float32).cuda())
else:
list_buffer.append(cp.ones(shape, dtype=cp.float32))
ray.wait([a.set_list_buffer.remote(list_buffer)])
results = ray.get([a.do_allgather.remote() for a in actors])
for i in range(world_size):
for j in range(world_size):
if j % 2 == 0:
assert (results[i][j] == torch.ones(
shape, dtype=torch.float32).cuda() * (j + 1)).all()
else:
assert (results[i][j] == cp.ones(shape, dtype=cp.float32) *
(j + 1)).all()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| apache-2.0 | 3,633,413,706,309,417,000 | 39.473684 | 79 | 0.577002 | false |
altair-viz/altair | altair/sphinxext/utils.py | 1 | 5859 | import ast
import hashlib
import itertools
import json
import re
def create_thumbnail(image_filename, thumb_filename, window_size=(280, 160)):
"""Create a thumbnail whose shortest dimension matches the window"""
from PIL import Image
im = Image.open(image_filename)
im_width, im_height = im.size
width, height = window_size
width_factor, height_factor = width / im_width, height / im_height
if width_factor > height_factor:
final_width = width
final_height = int(im_height * width_factor)
else:
final_height = height
final_width = int(im_width * height_factor)
thumb = im.resize((final_width, final_height), Image.ANTIALIAS)
thumb.save(thumb_filename)
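# Minimal usage sketch (file names are hypothetical): the call resizes the
# image so it covers the default 280x160 window while keeping its aspect ratio.
#   create_thumbnail("chart.png", "chart-thumb.png")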
def create_generic_image(filename, shape=(200, 300), gradient=True):
"""Create a generic image"""
from PIL import Image
import numpy as np
assert len(shape) == 2
arr = np.zeros((shape[0], shape[1], 3))
if gradient:
# gradient from gray to white
arr += np.linspace(128, 255, shape[1])[:, None]
im = Image.fromarray(arr.astype("uint8"))
im.save(filename)
SYNTAX_ERROR_DOCSTRING = """
SyntaxError
===========
Example script with invalid Python syntax
"""
def _parse_source_file(filename):
"""Parse source file into AST node
Parameters
----------
filename : str
File path
Returns
-------
node : AST node
content : utf-8 encoded string
Notes
-----
This function adapted from the sphinx-gallery project; license: BSD-3
https://github.com/sphinx-gallery/sphinx-gallery/
"""
with open(filename, "r", encoding="utf-8") as fid:
content = fid.read()
# change from Windows format to UNIX for uniformity
content = content.replace("\r\n", "\n")
try:
node = ast.parse(content)
except SyntaxError:
node = None
return node, content
def get_docstring_and_rest(filename):
"""Separate ``filename`` content between docstring and the rest
Strongly inspired from ast.get_docstring.
Parameters
----------
filename: str
The path to the file containing the code to be read
Returns
-------
docstring: str
docstring of ``filename``
category: list
list of categories specified by the "# category:" comment
rest: str
``filename`` content without the docstring
lineno: int
the line number on which the code starts
Notes
-----
This function adapted from the sphinx-gallery project; license: BSD-3
https://github.com/sphinx-gallery/sphinx-gallery/
"""
node, content = _parse_source_file(filename)
# Find the category comment
find_category = re.compile(r"^#\s*category:\s*(.*)$", re.MULTILINE)
match = find_category.search(content)
if match is not None:
category = match.groups()[0]
# remove this comment from the content
content = find_category.sub("", content)
else:
category = None
if node is None:
return SYNTAX_ERROR_DOCSTRING, category, content, 1
if not isinstance(node, ast.Module):
raise TypeError(
"This function only supports modules. "
"You provided {}".format(node.__class__.__name__)
)
try:
# In python 3.7 module knows its docstring.
# Everything else will raise an attribute error
docstring = node.docstring
import tokenize
from io import BytesIO
        # tokenize works on bytes, so encode the source text first
        ts = tokenize.tokenize(BytesIO(content.encode("utf-8")).readline)
ds_lines = 0
# find the first string according to the tokenizer and get
# it's end row
for tk in ts:
if tk.exact_type == 3:
ds_lines, _ = tk.end
break
# grab the rest of the file
rest = "\n".join(content.split("\n")[ds_lines:])
lineno = ds_lines + 1
except AttributeError:
# this block can be removed when python 3.6 support is dropped
if (
node.body
and isinstance(node.body[0], ast.Expr)
and isinstance(node.body[0].value, (ast.Str, ast.Constant))
):
docstring_node = node.body[0]
docstring = docstring_node.value.s
# python2.7: Code was read in bytes needs decoding to utf-8
# unless future unicode_literals is imported in source which
# make ast output unicode strings
if hasattr(docstring, "decode") and not isinstance(docstring, str):
docstring = docstring.decode("utf-8")
# python3.8: has end_lineno
lineno = (
getattr(docstring_node, "end_lineno", None) or docstring_node.lineno
) # The last line of the string.
# This get the content of the file after the docstring last line
# Note: 'maxsplit' argument is not a keyword argument in python2
rest = content.split("\n", lineno)[-1]
lineno += 1
else:
docstring, rest = "", ""
if not docstring:
raise ValueError(
(
'Could not find docstring in file "{0}". '
"A docstring is required for the example gallery."
).format(filename)
)
return docstring, category, rest, lineno
def prev_this_next(it, sentinel=None):
"""Utility to return (prev, this, next) tuples from an iterator"""
i1, i2, i3 = itertools.tee(it, 3)
next(i3, None)
return zip(itertools.chain([sentinel], i1), i2, itertools.chain(i3, [sentinel]))
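# Illustrative example of the windowed iteration:
#   list(prev_this_next(["a", "b", "c"]))
#   -> [(None, "a", "b"), ("a", "b", "c"), ("b", "c", None)]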
def dict_hash(dct):
"""Return a hash of the contents of a dictionary"""
serialized = json.dumps(dct, sort_keys=True)
try:
m = hashlib.md5(serialized)
except TypeError:
m = hashlib.md5(serialized.encode())
return m.hexdigest()
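# Because the dict is serialized with sort_keys=True, key order does not
# affect the hash, e.g.:
#   dict_hash({"a": 1, "b": 2}) == dict_hash({"b": 2, "a": 1})   # True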
| bsd-3-clause | 2,410,201,326,460,441,600 | 28.442211 | 84 | 0.604028 | false |
wpoely86/easybuild-easyblocks | easybuild/easyblocks/f/freetype.py | 1 | 2546 | ##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing freetype, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_freetype(ConfigureMake):
"""Support for building/installing freetype."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for freetype."""
super(EB_freetype, self).__init__(*args, **kwargs)
self.maj_ver = self.version.split('.')[0]
def sanity_check_step(self):
"""Custom sanity check for freetype."""
custom_paths = {
'files': ['bin/freetype-config', 'lib/libfreetype.a', 'lib/libfreetype.%s' % get_shared_lib_ext(),
'lib/pkgconfig/freetype%s.pc' % self.maj_ver],
'dirs': ['include/freetype%s' % self.maj_ver],
}
super(EB_freetype, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Custom guess for CPATH for freetype."""
guesses = super(EB_freetype, self).make_module_req_guess()
guesses.update({
'CPATH': ['include/freetype%s' % self.maj_ver],
})
return guesses
| gpl-2.0 | 8,661,694,134,422,940,000 | 37 | 110 | 0.690102 | false |
jajberni/pcse_web | main/model/simulation.py | 1 | 7917 | # coding: utf-8
"""Provides implementation of Simulation model and Simulation"""
from __future__ import absolute_import
from google.appengine.ext import ndb
from google.appengine.ext.ndb.model import GeoPt
import model
import time
from pcse.db import NASAPowerWeatherDataProvider
from pcse.fileinput import CABOFileReader
from pcse.base_classes import ParameterProvider, WeatherDataProvider
from pcse.models import Wofost71_WLP_FD
import datetime as dt
import json
from dateutil.parser import parse
from flask import jsonify
from operator import itemgetter
from .model_defaults import default_amgt, default_crop, default_site, default_soil
soil_defaults = {'SMW': 0.3, 'SMFCF': 0.46, 'SM0': 0.57, 'CRAIRC': 0.05, 'RDMSOL': 0.45}
class SimulationValidator(model.BaseValidator):
"""Defines how to create validators for simulation properties. For detailed description see BaseValidator"""
name = [1, 100]
description = [3, 400]
latlon = [-180, 180]
tsum = [0, 2000]
@classmethod
def existing_name(cls, name):
"""Validates if given name is in datastore"""
simulation_db = Simulation.get_by('name', name)
if not simulation_db:
raise ValueError('This name is not in our database.')
return name
@classmethod
def unique_name(cls, name):
"""Validates if given name is not in datastore"""
simulation_db = Simulation.get_by('name', name)
if simulation_db:
raise ValueError('Sorry, this name is already taken.')
return name
def is_date(prop, value):
if isinstance(value, dt.date):
return value
elif isinstance(value, dt.datetime):
return value
else:
o = parse(value).date()
return o
def is_geoPt(prop, value):
if isinstance(value, GeoPt):
return value
else:
pt = GeoPt(value.lat, value.lon)
return pt
class StringDateProperty(ndb.DateProperty):
def _validate(self, value):
if isinstance(value, basestring):
o = parse(value).date()
return o
class DictGeoPt(ndb.GeoPtProperty):
def _validate(self, value):
if isinstance(value, dict):
pt = GeoPt(value['lat'], value['lon'])
return pt
class WeatherDataProviderProperty(ndb.PickleProperty):
def _validate(self, value):
# TODO: integrity check
self.store = value[0]
self.elevation = value[1]
self.longitude = value[2]
self.latitude = value[3]
self.description = value[4]
self.ETmodel = value[5]
print("WDP latitude: ", value[3])
return value
    def getWDP(self):
        wdp = WeatherDataProvider()
        wdp.store = self.store
        wdp.elevation = self.elevation
        wdp.longitude = self.longitude
        wdp.latitude = self.latitude
        wdp.description = self.description
        wdp.ETmodel = self.ETmodel
        return wdp
class Simulation(model.Base):
"""A class describing datastore users."""
name = ndb.StringProperty(required=True, validator=SimulationValidator.create('name'))
description = ndb.StringProperty(default="Demo simulation", validator=SimulationValidator.create('description'))
location = DictGeoPt(default=GeoPt(37.4, -4.03))
soil_attributes = ndb.JsonProperty(default=default_soil)
start_date = StringDateProperty(default=dt.date(2014, 9, 1))
sowing_date = StringDateProperty(default=dt.date(2014, 10, 1))
end_date = StringDateProperty(default=dt.date(2015, 7, 1))
crop_name = ndb.StringProperty(default='wheat')
tsum1 = ndb.FloatProperty(default=900.0)
tsum2 = ndb.FloatProperty(default=900.0)
owner_id = ndb.StringProperty(default='')
simulation_output = ndb.JsonProperty(default={})
plot_data = ndb.JsonProperty(default={})
results_ok = ndb.BooleanProperty(default=False)
#weather_data = WeatherDataProviderProperty()
weather_data = ndb.PickleProperty(compressed=True)
wdp = None
simulation_dict = {}
PUBLIC_PROPERTIES = ['name', 'description', 'location', 'results_ok', 'plot_data',
'soil_attributes', 'start_date', 'sowing_date', 'end_date', 'crop_name', 'tsum1', 'tsum2']
PRIVATE_PROPERTIES = ['owner_id']
@ndb.transactional
def update_simulation_results(self):
print('Updating simulation')
json_data = json.dumps(self.run_simulation(), default=json_timestamp)
self.simulation_output = json_data
self.plot_data = self.plot_dict()
self.weather_data = {'store': self.wdp.store,
'elevation': self.wdp.elevation,
'longitude': self.wdp.longitude,
'latitude': self.wdp.latitude,
'description': self.wdp.description,
'ETmodel': self.wdp.ETmodel}
self.results_ok = True
def plot_dict(self):
ts = map(fuzzydate_to_timestamp, self.simulation_dict.keys())
lai = [v['LAI'] for v in self.simulation_dict.itervalues()]
sm = [v['SM'] for v in self.simulation_dict.itervalues()]
twso = [v['TWSO'] for v in self.simulation_dict.itervalues()]
tagp = [v['TAGP'] for v in self.simulation_dict.itervalues()]
json.dumps(sorted(zip(lai, sm), key=itemgetter(0)))
plot_data = json.dumps([
{'key': "LAI", "values": sorted(zip(ts, lai), key=itemgetter(0))},
{'key': "SM", "values": sorted(zip(ts, sm), key=itemgetter(0))},
{'key': "TAGP", "values": sorted(zip(ts, tagp), key=itemgetter(0))},
{'key': "TWSO", "values": sorted(zip(ts, twso), key=itemgetter(0))}])
#print("Plot DATA: ", plot_data)
return plot_data
def run_simulation(self):
if not isinstance(self.weather_data, dict):
print("Fetching NASA weather...")
self.wdp = NASAPowerWeatherDataProvider(self.location.lat, self.location.lon)
else:
print("Weather data is cached...")
if (self.location.lat != self.weather_data['latitude']) or (self.location.lon != self.weather_data['longitude']):
print("Location changed, fetching NASA weather again")
self.wdp = NASAPowerWeatherDataProvider(self.location.lat, self.location.lon)
else:
self.wdp = WeatherDataProvider()
self.wdp.store = self.weather_data['store']
self.wdp.elevation = self.weather_data['elevation']
self.wdp.longitude = self.weather_data['longitude']
self.wdp.latitude = self.weather_data['latitude']
self.wdp.description = self.weather_data['description']
self.wdp.ETmodel = self.weather_data['ETmodel']
print(self.wdp)
amgt = default_amgt
soil = default_soil
site = default_site
crop = default_crop
amgt[0][self.start_date] = amgt[0].pop(amgt[0].keys()[0])
amgt[0][self.start_date]['CropCalendar']['crop_start_date'] = self.sowing_date
amgt[0][self.start_date]['CropCalendar']['crop_end_date'] = self.end_date
parvalues = ParameterProvider(sitedata=site, soildata=soil, cropdata=crop)
crop['TSUM1'] = self.tsum1
crop['TSUM2'] = self.tsum2
soil.update(self.soil_attributes)
wofsim = Wofost71_WLP_FD(parvalues, self.wdp, agromanagement=amgt)
wofsim.run_till_terminate()
output = wofsim.get_output()
results_dict = {}
for a in output:
results_dict[a.pop('day').isoformat()] = a
self.simulation_dict = results_dict
return results_dict
@classmethod
def qry(cls, name=None, **kwargs):
"""Query for simulations"""
# qry = cls.query(**kwargs)
qry = model.Base.qry(model.Simulation, **kwargs)
if name:
qry = qry.filter(cls.name == name)
# else filter for private True and False
return qry
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (dt.datetime, dt.date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def json_timestamp(obj):
if isinstance(obj, (dt.datetime, dt.date)):
return int(time.mktime(obj.timetuple()))
raise TypeError("Type %s not serializable" % type(obj))
def fuzzydate_to_timestamp(obj):
return time.mktime(is_date(None, obj).timetuple())
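# Illustrative use of the serializers above (the date is an arbitrary example):
#   json.dumps({'day': dt.date(2015, 7, 1)}, default=json_timestamp)
# emits the date as a Unix timestamp instead of raising a TypeError.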
| apache-2.0 | 3,805,909,013,444,385,000 | 34.186667 | 119 | 0.674119 | false |
gnmathur/aMAZEd | solution_grid.py | 1 | 3092 | """
Solution-Grid definition
MIT License
Copyright (c) 2017 Gaurav Mathur
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from grid import Grid
from distance import Distances
class SolutionGrid(Grid):
""" A distance grid is a specialized grid that is capable of computing
distances between cells.
"""
def __init__(self, nRows, nColumns):
super(SolutionGrid, self).__init__(nRows, nColumns)
self.distances = None
self.crumbs = None
def compute_distances(self, start):
""" This method computes the distance of each cell in the
grid from <start>
"""
self.distances = Distances(start)
frontier = [start]
while len(frontier) > 0:
new_frontier = []
for cell in frontier:
for linked_cell in cell.getLinks():
                    if self.distances[linked_cell] is not None:
continue
self.distances[linked_cell] = self.distances[cell] + 1
new_frontier.append(linked_cell)
frontier = new_frontier
return self.distances
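    # The loop above is a breadth-first search: each pass widens the frontier
    # by one passage, so start's linked neighbours end up with distance 1,
    # their unvisited neighbours with distance 2, and so on.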
def solve(self, start, goal):
self.compute_distances(start)
current = goal
self.crumbs = Distances(start)
self.crumbs[current] = self.distances[current]
while current is not start:
for neighbor in current.getLinks():
if self.distances[neighbor] < self.distances[current]:
self.crumbs[neighbor] = self.distances[neighbor]
current = neighbor
def contents_of(self, cell):
""" This routine prints the contents of this cell. This overloaded
function defines the contents of this cell as the distance of this cell
from some defined root cell
"""
if self.crumbs[cell] is not None:
return str(self.crumbs[cell])
else:
return super(SolutionGrid, self).contents_of(cell)
if __name__ == "__main__":
"""
Unit tests
"""
pass
| mit | 1,898,911,336,600,446,000 | 35.376471 | 82 | 0.645213 | false |
mattrid93/ProjectEuler | probs/prob41.py | 1 | 1223 | """Problem 41: Pandigital primes"""
import unittest
from prob24 import next_perm
from utils.primes import is_prime
def is_n_pandigital(n):
"""Tests if n is 1-len(n) pandigital."""
if len(str(n)) > 9:
return False
if len(str(n)) != len(set(str(n))):
return False
m = len(str(n))
digits = list(range(1, m+1))
filtered = [d for d in str(n) if int(d) in digits]
return len(str(n)) == len(filtered)
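# Illustrative checks:
#   is_n_pandigital(2143) -> True   (digits 1-4 each used exactly once)
#   is_n_pandigital(1233) -> False  (repeated digit)
#   is_n_pandigital(1245) -> False  (contains 5 but is missing 3)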
def solution():
digits = [1, 2, 3, 4, 5, 6, 7]
best = 0
current = digits
while len(digits) < 10:
conc = int("".join([str(d) for d in current]))
if is_prime(conc):
best = conc
current = next_perm(current)
if not current:
digits.append(digits[-1] + 1)
current = digits
return best
class TestFunction(unittest.TestCase):
def test_tester(self):
self.assertFalse(is_n_pandigital(2))
self.assertFalse(is_n_pandigital(445))
self.assertFalse(is_n_pandigital(52439))
self.assertTrue(is_n_pandigital(1))
self.assertTrue(is_n_pandigital(21))
self.assertTrue(is_n_pandigital(52431))
if __name__ == "__main__":
print(solution())
unittest.main()
| mit | -6,400,612,861,880,300,000 | 27.44186 | 54 | 0.587081 | false |
leaprovenzano/kutils | kutils/metrics.py | 1 | 1376 | from keras import backend as K
def d_precision(y_true, y_pred):
    '''Basically the precision metric from Keras 1, but
    adjusted in an attempt to keep it differentiable.
'''
true_positives = K.sum(K.clip(y_true * y_pred, 0, 1))
predicted_positives = K.sum(K.clip(y_pred, 0, 1))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def d_recall(y_true, y_pred):
'''this is basically reall metric from keras 1. but
I've attempted to make it differentiable.
'''
true_positives = K.sum(K.clip(y_true * y_pred, 0, 1))
possible_positives = K.sum(K.clip(y_true, 0, 1))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def d_fbeta_score(y_true, y_pred, beta=1):
"""this is basically fbeta from keras 1. but
I've attempted to make it differentiable.
"""
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
p = d_precision(y_true, y_pred)
r = d_recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
def dice_coef(y_true, y_pred, smooth=.000001):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f, axis=-1)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
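# Quick sanity check (evaluate with K.eval); identical binary masks give a
# coefficient of roughly 1.0, the smoothing term only guards the empty case:
#   K.eval(dice_coef(K.variable([[1., 0., 1.]]), K.variable([[1., 0., 1.]])))  # ~1.0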
| mit | 163,005,164,945,517,700 | 30.272727 | 86 | 0.613372 | false |
eri-trabiccolo/exaile | plugins/daapclient/__init__.py | 1 | 25181 | # Copyright (C) 2006-2007 Aren Olson
# 2011 Brian Parma
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import traceback
import os
import gtk
import dbus
import dbus.exceptions
import logging
import time
import threading
import gobject
import xlgui
import pickle
from httplib import CannotSendRequest  # caught in DaapConnection.get_track()
from gettext import gettext as _
from xlgui.panel.collection import CollectionPanel
from xlgui import guiutil
from xlgui.widgets import dialogs, menu, menuitems
from daap import DAAPClient, DAAPError
from xl import (
collection,
event,
trax,
common,
providers,
settings,
xdg
)
logger = logging.getLogger(__name__)
gobject.threads_init()
_smi = menu.simple_menu_item
_sep = menu.simple_separator
#
# Check For python-avahi, we can work without
# avahi, but wont be able to discover shares.
#
try:
import avahi
AVAHI = True
except ImportError:
logger.warning('avahi not installed, can\'t auto-discover servers')
AVAHI = False
# detect authorization support in python-daap
try:
tmp = DAAPClient()
tmp.connect("spam","eggs","sausage") #dummy login
del tmp
except TypeError:
AUTH = False
except:
AUTH = True
# Globals Warming
MANAGER = None
class AttrDict(dict):
def __getattr__(self, name):
return self[name]
import functools
# helper function to parse avahi info into a list of tuples (for dict())
parse = functools.partial(zip,
['interface',
'protocol',
'name',
'type',
'domain',
'host',
'aprotocol',
'address',
'port',
'txt',
'flags'])
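# Illustrative use of the helper above: ResolveService() returns an 11-element
# tuple, and wrapping the zipped pairs in AttrDict gives attribute access, e.g.
#   x = AttrDict(parse(resolved))   # 'resolved' is a hypothetical result tuple
#   x.address, x.port, x.name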
class DaapAvahiInterface(gobject.GObject): #derived from python-daap/examples
"""
Handles detection of DAAP shares via Avahi and manages the menu
showing the shares.
Fires a "connect" signal when a menu item is clicked.
"""
__gsignals__ = {
'connect' : ( gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
( gobject.TYPE_PYOBJECT, ) ) }
def new_service(self, interface, protocol, name, type, domain, flags):
"""
Called when a new share is found.
"""
x = self.server.ResolveService(interface, protocol, name, type, domain,
avahi.PROTO_UNSPEC, dbus.UInt32(0))
x = AttrDict(parse(x))
logger.info("DAAP share found: '{0}' at ({1},{2})."
.format(x.name, x.address, x.port))
# gstreamer can't handle link-local ipv6
if 'fe80' in x.address:
return
#Use all available info in key to avoid name conflicts.
nstr = '%s%s%s%s%s' % (interface, protocol, name, type, domain)
if nstr in self.services:
return
self.services[nstr] = x
self.rebuild_share_menu_items()
# self.new_share_menu_item(x)
def remove_service(self, interface, protocol, name, type, domain, flags):
"""
Called when the connection to a share is lost.
"""
logger.info("DAAP share lost: %s." % name)
nstr = '%s%s%s%s%s' % (interface, protocol, name, type, domain)
if nstr in self.services:
# self.remove_share_menu_item(name)
del self.services[nstr]
self.rebuild_share_menu_items()
def new_share_menu_item(self, name, key):
'''
This function is called to add a server to the connect menu.
'''
        # only add the item if the connect menu actually exists; ipv4/ipv6
        # filtering is done in rebuild_share_menu_items() before we get here
        logger.debug("adding menu item %s (%s)", name, key)
if self.menu:
menu_item = _smi(name, ['sep'], name,
callback=lambda *x: self.clicked(key))
self.menu.add_item(menu_item)
def remove_share_menu_item(self, name):
'''
This function is called to remove a server from the connect menu.
'''
if self.menu:
for item in self.menu._items:
if item.name == name:
self.menu.remove_item(item)
break
def clear_share_menu_items(self):
'''
This function is used to clear all the menu items out of a menu.
'''
if self.menu:
for item in self.menu._items:
if item.name == 'manual' or item.name == 'sep':
continue
self.menu.remove_item(item)
def rebuild_share_menu_items(self):
'''
This function fills the menu with known servers.
'''
self.clear_share_menu_items()
show_ipv6 = settings.get_option('plugin/daapclient/ipv6', False)
items = {}
for key,x in self.services.items():
name = '{0} ({1})'.format(x.name,x.host)
if x.protocol == avahi.PROTO_INET6:
if not show_ipv6:
continue
name += ' - ipv6'
if name not in items:
items[name] = (key,x)
# this dedups based on name-host, replacing ipv4 with ipv6
# for key,x in self.services.items():
# name = '{0} ({1})'.format(x.name,x.host)
# if x.protocol == avahi.PROTO_INET6 and show_ipv6:
# if name in items:
# # prefer ipv6
# if items[name][1].protocol == avahi.PROTO_INET:
# items[name] = (key,x)
# elif x.protocol == avahi.PROTO_INET:
# if name not in items:
# items[name] = (key,x)
for name in items:
self.new_share_menu_item(name, key=items[name][0])
def clicked(self, key):
'''
This function is called in response to a menu_item click.
Fire away.
'''
x = self.services[key]
gobject.idle_add(self.emit, "connect", (x.name, x.address, x.port, x))
def __init__(self, exaile, _menu):
"""
Sets up the avahi listener.
"""
gobject.GObject.__init__(self)
self.services = {}
self.menu = _menu
self.bus = dbus.SystemBus()
self.server = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
self.stype = '_daap._tcp'
self.domain = 'local'
self.browser = dbus.Interface(self.bus.get_object(avahi.DBUS_NAME,
self.server.ServiceBrowserNew(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC,
self.stype, self.domain, dbus.UInt32(0))),
avahi.DBUS_INTERFACE_SERVICE_BROWSER)
self.browser.connect_to_signal('ItemNew', self.new_service)
self.browser.connect_to_signal('ItemRemove', self.remove_service)
class DaapHistory(common.LimitedCache):
def __init__(self, limit=5, location=None, menu=None, callback=None):
common.LimitedCache.__init__(self, limit)
if location is None:
location = os.path.join(xdg.get_cache_dir(), 'daaphistory.dat')
self.location = location
self.menu = menu
self.callback = callback
self.load()
def __setitem__(self, item, value):
common.LimitedCache.__setitem__(self, item, value)
# add new menu item
if self.menu is not None and self.callback is not None:
menu_item = _smi('hist'+item, ['sep'], item,
callback=lambda *x: self.callback(None, value+(None,)))
self.menu.add_item(menu_item)
def load(self):
        try:
            with open(self.location, 'rb') as f:
                d = pickle.load(f)
                self.update(d)
        except (IOError, EOFError):
            # missing or unreadable history file; start empty
            pass
def save(self):
with open(self.location, 'wb') as f:
pickle.dump(self.cache, f, common.PICKLE_PROTOCOL)
class DaapManager:
'''
DaapManager is a class that manages DaapConnections, both manual
and avahi-generated.
'''
def __init__(self, exaile, _menu, avahi):
'''
Init! Create manual menu item, and connect to avahi signal.
'''
self.exaile = exaile
self.avahi = avahi
self.panels = {}
hmenu = menu.Menu(None)
def hmfactory(menu, parent, context):
item = gtk.MenuItem(_('History'))
item.set_submenu(hmenu)
sens = settings.get_option('plugin/daapclient/history', True)
item.set_sensitive(sens)
return item
_menu.add_item(_smi('manual', [], _('Manually...'),
callback=self.manual_connect))
_menu.add_item(menu.MenuItem('history', hmfactory, ['manual']))
_menu.add_item(_sep('sep', ['history']))
if avahi is not None:
avahi.connect("connect", self.connect_share)
self.history = DaapHistory(5, menu=hmenu, callback=self.connect_share)
def connect_share(self, obj, (name, address, port, svc)):
'''
        This function is called when a user wants to connect to
a DAAP share. It creates a new panel for the share, and
requests a track list.
'''
conn = DaapConnection(name, address, port)
conn.connect()
library = DaapLibrary(conn)
panel = NetworkPanel(self.exaile.gui.main.window, library, self)
# cst = CollectionScanThread(None, panel.net_collection, panel)
# cst.start()
panel.refresh() # threaded
providers.register('main-panel', panel)
self.panels[name] = panel
# history
if settings.get_option('plugin/daapclient/history', True):
self.history[name] = (name, address, port)
self.history.save()
def disconnect_share(self, name):
'''
This function is called to disconnect a previously connected
share. It calls the DAAP disconnect, and removes the panel.
'''
panel = self.panels[name]
# panel.library.daap_share.disconnect()
panel.daap_share.disconnect()
# panel.net_collection.remove_library(panel.library)
providers.unregister('main-panel', panel)
del self.panels[name]
def manual_connect(self, *args):
'''
This function is called when the user selects the manual
connection option from the menu. It requests a host/ip to connect
to.
'''
dialog = dialogs.TextEntryDialog(
_("Enter IP address and port for share"),
_("Enter IP address and port."))
resp = dialog.run()
if resp == gtk.RESPONSE_OK:
loc = dialog.get_value().strip()
host = loc
# the port will be anything after the last :
p = host.rfind(":")
# ipv6 literals should have a closing brace before the port
b = host.rfind("]")
if p > b:
try:
port = int(host[p+1:])
host = host[:p]
except ValueError:
logger.error('non-numeric port specified')
return
else:
port = 3689 # if no port specified, use default DAAP port
# if it's an ipv6 host with brackets, strip them
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
nstr = 'custom%s%s' % (host, port)
conn = DaapConnection(loc, host, port)
self.connect_share(None, (loc, host, port, None))
def refresh_share(self, name):
panel = self.panels[name]
rev = panel.daap_share.session.revision
# check for changes
panel.daap_share.session.update()
        logger.debug('DAAP Server %s returned revision %d ( old: %d ) after'
                     ' update request'
                     % (name, panel.daap_share.session.revision, rev))
# if changes, refresh
if rev != panel.daap_share.session.revision:
logger.info('DAAP Server %s changed, refreshing... (revision %d)'
% (name, panel.daap_share.session.revision))
panel.refresh()
def close(self, remove=False):
'''
This function disconnects active DaapConnections, and optionally
removes the panels from the UI.
'''
        # disconnect active shares
        for panel in self.panels.values():
            panel.daap_share.disconnect()
            # there's no point in removing panels if we're just shutting down,
            # only on disable
            if remove:
                providers.unregister('main-panel', panel)
class DaapConnection(object):
"""
A connection to a DAAP share.
"""
def __init__(self, name, server, port):
# if it's an ipv6 address
if ':' in server and server[0] != '[':
server = '['+server+']'
self.all = []
self.session = None
self.connected = False
self.tracks = None
self.server = server
self.port = port
self.name = name
self.auth = False
self.password = None
def connect(self, password = None):
"""
Connect, login, and retrieve the track list.
"""
try:
client = DAAPClient()
if AUTH and password:
client.connect(self.server, self.port, password)
else:
client.connect(self.server, self.port)
self.session = client.login()
self.connected = True
# except DAAPError:
except Exception, inst:
logger.warning('failed to connect to ({0},{1})'.format(
self.server, self.port))
logger.debug(traceback.format_exc())
self.auth = True
self.connected = False
raise
def disconnect(self):
"""
Disconnect, clean up.
"""
try:
self.session.logout()
except:
pass
self.session = None
self.tracks = None
self.database = None
self.all = []
self.connected = False
def reload(self):
"""
Reload the tracks from the server
"""
self.tracks = None
self.database = None
self.all = []
self.get_database()
t = time.time()
self.convert_list()
logger.debug('{0} tracks loaded in {1}s'.format(len(self.all),
time.time()-t))
def get_database(self):
"""
Get a DAAP database and its track list.
"""
if self.session:
self.database = self.session.library()
self.get_tracks(1)
def get_tracks(self, reset = False):
"""
Get the track list from a DAAP database
"""
if reset or self.tracks == None:
if self.database is None:
self.database = self.session.library()
self.tracks = self.database.tracks()
return self.tracks
def convert_list(self):
"""
Converts the DAAP track database into Exaile Tracks.
"""
# Convert DAAPTrack's attributes to Tracks.
eqiv = {'title':'minm','artist':'asar','album':'asal','tracknumber':'astn',}
# 'genre':'asgn','enc':'asfm','bitrate':'asbr'}
for tr in self.tracks:
if tr is not None:
#http://<server>:<port>/databases/<dbid>/items/<id>.<type>?session-id=<sessionid>
uri = "http://%s:%s/databases/%s/items/%s.%s?session-id=%s" % \
(self.server, self.port, self.database.id, tr.id,
tr.type, self.session.sessionid)
# Don't scan tracks because gio is slow!
temp = trax.Track(uri, scan=False)
for field in eqiv.keys():
try:
tag = u'%s'%tr.atom.getAtom(eqiv[field])
if tag != 'None':
temp.set_tag_raw(field, [tag], notify_changed=False)
except:
                        if field == 'tracknumber':
temp.set_tag_raw('tracknumber', [0], notify_changed=False)
logger.debug(traceback.format_exc())
#TODO: convert year (asyr) here as well, what's the formula?
try:
temp.set_tag_raw("__length", tr.atom.getAtom('astm') / 1000,
notify_changed=False)
except:
temp.set_tag_raw("__length", 0, notify_changed=False)
self.all.append(temp)
@common.threaded
def get_track(self, track_id, filename):
"""
Save the track with track_id to filename
"""
for t in self.tracks:
if t.id == track_id:
try:
t.save(filename)
except CannotSendRequest:
dialog = gtk.MessageDialog(APP.window,
gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK,
_("""This server does not support multiple connections.
You must stop playback before downloading songs."""))
class DaapLibrary(collection.Library):
'''
Library subclass for better management of collection??
Or something to do with devices or somesuch. Ask Aren.
'''
def __init__(self, daap_share, col=None):
# location = "http://%s:%s/databasese/%s/items/" % (daap_share.server, daap_share.port, daap_share.database.id)
# Libraries need locations...
location = "http://%s:%s/" % (daap_share.server, daap_share.port)
collection.Library.__init__(self, location)
self.daap_share = daap_share
#self.collection = col
def rescan(self, notify_interval=None):
'''
Called when a library needs to refresh it's track list.
'''
if self.collection is None:
return True
if self.scanning:
return
t = time.time()
logger.info('Scanning library: %s' % self.daap_share.name)
self.scanning = True
db = self.collection
# DAAP gives us all the tracks in one dump
self.daap_share.reload()
if self.daap_share.all:
count = len(self.daap_share.all)
else:
count = 0
if count > 0:
logger.info('Adding %d tracks from %s. (%f s)' % (count,
self.daap_share.name, time.time()-t))
self.collection.add_tracks(self.daap_share.all)
if notify_interval is not None:
event.log_event('tracks_scanned', self, count)
# track removal?
self.scanning = False
#return True
    # Overridden because the base implementation expects a local file tree.
def _count_files(self):
count = 0
if self.daap_share:
count = len(self.daap_share.all)
return count
class NetworkPanel(CollectionPanel):
"""
A panel that displays a collection of tracks from the DAAP share.
"""
def __init__(self, parent, library, mgr):
"""
Expects a parent gtk.Window, and a daap connection.
"""
self.name = library.daap_share.name
self.daap_share = library.daap_share
self.net_collection = collection.Collection(self.name)
self.net_collection.add_library(library)
CollectionPanel.__init__(self, parent, self.net_collection,
self.name, _show_collection_empty_message=False)
self.all = []
self.label = self.name
self.connect_id = None
self.menu = menu.Menu(self)
def get_tracks_func(*args):
return self.tree.get_selected_tracks()
self.menu.add_item(menuitems.AppendMenuItem('append', [],
get_tracks_func))
self.menu.add_item(menuitems.EnqueueMenuItem('enqueue', ['append'],
get_tracks_func))
self.menu.add_item(menuitems.PropertiesMenuItem('props', ['enqueue'],
get_tracks_func))
self.menu.add_item(_sep('sep',['props']))
self.menu.add_item(_smi('refresh', ['sep'], _('Refresh Server List'),
callback = lambda *x: mgr.refresh_share(self.name)))
self.menu.add_item(_smi('disconnect', ['refresh'],
_('Disconnect from Server'),
callback = lambda *x: mgr.disconnect_share(self.name)))
@common.threaded
def refresh(self):
'''
This is called to refresh the track list.
'''
# Since we don't use a ProgressManager/Thingy, we have to call these w/out
# a ScanThread
self.net_collection.rescan_libraries()
gobject.idle_add(self._refresh_tags_in_tree)
def save_selected(self, widget=None, event=None):
"""
Save the selected tracks to disk.
"""
items = self.get_selected_items()
dialog = gtk.FileChooserDialog(_("Select a Location for Saving"),
APP.window, gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
(gtk.STOCK_OPEN, gtk.RESPONSE_OK,
gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))
dialog.set_current_folder(APP.get_last_dir())
dialog.set_select_multiple(False)
result = dialog.run()
dialog.hide()
if result == gtk.RESPONSE_OK:
folder = dialog.get_current_folder()
self.save_items(items, folder)
@common.threaded
def save_items(self, items, folder):
for i in items:
tnum = i.get_track()
if tnum < 10: tnum = "0%s"%tnum
else: tnum = str(tnum)
filename = "%s%s%s - %s.%s"%(folder, os.sep, tnum,
i.get_title(), i.type)
i.connection.get_track(i.daapid, filename)
# print "DAAP: saving track %s to %s."%(i.daapid, filename)
def enable(exaile):
'''
Plugin Enabled.
'''
if exaile.loading:
event.add_callback(__enb, 'gui_loaded')
else:
__enb(None, exaile, None)
def __enb(eventname, exaile, wat):
gobject.idle_add(_enable, exaile)
def _enable(exaile):
global MANAGER
event.add_callback(on_settings_change, 'plugin_daapclient_option_set')
menu_ = menu.Menu(None)
providers.register('menubar-tools-menu', _sep('plugin-sep', ['track-properties']))
item = _smi('daap', ['plugin-sep'], _('Connect to DAAP...'),
submenu=menu_)
providers.register('menubar-tools-menu', item)
if AVAHI:
try:
avahi_interface = DaapAvahiInterface(exaile, menu_)
except RuntimeError: # no dbus?
avahi_interface = None
logger.warning('avahi interface could not be initialized (no dbus?)')
except dbus.exceptions.DBusException, s:
avahi_interface = None
logger.error('Got DBUS error: %s' % s)
logger.error('is avahi-daemon running?')
else:
avahi_interface = None
logger.warn('AVAHI could not be imported, you will not see broadcast shares.')
MANAGER = DaapManager(exaile, menu_, avahi_interface)
def teardown(exaile):
'''
Exaile Shutdown.
'''
if MANAGER is not None:
MANAGER.close()
def disable(exaile):
'''
Plugin Disabled.
'''
# disconnect from active shares
if MANAGER is not None:
# MANAGER.clear()
MANAGER.close(True)
for item in providers.get('menubar-tools-menu'):
if item.name == 'daap':
providers.unregister('menubar-tools-menu', item)
break
event.remove_callback(__enb, 'gui_loaded')
# settings stuff
import daapclientprefs
def get_preferences_pane():
return daapclientprefs
def on_settings_change(event, setting, option):
if option == 'plugin/daapclient/ipv6' and MANAGER is not None:
MANAGER.avahi.rebuild_share_menu_items()
# vi: et ts=4 sts=4 sw=4
| gpl-2.0 | -3,843,772,557,405,208,000 | 31.533592 | 118 | 0.547079 | false |
MRN-Code/pl2mind | models/nice_mlp.py | 1 | 1883 | """
Module for classes to simplify MLPs for NICE training.
"""
import pylearn2
import pylearn2.models
import nice
import nice.pylearn2.models.mlp
from pylearn2.models.mlp import MLP
from pylearn2.models.mlp import Linear
from pylearn2.models.mlp import RectifiedLinear
from nice.pylearn2.models.mlp import CouplingLayer
from nice.pylearn2.models.mlp import Homothety
from nice.pylearn2.models.mlp import SigmaScaling
from nice.pylearn2.models.mlp import TriangularMLP
class Simple_MLP(MLP):
def __init__(self, layer_name, depth, half_vis, nhid, irange=0.01):
layers = []
for i, d in enumerate(xrange(depth)):
layer = RectifiedLinear(dim=nhid,
layer_name="%s_h%d" % (layer_name, i),
irange=irange)
layers.append(layer)
layer = Linear(dim=half_vis,
layer_name="%s_out" % layer_name,
irange=irange)
layers.append(layer)
super(Simple_MLP, self).__init__(layers, layer_name=layer_name)
class Simple_TriangularMLP(TriangularMLP):
def __init__(self, layer_name, layer_depths, nvis, nhid, top_layer=None):
layers = []
for i, depth in enumerate(layer_depths):
layer = CouplingLayer(split=nvis // 2,
coupling=Simple_MLP("coupling_%d" % (i + 1),
depth,
nvis // 2,
nhid))
layers.append(layer)
if top_layer is None:
layer = Homothety(layer_name="z")
layers.append(layer)
else:
layer = top_layer
layers.append(layer)
super(Simple_TriangularMLP, self).__init__(layers, layer_name=layer_name)
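# Minimal construction sketch; the layer depths and sizes below are arbitrary
# example values, not recommendations:
#   mlp = Simple_TriangularMLP("nice_mlp", layer_depths=[2, 2, 2, 2],
#                              nvis=784, nhid=1000)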
| gpl-2.0 | 9,018,267,773,866,419,000 | 35.211538 | 81 | 0.553372 | false |
mouradmourafiq/django-subways | subways/views.py | 1 | 2167 | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.template import RequestContext, Context, loader
from subways.models import Map, Line, Stop
from subways.utilis import ride_path, longest_ride_path
def map(request, map_name, template_name=None):
""" view a map """
map = Map.objects.get(name=map_name)
lines = Line.objects.filter(map=map)
stops = Stop.objects.all().values_list('name', flat=True)
c = RequestContext(request, {'map': map,
'lines': lines,
'stops': stops
})
return render_to_response(template_name, c)
def longest_ride(request, map_name, template_name=None):
""""Return the longest possible (in terms of stops)
ride between any two stops in the system."""
map = Map.objects.get(name=map_name)
lines = Line.objects.filter(map=map)
stops = Stop.objects.all()
path_stops = longest_ride_path(stops)
stops = stops.values_list('name', flat=True)
c = RequestContext(request, {'map': map,
'lines': lines,
'stops': stops,
'path_stops': path_stops
})
return render_to_response(template_name, c)
def ride(request, map_name, here='mit', there='government', template_name=None):
""""Return the longest possible
ride between any two stops in the system."""
map = Map.objects.get(name=map_name)
lines = Line.objects.filter(map=map)
here_stop = Stop.objects.get(name=here)
there_stop = Stop.objects.get(name=there)
path_stops = ride_path(here_stop, there_stop)
stops = Stop.objects.all().values_list('name', flat=True)
c = RequestContext(request, {'map': map,
'lines': lines,
'stops': stops,
'path_stops': path_stops,
})
return render_to_response(template_name, c)
| bsd-2-clause | 7,201,897,100,008,982,000 | 45.106383 | 91 | 0.53761 | false |
asfin/electrum | electrum/constants.py | 1 | 3987 | # -*- coding: utf-8 -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
def read_json(filename, default):
path = os.path.join(os.path.dirname(__file__), filename)
try:
with open(path, 'r') as f:
r = json.loads(f.read())
except:
r = default
return r
class BitcoinMainnet:
TESTNET = False
WIF_PREFIX = 0x80
ADDRTYPE_P2PKH = 0
ADDRTYPE_P2SH = 5
SEGWIT_HRP = "bc"
GENESIS = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"
DEFAULT_PORTS = {'t': '50001', 's': '50002'}
DEFAULT_SERVERS = read_json('servers.json', {})
CHECKPOINTS = read_json('checkpoints.json', [])
XPRV_HEADERS = {
'standard': 0x0488ade4, # xprv
'p2wpkh-p2sh': 0x049d7878, # yprv
'p2wsh-p2sh': 0x0295b005, # Yprv
'p2wpkh': 0x04b2430c, # zprv
'p2wsh': 0x02aa7a99, # Zprv
}
XPUB_HEADERS = {
'standard': 0x0488b21e, # xpub
'p2wpkh-p2sh': 0x049d7cb2, # ypub
'p2wsh-p2sh': 0x0295b43f, # Ypub
'p2wpkh': 0x04b24746, # zpub
'p2wsh': 0x02aa7ed3, # Zpub
}
BIP44_COIN_TYPE = 0
class BitcoinTestnet:
TESTNET = True
WIF_PREFIX = 0xef
ADDRTYPE_P2PKH = 111
ADDRTYPE_P2SH = 196
SEGWIT_HRP = "tb"
GENESIS = "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943"
DEFAULT_PORTS = {'t': '51001', 's': '51002'}
DEFAULT_SERVERS = read_json('servers_testnet.json', {})
CHECKPOINTS = read_json('checkpoints_testnet.json', [])
XPRV_HEADERS = {
'standard': 0x04358394, # tprv
'p2wpkh-p2sh': 0x044a4e28, # uprv
'p2wsh-p2sh': 0x024285b5, # Uprv
'p2wpkh': 0x045f18bc, # vprv
'p2wsh': 0x02575048, # Vprv
}
XPUB_HEADERS = {
'standard': 0x043587cf, # tpub
'p2wpkh-p2sh': 0x044a5262, # upub
'p2wsh-p2sh': 0x024289ef, # Upub
'p2wpkh': 0x045f1cf6, # vpub
'p2wsh': 0x02575483, # Vpub
}
BIP44_COIN_TYPE = 1
class BitcoinRegtest(BitcoinTestnet):
SEGWIT_HRP = "bcrt"
GENESIS = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
class BitcoinSimnet(BitcoinTestnet):
SEGWIT_HRP = "sb"
GENESIS = "683e86bd5c6d110d91b94b97137ba6bfe02dbbdb8e3dff722a669b5d69d77af6"
DEFAULT_SERVERS = read_json('servers_regtest.json', {})
CHECKPOINTS = []
# don't import net directly, import the module instead (so that net is singleton)
net = BitcoinMainnet
def set_simnet():
global net
net = BitcoinSimnet
def set_mainnet():
global net
net = BitcoinMainnet
def set_testnet():
global net
net = BitcoinTestnet
def set_regtest():
global net
net = BitcoinRegtest
| mit | 8,130,624,915,382,426,000 | 28.977444 | 81 | 0.656383 | false |
joyhchen/zulip | zerver/views/messages.py | 1 | 44785 | from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection
from django.db.models import Q
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Any, AnyStr, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
from zerver.lib.str_utils import force_bytes, force_text
from zerver.decorator import authenticated_api_view, authenticated_json_post_view, \
has_request_variables, REQ, JsonableError, \
to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.actions import recipient_for_emails, do_update_message_flags, \
compute_mit_user_fullname, compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body, render_incoming_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.cache import (
generic_bulk_cached_fetch,
to_dict_cache_key_id,
)
from zerver.lib.message import (
access_message,
MessageDict,
extract_message_dict,
render_markdown,
stringify_message_dict,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool
from zerver.models import Message, UserProfile, Stream, Subscription, \
Realm, RealmAlias, Recipient, UserMessage, bulk_get_recipients, get_recipient, \
get_user_profile_by_email, get_stream, \
parse_usermessage_flags, \
email_to_domain, get_realm, get_active_streams, \
bulk_get_streams, get_user_profile_by_id
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, Select, ColumnElement
import re
import ujson
import datetime
from six.moves import map
import six
class BadNarrowOperator(JsonableError):
def __init__(self, desc, status_code=400):
# type: (str, int) -> None
self.desc = desc
self.status_code = status_code
def to_json_error_msg(self):
# type: () -> str
return _('Invalid narrow operator: {}').format(self.desc)
Query = Any # TODO: Should be Select, but sqlalchemy stubs are busted
ConditionTransform = Any # TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder(object):
def __init__(self, user_profile, msg_id_column):
# type: (UserProfile, str) -> None
self.user_profile = user_profile
self.msg_id_column = msg_id_column
def add_term(self, query, term):
# type: (Query, Dict[str, Any]) -> Query
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
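        # For example, a term like {'operator': 'pm-with', 'operand': '[email protected]'}
        # (the operand shown is hypothetical) resolves to the by_pm_with method below.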
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
def by_has(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if operand == 'private':
query = query.select_from(join(query.froms[0], "zerver_recipient",
column("recipient_id") ==
literal_column("zerver_recipient.id")))
cond = or_(column("type") == Recipient.PERSONAL,
column("type") == Recipient.HUDDLE)
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned' or operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern):
# type: (text_type) -> text_type
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, u'\u03bb' to u'\\\u03bb'. This function will correctly escape
them for postgres, u'\u03bb' to u'\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if c == '\000':
                    s[i] = '\\000'
elif ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
def by_stream(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
stream = get_stream(operand, self.user_profile.realm)
if stream is None:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to "social" to also show messages to /^(un)*social(.d)*$/
# (unsocial, ununsocial, social.d, etc)
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
if m:
base_stream_name = m.group(1)
else:
base_stream_name = stream.name
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
return query.where(maybe_negate(cond))
recipient = get_recipient(Recipient.STREAM, type_id=stream.id)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
def by_topic(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
# (foo, foo.d, foo.d.d, etc)
m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
if m:
base_topic = m.group(1)
else:
base_topic = operand
# Additionally, MIT users expect the empty instance and
# instance "personal" to be the same.
if base_topic in ('', 'personal', '(instance "")'):
regex = r'^(|personal|\(instance ""\))(\.d)*$'
else:
regex = r'^%s(\.d)*$' % (self._pg_re_escape(base_topic),)
cond = column("subject").op("~*")(regex)
return query.where(maybe_negate(cond))
cond = func.upper(column("subject")) == func.upper(literal(operand))
return query.where(maybe_negate(cond))
def by_sender(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
try:
sender = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
cond = column("sender_id") == literal(sender.id)
return query.where(maybe_negate(cond))
def by_near(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
return query
def by_id(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
cond = self.msg_id_column == literal(operand)
return query.where(maybe_negate(cond))
def by_pm_with(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if ',' in operand:
# Huddle
try:
emails = [e.strip() for e in operand.split(',')]
recipient = recipient_for_emails(emails, False,
self.user_profile, self.user_profile)
except ValidationError:
raise BadNarrowOperator('unknown recipient ' + operand)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
else:
# Personal message
self_recipient = get_recipient(Recipient.PERSONAL, type_id=self.user_profile.id)
if operand == self.user_profile.email:
# Personals with self
cond = and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == self_recipient.id)
return query.where(maybe_negate(cond))
# Personals with other user; include both directions.
try:
narrow_profile = get_user_profile_by_email(operand)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
narrow_recipient = get_recipient(Recipient.PERSONAL, narrow_profile.id)
cond = or_(and_(column("sender_id") == narrow_profile.id,
column("recipient_id") == self_recipient.id),
and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == narrow_recipient.id))
return query.where(maybe_negate(cond))
def by_search(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
if settings.USING_PGROONGA:
return self._by_search_pgroonga(query, operand, maybe_negate)
else:
return self._by_search_tsearch(query, operand, maybe_negate)
def _by_search_pgroonga(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
match_positions_byte = func.pgroonga.match_positions_byte
query_extract_keywords = func.pgroonga.query_extract_keywords
keywords = query_extract_keywords(operand)
query = query.column(match_positions_byte(column("rendered_content"),
keywords).label("content_matches"))
query = query.column(match_positions_byte(column("subject"),
keywords).label("subject_matches"))
condition = column("search_pgroonga").op("@@")(operand)
return query.where(maybe_negate(condition))
def _by_search_tsearch(self, query, operand, maybe_negate):
# type: (Query, str, ConditionTransform) -> Query
tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
ts_locs_array = func.ts_match_locs_array
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
column("rendered_content"),
tsquery).label("content_matches"))
# We HTML-escape the subject in Postgres to avoid doing a server round-trip
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
func.escape_html(column("subject")),
tsquery).label("subject_matches"))
# Do quoted string matching. We really want phrase
# search here so we can ignore punctuation and do
# stemming, but there isn't a standard phrase search
# mechanism in Postgres
for term in re.findall('"[^"]+"|\S+', operand):
if term[0] == '"' and term[-1] == '"':
term = term[1:-1]
term = '%' + connection.ops.prep_for_like_query(term) + '%'
cond = or_(column("content").ilike(term),
column("subject").ilike(term))
query = query.where(maybe_negate(cond))
cond = column("search_tsvector").op("@@")(tsquery)
return query.where(maybe_negate(cond))
# Apparently, the offsets we get from tsearch_extras are counted in
# unicode characters, not in bytes, so we do our processing with text,
# not bytes.
def highlight_string_text_offsets(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
string = force_text(text)
highlight_start = u'<span class="highlight">'
highlight_stop = u'</span>'
pos = 0
result = u''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return result
def highlight_string_bytes_offsets(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
string = force_bytes(text)
highlight_start = b'<span class="highlight">'
highlight_stop = b'</span>'
pos = 0
result = b''
for loc in locs:
(offset, length) = loc
result += string[pos:offset]
result += highlight_start
result += string[offset:offset + length]
result += highlight_stop
pos = offset + length
result += string[pos:]
return force_text(result)
def highlight_string(text, locs):
# type: (AnyStr, Iterable[Tuple[int, int]]) -> text_type
if settings.USING_PGROONGA:
return highlight_string_bytes_offsets(text, locs)
else:
return highlight_string_text_offsets(text, locs)
def get_search_fields(rendered_content, subject, content_matches, subject_matches):
# type: (text_type, text_type, Iterable[Tuple[int, int]], Iterable[Tuple[int, int]]) -> Dict[str, text_type]
return dict(match_content=highlight_string(rendered_content, content_matches),
match_subject=highlight_string(escape_html(subject), subject_matches))
def narrow_parameter(json):
# type: (str) -> List[Dict[str, Any]]
# FIXME: A hack to support old mobile clients
if json == '{}':
return None
data = ujson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
def convert_term(elem):
# type: (Union[Dict, List]) -> Dict[str, Any]
# We have to support a legacy tuple format.
if isinstance(elem, list):
if (len(elem) != 2
or any(not isinstance(x, str) and not isinstance(x, six.text_type)
for x in elem)):
raise ValueError("element is not a string pair")
return dict(operator=elem[0], operand=elem[1])
if isinstance(elem, dict):
validator = check_dict([
('operator', check_string),
('operand', check_string),
])
error = validator('elem', elem)
if error:
raise JsonableError(error)
# whitelist the fields we care about for now
return dict(
operator=elem['operator'],
operand=elem['operand'],
negated=elem.get('negated', False),
)
raise ValueError("element is not a dictionary")
return list(map(convert_term, data))
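# Editor note (illustrative, not part of the original module; operand values are
# hypothetical). Both accepted input forms are normalized by convert_term:
#
#   narrow_parameter('[["stream", "devel"]]')
#       -> [{'operator': 'stream', 'operand': 'devel'}]
#   narrow_parameter('[{"operator": "stream", "operand": "devel"}]')
#       -> [{'operator': 'stream', 'operand': 'devel', 'negated': False}]
#
# Downstream code treats a missing 'negated' key as False via term.get('negated', False).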
def is_public_stream(stream_name, realm):
# type: (text_type, Realm) -> bool
"""
Determine whether a stream is public, so that
our caller can decide whether we can get
historical messages for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
"""
stream = get_stream(stream_name, realm)
if stream is None:
return False
return stream.is_public()
def ok_to_include_history(narrow, realm):
# type: (Iterable[Dict[str, Any]], Realm) -> bool
# There are occasions where we need to find Message rows that
# have no corresponding UserMessage row, because the user is
# reading a public stream that might include messages that
# were sent while the user was not subscribed, but which they are
# allowed to see. We have to be very careful about constructing
# queries in those situations, so this function should return True
# only if we are 100% sure that we're gonna add a clause to the
# query that narrows to a particular public stream on the user's realm.
# If we screw this up, then we can get into a nasty situation of
# polluting our narrow results with messages from other realms.
include_history = False
if narrow is not None:
for term in narrow:
if term['operator'] == "stream" and not term.get('negated', False):
if is_public_stream(term['operand'], realm):
include_history = True
# Disable historical messages if the user is narrowing on anything
# that's a property on the UserMessage table. There cannot be
# historical messages in these cases anyway.
for term in narrow:
if term['operator'] == "is":
include_history = False
return include_history
def get_stream_name_from_narrow(narrow):
# type: (Iterable[Dict[str, Any]]) -> Optional[text_type]
for term in narrow:
if term['operator'] == 'stream':
return term['operand'].lower()
return None
def exclude_muting_conditions(user_profile, narrow):
# type: (UserProfile, Iterable[Dict[str, Any]]) -> List[Selectable]
conditions = []
stream_name = get_stream_name_from_narrow(narrow)
if stream_name is None:
rows = Subscription.objects.filter(
user_profile=user_profile,
active=True,
in_home_view=False,
recipient__type=Recipient.STREAM
).values('recipient_id')
muted_recipient_ids = [row['recipient_id'] for row in rows]
condition = not_(column("recipient_id").in_(muted_recipient_ids))
conditions.append(condition)
muted_topics = ujson.loads(user_profile.muted_topics)
if muted_topics:
if stream_name is not None:
muted_topics = [m for m in muted_topics if m[0].lower() == stream_name]
if not muted_topics:
return conditions
muted_streams = bulk_get_streams(user_profile.realm,
[muted[0] for muted in muted_topics])
muted_recipients = bulk_get_recipients(Recipient.STREAM,
[stream.id for stream in six.itervalues(muted_streams)])
recipient_map = dict((s.name.lower(), muted_recipients[s.id].id)
for s in six.itervalues(muted_streams))
muted_topics = [m for m in muted_topics if m[0].lower() in recipient_map]
if muted_topics:
def mute_cond(muted):
# type: (Tuple[str, str]) -> Selectable
stream_cond = column("recipient_id") == recipient_map[muted[0].lower()]
topic_cond = func.upper(column("subject")) == func.upper(muted[1])
return and_(stream_cond, topic_cond)
condition = not_(or_(*list(map(mute_cond, muted_topics))))
return conditions + [condition]
return conditions
@has_request_variables
def get_old_messages_backend(request, user_profile,
anchor = REQ(converter=int),
num_before = REQ(converter=to_non_negative_int),
num_after = REQ(converter=to_non_negative_int),
narrow = REQ('narrow', converter=narrow_parameter, default=None),
use_first_unread_anchor = REQ(default=False, converter=ujson.loads),
apply_markdown=REQ(default=True,
converter=ujson.loads)):
# type: (HttpRequest, UserProfile, int, int, int, Optional[List[Dict[str, Any]]], bool, bool) -> HttpResponse
include_history = ok_to_include_history(narrow, user_profile.realm)
if include_history and not use_first_unread_anchor:
query = select([column("id").label("message_id")], None, "zerver_message")
inner_msg_id_col = literal_column("zerver_message.id")
elif narrow is None:
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
"zerver_usermessage")
inner_msg_id_col = column("message_id")
else:
# TODO: Don't do this join if we're not doing a search
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
inner_msg_id_col = column("message_id")
num_extra_messages = 1
is_search = False
if narrow is not None:
# Add some metadata to our logging data for narrows
verbose_operators = []
for term in narrow:
if term['operator'] == "is":
verbose_operators.append("is:" + term['operand'])
else:
verbose_operators.append(term['operator'])
request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
# Build the query for the narrow
num_extra_messages = 0
builder = NarrowBuilder(user_profile, inner_msg_id_col)
for term in narrow:
if term['operator'] == 'search' and not is_search:
query = query.column("subject").column("rendered_content")
is_search = True
query = builder.add_term(query, term)
# We add 1 to the number of messages requested if no narrow was
# specified to ensure that the resulting list always contains the
# anchor message. If a narrow was specified, the anchor message
# might not match the narrow anyway.
if num_after != 0:
num_after += num_extra_messages
else:
num_before += num_extra_messages
sa_conn = get_sqlalchemy_connection()
if use_first_unread_anchor:
condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
# We exclude messages on muted topics when finding the first unread
# message in this narrow
muting_conditions = exclude_muting_conditions(user_profile, narrow)
if muting_conditions:
condition = and_(condition, *muting_conditions)
first_unread_query = query.where(condition)
first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
if len(first_unread_result) > 0:
anchor = first_unread_result[0][0]
else:
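            # No unread messages match this narrow: fall back to a sentinel id
            # larger than any real message id so only the "before" window applies.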
anchor = 10000000000000000
before_query = None
after_query = None
if num_before != 0:
before_anchor = anchor
if num_after != 0:
# Don't include the anchor in both the before query and the after query
before_anchor = anchor - 1
before_query = query.where(inner_msg_id_col <= before_anchor) \
.order_by(inner_msg_id_col.desc()).limit(num_before)
if num_after != 0:
after_query = query.where(inner_msg_id_col >= anchor) \
.order_by(inner_msg_id_col.asc()).limit(num_after)
if num_before == 0 and num_after == 0:
# This can happen when a narrow is specified.
after_query = query.where(inner_msg_id_col == anchor)
if before_query is not None:
if after_query is not None:
query = union_all(before_query.self_group(), after_query.self_group())
else:
query = before_query
else:
query = after_query
main_query = alias(query)
query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
# This is a hack to tag the query we use for testing
query = query.prefix_with("/* get_old_messages */")
query_result = list(sa_conn.execute(query).fetchall())
# The following is a little messy, but ensures that the code paths
# are similar regardless of the value of include_history. The
# 'user_messages' dictionary maps each message to the user's
# UserMessage object for that message, which we will attach to the
# rendered message dict before returning it. We attempt to
# bulk-fetch rendered message dicts from remote cache using the
# 'messages' list.
search_fields = dict() # type: Dict[int, Dict[str, text_type]]
message_ids = [] # type: List[int]
user_message_flags = {} # type: Dict[int, List[str]]
if include_history:
message_ids = [row[0] for row in query_result]
# TODO: This could be done with an outer join instead of two queries
user_message_flags = dict((user_message.message_id, user_message.flags_list()) for user_message in
UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids))
for row in query_result:
message_id = row[0]
if user_message_flags.get(message_id) is None:
user_message_flags[message_id] = ["read", "historical"]
if is_search:
(_, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
else:
for row in query_result:
message_id = row[0]
flags = row[1]
user_message_flags[message_id] = parse_usermessage_flags(flags)
message_ids.append(message_id)
if is_search:
(_, _, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
cache_transformer = lambda row: MessageDict.build_dict_from_raw_db_row(row, apply_markdown)
id_fetcher = lambda row: row['id']
message_dicts = generic_bulk_cached_fetch(lambda message_id: to_dict_cache_key_id(message_id, apply_markdown),
Message.get_raw_db_rows,
message_ids,
id_fetcher=id_fetcher,
cache_transformer=cache_transformer,
extractor=extract_message_dict,
setter=stringify_message_dict)
message_list = []
for message_id in message_ids:
msg_dict = message_dicts[message_id]
msg_dict.update({"flags": user_message_flags[message_id]})
msg_dict.update(search_fields.get(message_id, {}))
message_list.append(msg_dict)
statsd.incr('loaded_old_messages', len(message_list))
ret = {'messages': message_list,
"result": "success",
"msg": ""}
return json_success(ret)
@has_request_variables
def update_message_flags(request, user_profile,
messages=REQ(validator=check_list(check_int)),
operation=REQ('op'), flag=REQ(),
all=REQ(validator=check_bool, default=False),
stream_name=REQ(default=None),
topic_name=REQ(default=None)):
# type: (HttpRequest, UserProfile, List[int], text_type, text_type, bool, Optional[text_type], Optional[text_type]) -> HttpResponse
if all:
target_count_str = "all"
else:
target_count_str = str(len(messages))
log_data_str = "[%s %s/%s]" % (operation, flag, target_count_str)
request._log_data["extra"] = log_data_str
stream = None
if stream_name is not None:
stream = get_stream(stream_name, user_profile.realm)
if not stream:
raise JsonableError(_('No such stream \'%s\'') % (stream_name,))
if topic_name:
topic_exists = UserMessage.objects.filter(user_profile=user_profile,
message__recipient__type_id=stream.id,
message__recipient__type=Recipient.STREAM,
message__subject__iexact=topic_name).exists()
if not topic_exists:
raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
count = do_update_message_flags(user_profile, operation, flag, messages,
all, stream, topic_name)
# If we succeed, update log data str with the actual count for how
# many messages were updated.
if count != len(messages):
log_data_str = "[%s %s/%s] actually %s" % (operation, flag, target_count_str, count)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'messages': messages,
'msg': ''})
def create_mirrored_message_users(request, user_profile, recipients):
# type: (HttpResponse, UserProfile, Iterable[text_type]) -> Tuple[bool, UserProfile]
if "sender" not in request.POST:
return (False, None)
sender_email = request.POST["sender"].strip().lower()
referenced_users = set([sender_email])
if request.POST['type'] == 'private':
for email in recipients:
referenced_users.add(email.lower())
if request.client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif request.client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif request.client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
# Unrecognized mirroring client
return (False, None)
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
return (False, None)
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_profile_by_email(sender_email)
return (True, sender)
def same_realm_zephyr_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
#
# Are the sender and recipient both addresses in the same Zephyr
# mirroring realm? We have to handle this specially, inferring
# the domain from the e-mail address, because the recipient may
    # not exist in Zulip and we may need to make a stub Zephyr
# mirroring user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email)
return user_profile.realm.is_zephyr_mirror_realm and \
RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_irc_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be [email protected]
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email).replace("irc.", "")
return RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_jabber_user(user_profile, email):
# type: (UserProfile, text_type) -> bool
try:
validators.validate_email(email)
except ValidationError:
return False
# If your Jabber users have a different email domain than the
# Zulip users, this is where you would do any translation.
domain = email_to_domain(email)
return RealmAlias.objects.filter(realm=user_profile.realm, domain=domain).exists()
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request, user_profile,
message_type_name = REQ('type'),
message_to = REQ('to', converter=extract_recipients, default=[]),
forged = REQ(default=False),
subject_name = REQ('subject', lambda x: x.strip(), None),
message_content = REQ('content'),
domain = REQ('domain', default=None),
local_id = REQ(default=None),
queue_id = REQ(default=None)):
# type: (HttpRequest, UserProfile, text_type, List[text_type], bool, Optional[text_type], text_type, Optional[text_type], Optional[text_type], Optional[text_type]) -> HttpResponse
client = request.client
is_super_user = request.user.is_api_super_user
if forged and not is_super_user:
return json_error(_("User not authorized for this query"))
realm = None
if domain and domain != user_profile.realm.domain:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error(_("User not authorized for this query"))
realm = get_realm(domain)
if not realm:
return json_error(_("Unknown domain %s") % (domain,))
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream (any stream for the Zephyr and Jabber
# mirrors, but only streams with names starting with a "#" for
# IRC mirrors)
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
if "sender" not in request.POST:
return json_error(_("Missing sender"))
if message_type_name != "private" and not is_super_user:
return json_error(_("User not authorized for this query"))
(valid_input, mirror_sender) = \
create_mirrored_message_users(request, user_profile, message_to)
if not valid_input:
return json_error(_("Invalid mirrored message"))
if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
return json_error(_("Invalid mirrored realm"))
if (client.name == "irc_mirror" and message_type_name != "private" and
not message_to[0].startswith("#")):
return json_error(_("IRC stream names must start with #"))
sender = mirror_sender
else:
sender = user_profile
ret = check_send_message(sender, client, message_type_name, message_to,
subject_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id)
return json_success({"id": ret})
@authenticated_json_post_view
def json_update_message(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return update_message_backend(request, user_profile)
@has_request_variables
def update_message_backend(request, user_profile,
message_id=REQ(converter=to_non_negative_int),
subject=REQ(default=None),
propagate_mode=REQ(default="change_one"),
content=REQ(default=None)):
# type: (HttpRequest, UserProfile, int, Optional[text_type], Optional[str], Optional[text_type]) -> HttpResponse
if not user_profile.realm.allow_message_editing:
return json_error(_("Your organization has turned off message editing."))
try:
message = Message.objects.select_related().get(id=message_id)
except Message.DoesNotExist:
raise JsonableError(_("Unknown message id"))
# You only have permission to edit a message if:
# 1. You sent it, OR:
# 2. This is a topic-only edit for a (no topic) message, OR:
# 3. This is a topic-only edit and you are an admin.
if message.sender == user_profile:
pass
elif (content is None) and ((message.topic_name() == "(no topic)") or
user_profile.is_realm_admin):
pass
else:
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
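    # Illustrative arithmetic (editor note): with message_content_edit_limit_seconds
    # set to 600, content edits are accepted until 620 seconds after pub_date.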
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has past"))
if subject is None and content is None:
return json_error(_("Nothing to change"))
if subject is not None:
subject = subject.strip()
if subject == "":
raise JsonableError(_("Topic can't be empty"))
rendered_content = None
links_for_embed = set() # type: Set[text_type]
if content is not None:
content = content.strip()
if content == "":
content = "(deleted)"
content = truncate_body(content)
# We exclude UserMessage.flags.historical rows since those
# users did not receive the message originally, and thus
# probably are not relevant for reprocessed alert_words,
# mentions and similar rendering features. This may be a
# decision we change in the future.
ums = UserMessage.objects.filter(message=message.id,
flags=~UserMessage.flags.historical)
message_users = {get_user_profile_by_id(um.user_profile_id) for um in ums}
# If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(message,
content=content,
message_users=message_users)
links_for_embed |= message.links_for_preview
do_update_message(user_profile, message, subject, propagate_mode, content, rendered_content)
if links_for_embed and getattr(settings, 'INLINE_URL_EMBED_PREVIEW', None):
event_data = {
'message_id': message.id,
'message_content': message.content,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data, lambda x: None)
return json_success()
@has_request_variables
def json_fetch_raw_message(request, user_profile,
message_id=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
(message, user_message) = access_message(user_profile, message_id)
return json_success({"raw_content": message.content})
@has_request_variables
def render_message_backend(request, user_profile, content=REQ()):
# type: (HttpRequest, UserProfile, text_type) -> HttpResponse
message = Message()
message.sender = user_profile
message.content = content
message.sending_client = request.client
rendered_content = render_markdown(message, content, domain=user_profile.realm.domain)
return json_success({"rendered": rendered_content})
@authenticated_json_post_view
def json_messages_in_narrow(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return messages_in_narrow_backend(request, user_profile)
@has_request_variables
def messages_in_narrow_backend(request, user_profile,
msg_ids = REQ(validator=check_list(check_int)),
narrow = REQ(converter=narrow_parameter)):
# type: (HttpRequest, UserProfile, List[int], List[Dict[str, Any]]) -> HttpResponse
# Note that this function will only work on messages the user
# actually received
# TODO: We assume that the narrow is a search. For now this works because
# the browser only ever calls this function for searches, since it can't
# apply that narrow operator itself.
query = select([column("message_id"), column("subject"), column("rendered_content")],
and_(column("user_profile_id") == literal(user_profile.id),
column("message_id").in_(msg_ids)),
join("zerver_usermessage", "zerver_message",
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
builder = NarrowBuilder(user_profile, column("message_id"))
for term in narrow:
query = builder.add_term(query, term)
sa_conn = get_sqlalchemy_connection()
query_result = list(sa_conn.execute(query).fetchall())
search_fields = dict()
for row in query_result:
(message_id, subject, rendered_content, content_matches, subject_matches) = row
search_fields[message_id] = get_search_fields(rendered_content, subject,
content_matches, subject_matches)
return json_success({"messages": search_fields})
| apache-2.0 | 8,876,619,067,596,775,000 | 43.919759 | 183 | 0.604488 | false |
ebrian/dispenserd | tests/test.py | 1 | 7171 | import unittest
import requests
import sys
import random
import json
class TestDispenserd(unittest.TestCase):
base_url = 'http://127.0.0.1:8282'
def test010_is_running(self):
res = requests.get(self.base_url + '/')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
def test020_queue_is_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['main']), 0)
def test030_queue_fills(self):
for i in range(0, 100):
res = requests.post(self.base_url + '/schedule', \
json={'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
def test031_queue_not_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['main']), 100)
def test032_queue_properly_ordered(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['main']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
def test033_queue_drains(self):
for i in range(0, 100):
res = requests.post(self.base_url + '/receive_noblock')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test034_queue_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['main']), 0)
def test040_queues_fill(self):
for i in range(0, 30):
res = requests.post(self.base_url + '/schedule', \
json={'lane': 'lane1', 'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
for i in range(0, 50):
res = requests.post(self.base_url + '/schedule', \
json={'lane': 'lane2', 'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
for i in range(0, 70):
res = requests.post(self.base_url + '/schedule', \
json={'lane': 'lane3', 'priority': random.randint(0, 125), 'message': 'job #' + str(i)})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['status'], 'ok')
self.assertEqual(json['code'], 0)
def test041_queues_not_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 30)
self.assertEqual(len(json['lane2']), 50)
self.assertEqual(len(json['lane3']), 70)
def test042_queues_properly_ordered(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['lane1']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['lane2']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
res = requests.get(self.base_url + '/jobs')
json = res.json()
previous_priority = -1
previous_date = ''
for job in json['lane3']:
self.assertLessEqual(previous_priority, job['priority'])
if previous_priority == job['priority']:
self.assertLessEqual(previous_date, job['timestamp'])
previous_priority = job['priority']
previous_date = job['timestamp']
def test043_queue1_drains(self):
for i in range(0, 30):
res = requests.post(self.base_url + '/receive_noblock', \
json={'lane': 'lane1'})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test044_queue1_empty_queue23_full(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 0)
self.assertEqual(len(json['lane2']), 50)
self.assertEqual(len(json['lane3']), 70)
def test045_queue2_drains(self):
for i in range(0, 50):
res = requests.post(self.base_url + '/receive_noblock', \
json={'lane': 'lane2'})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test046_queue12_empty_queue3_full(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 0)
self.assertEqual(len(json['lane2']), 0)
self.assertEqual(len(json['lane3']), 70)
def test047_queue3_drains(self):
for i in range(0, 70):
res = requests.post(self.base_url + '/receive_noblock', \
json={'lane': 'lane3'})
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(json['message'].startswith('job #'), True)
def test048_queue123_empty(self):
res = requests.get(self.base_url + '/jobs')
json = res.json()
self.assertEqual(res.status_code, 200)
self.assertEqual(len(json['lane1']), 0)
self.assertEqual(len(json['lane2']), 0)
self.assertEqual(len(json['lane3']), 0)
suite = unittest.TestLoader().loadTestsFromTestCase(TestDispenserd)
ret = unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(not ret)
| mit | 5,116,270,982,148,051,000 | 39.514124 | 104 | 0.569516 | false |
chop-dbhi/brptoolkit-demo-harvest | brp_demo/formatters.py | 1 | 36221 | from django.core.urlresolvers import reverse
from avocado.formatters import registry
from serrano.formatters import HTMLFormatter
from brp_demo.models import *
from django.template import Context
from django.template.loader import get_template
from django.conf import settings
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import logging
log = logging.getLogger(__name__)
class cBioLinkFormatter(HTMLFormatter):
def to_html(self, value, **context):
# http://reslnbrp_demobio01.research.chop.edu:8080/cbioportal/case.do?cancer_study_id=cranio_resnicklab_2013&case_id=7316_100
from .models import NautilusSubject, PortalSubject
sub = PortalSubject.objects.get(pk=value)
sdgs = sub.nautilussubject_set.all()
html = '<ul>'
for sdg in sdgs:
html += '<em>{0}</em>'.format(sdg.sample_subject_id)
if hasattr(sdg, 'cbiosample'):
html += '<li><a href="{0}case.do?cancer_study_id={1}&case_id={2}" target="_blank">View in cBio</a></li>'.format(settings.CBIO_HOST, sdg.cbiosample.cancer_study_identifier, sdg.cbiosample.stable_id)
else:
html += '<li>Not Available</li>'
        html += '</ul>'
        return html
class SpecimenLocationFormatter(HTMLFormatter):
def to_html(self, values, **context):
from avocado.models import DataField
plate_locations = ['plate_order', 'plate_column', 'plate_row']
html_str = ""
for name in plate_locations:
if values[name] is not None:
data_field = DataField.objects.get_by_natural_key('brp_demo', 'specimen', name)
html_str += "<tr><td>{0}</td><td>{1}</td></tr>".format(data_field, values[name])
if html_str != "":
return "<table class='table table-striped table-condensed'>{0}</table>".format(html_str)
return ""
to_html.process_multiple = True
class PatientSummaryFormatter(HTMLFormatter):
def to_html(self, value, **context):
url = reverse('patient-detail', kwargs={'pk': value})
return '<a href="{0}">View Summary</a>'.format(url)
def to_csv(self, value, **context):
return ''
class PathologyReportFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import NautilusSubject, PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
sdgs = sub.nautilussubject_set.all()
except:
return '<em>Not Available</em>'
html = '<ul>'
for sdg in sdgs:
html += '<em>{0}</em>'.format(sdg.sample_subject_id)
if not sdg.pathreport_set.all():
                html += '<li><em>Not Available</em></li>'
for each in sdg.pathreport_set.all():
html += '<li><a href="{0}">Pathology Report</a></li>'.format(each.path_url)
        html += '</ul>'
return html
def to_csv(self, value, **context):
from .models import NautilusSubject
try:
sub = NautilusSubject.objects.get(sample_subject_id=value)
except:
return ''
if not sub.pathreport_set.all():
return ''
csv_ = ''
for each in sub.pathreport_set.all():
csv_ += '{0},'.format(each.path_url)
        return csv_.rstrip(',')
class OperativeReportFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import NautilusSubject, PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
sdgs = sub.nautilussubject_set.all()
except:
return '<em>Not Available</em>'
html = '<ul>'
for sdg in sdgs:
html += '<em>{0}</em>'.format(sdg.sample_subject_id)
if not sdg.operativereport_set.all():
                html += '<li><em>Not Available</em></li>'
for each in sdg.operativereport_set.all():
html += '<li><a href="{0}">Operative Report</a></li>'.format(each.op_url)
        html += '</ul>'
return html
def to_csv(self, value, **context):
from .models import NautilusSubject
try:
sub = NautilusSubject.objects.get(sample_subject_id=value)
except:
return ''
if not sub.operativereport_set.all():
return ''
csv_ = ''
for each in sub.operativereport_set.all():
csv_ += '{0},'.format(each.op_url)
        return csv_.rstrip(',')
class EnrollmentTypeFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return '<em>Not Available</em>'
        count = 1
        html = ''
        for diag in diags:
            html += '<ul>'
            if diag.diagnosis_type:
                html += '<em>{0}</em>'.format(diag.diagnosis_type)
            else:
                html += '<em>Diagnosis {0}</em>'.format(count)
            if not diag.enrollment_type:
                html += '<li><em>Unknown</em></li>'
            else:
                html += '<li>{0}</li>'.format(diag.enrollment_type)
            html += '</ul>'
            count += 1
        return html
def to_csv(self, value, **context):
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return ''
        csv = ''
        for diag in diags:
            if diag.diagnosis_type:
                csv += '{0} - '.format(diag.diagnosis_type)
            if not diag.enrollment_type:
                csv += 'Unknown,'
            else:
                csv += '{0},'.format(diag.enrollment_type)
        return csv.rstrip(',')
class AltEnrollmentTypeFormatter(HTMLFormatter):
def to_html(self, value, **context):
from .models import PortalSubject
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return '<em>Not Available</em>'
        html = ''
        for diag in diags:
            html += '<ul>'
            if diag.diagnosis_type:
                html += '<em>{0}</em>'.format(diag.diagnosis_type)
            if not diag.enrollment_type:
                html += '<li><em>Unknown</em></li>'
            else:
                html += '<li>{0}</li>'.format(diag.enrollment_type)
            html += '</ul>'
        return html
def to_csv(self, value, **context):
try:
sub = PortalSubject.objects.get(ehb_id=value)
diags = sub.diagnosis_set.all()
except:
return ''
        csv = ''
        for diag in diags:
            if diag.diagnosis_type:
                csv += '{0} - '.format(diag.diagnosis_type)
            if not diag.enrollment_type:
                csv += 'Unknown,'
            else:
                csv += '{0},'.format(diag.enrollment_type)
        return csv.rstrip(',')
class LinkAggFormatter(HTMLFormatter):
def to_html(self, values, **kwargs):
from .models import PathFolders, PortalSubject
sub = PortalSubject.objects.get(ehb_id=values['ehb_id'])
sdgs = sub.nautilussubject_set.all()
html = '<i class="icon-info-sign"></i>'
content = "Pathology slide images and scans are provided in .svs format which is viewable using Aperio ImageScope software. <br><br>Aperio ImageScope software can be downloaded <a target=\'_blank\' href=\'http://www.aperio.com/appcenter\'>here</a>"
popover = '<script>$(".icon-info-sign").popover({"html":true,"title":"File format info","content":"' + content + '"})</script>'
urls = ['<ul>']
for sdg in sdgs:
urls.append('<ul><em>{0}</em>'.format(sdg.sample_subject_id))
folders = PathFolders.objects.filter(sample_subject_id=sdg.sample_subject_id)
links = folders.values('description', 'folder_link')
for link in links:
urls.append('<li><a href="{folder_link}">{description}</a></li>'.format(**link))
urls.append('</ul>')
if sdgs and links:
return html + ''.join(urls) + '</ul>' + popover
else:
return ''
to_html.process_multiple = True
def to_csv(self, values, **kwargs):
folders = PathFolders.objects.filter(sample_subject_id=values['sample_subject_id'])
links = folders.values('description', 'folder_link')
_str = ''
for link in links:
_str += '{folder_link},'.format(**link)
return _str
to_csv.process_multiple = True
class AliquotAggFormatter(HTMLFormatter):
field = 'aliquots'
def _object_to_string(self, aliquot):
xstr = lambda s: '' if s is None else str(s)
fmt = '%s %s\n' % (
xstr(aliquot.aliquot_name),
xstr(aliquot.secondary_sample_type))
if aliquot.volume_remaining:
fmt += '<br>\tVolume Remaining: %s %s' % (
xstr(aliquot.volume_remaining),
xstr(aliquot.vol_units))
if aliquot.concentration:
fmt += '<br>\tConcentration: %s %s' % (
xstr(aliquot.concentration),
xstr(aliquot.conc_units))
if aliquot.concentration is None and aliquot.volume_remaining is None:
fmt += '<br>\tVolume and Concentration Unknown'
return fmt
def _object_detail(self, aliquot):
fmt = 'Name: %s' % aliquot.aliquot_name
fmt += '<br>Type: %s' % aliquot.tissue_type
fmt += '<br>Received On: %s' % aliquot.received_on
fmt += '<br>Event: %s' % aliquot.collection_event_name
fmt += '<br>Note: <br> %s' % aliquot.draw_note
try:
if aliquot.sample_type == 'Tissue':
if aliquot.diagnosis_id.diagnosis_type:
fmt += '<br>Associated Diagnosis: <br> %s' % aliquot.diagnosis_id.diagnosis_type
except:
pass
if aliquot.volume_remaining is None or aliquot.volume_received is None:
fmt += '<br>Availability: <i>Unknown</i> <br>'
try:
avail = float(aliquot.volume_received) / float(aliquot.volume_remaining) * 100
except:
avail = 0.00
fmt += '<br>Availability: %s <br>' % ('''<div class=\\\"progress progress-striped\\\"><div class=\\\"bar\\\" style=\\\"width: {}%;\\\"></div></div>'''.format(avail))
return fmt
def _build_html(self, pk):
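        # Descriptive note (editor addition): builds a nested mapping of the form
        #   {sample_subject_id: {visit_name: {sample_type: [aliquot_dict, ...]}}}
        # where each aliquot_dict has 'id', 'aliquot', 'content' and a 'children'
        # list whose entries carry 'id', 'aliquot' and 'content' for derived aliquots.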
sdgs = NautilusSubject.objects.filter(ehb_id=pk).all()
visit_aliquot_set = {}
for subject in sdgs:
visits = subject.nautilusvisit_set.all()
visit_aliquot_set[subject.sample_subject_id] = {}
for visit in visits:
visit_aliquot_set[subject.sample_subject_id][visit.visit_name] = {}
for sample_type in visit.nautilusaliquot_set.filter(parent_aliquot_id__isnull=True).distinct('sample_type').all():
visit_aliquot_set[subject.sample_subject_id][visit.visit_name][sample_type.sample_type] = []
for aliq in visit.nautilusaliquot_set.filter(sample_type=sample_type.sample_type).filter(parent_aliquot_id__isnull=True).all():
aliquot = {
'aliquot': self._object_to_string(aliq),
'id': aliq.aliquot_id,
'content': self._object_detail(aliq),
'children': []
}
for child in visit.nautilusaliquot_set.filter(parent_aliquot_id=aliq.aliquot_id).all():
aliquot['children'].append({
'id': child.aliquot_id,
'aliquot': self._object_to_string(child),
'content': self._object_detail(child)
})
visit_aliquot_set[subject.sample_subject_id][visit.visit_name][sample_type.sample_type].append(aliquot)
return visit_aliquot_set
def _build_csv(self, pk, **context):
sdgs = NautilusSubject.objects.filter(ehb_id=pk).all()
aliquots = ''
for sdg in sdgs:
visits = sdg.nautilusvisit_set.all()
for visit in visits:
for aliq in visit.nautilusaliquot_set.all():
if aliq.secondary_sample_code:
aliquots += "{0} - {1},".format(aliq.aliquot_name, aliq.secondary_sample_code)
else:
aliquots += "{0},".format(aliq.aliquot_name)
return aliquots.rstrip(',')
def to_csv(self, value, **context):
return self._build_csv(value)
def to_html(self, value, **context):
return '<button class="btn btn-primary aliquot_button" data-toggle="modal" data-target="#aliquotList" data-id="{0}">Aliquots</button>'.format(value)
def __call__(self, values, preferred_formats=None, **context):
# Create a copy of the preferred formats since each set values may
# be processed slightly differently (e.g. mixed data type in column)
# which could cause exceptions that would not be present during
# processing of other values
if preferred_formats is None:
preferred_formats = self.default_formats
preferred_formats = list(preferred_formats) + ['raw']
# Create a OrderedDict of the values relative to the
# concept fields objects the values represent. This
# enables key-based access to the values rather than
# relying on position.
if not isinstance(values, OrderedDict):
# Wrap single values
if not isinstance(values, (list, tuple)):
values = [values]
values = OrderedDict(zip(self.keys, values))
# Iterate over all preferred formats and attempt to process the values.
# For formatter methods that process all values must be tracked and
# attempted only once. They are removed from the list once attempted.
# If no preferred multi-value methods succeed, each value is processed
# independently with the remaining formats
for f in iter(preferred_formats):
method = getattr(self, u'to_{0}'.format(f), None)
# This formatter does not support this format, remove it
# from the available list
if not method:
preferred_formats.pop(0)
continue
            # The implicit behavior when handling multiple values is to process
            # them independently since, in most cases, they are not dependent
            # on one another, but rather should be represented together since
            # the data is related. A formatter method can be flagged to process
            # all values together by setting the attribute
            # `process_multiple=True`. We must check whether that flag has been
            # set and simply pass through the values and context to the method
            # as is. If ``process_multiple`` is not set, each value is handled
            # independently.
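            # For example, a subclass could opt in with (illustrative only; no
            # such method is defined in this module):
            #
            #     def to_html(self, values, **context):
            #         ...
            #     to_html.process_multiple = True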
if getattr(method, 'process_multiple', False):
try:
output = method(values, fields=self.fields,
concept=self.concept,
process_multiple=True, **context)
if not isinstance(output, dict):
return OrderedDict([(self.concept.name, output)])
return output
# Remove from the preferred formats list since it failed
except Exception:
if self.concept and self.concept not in self._errors:
self._errors[self.concept] = None
log.warning(u'Multi-value formatter error',
exc_info=True)
preferred_formats.pop(0)
# The output is independent of the input. Formatters may output more
# or less values than what was entered.
output = OrderedDict()
# Attempt to process each
for i, (key, value) in enumerate(values.iteritems()):
for f in preferred_formats:
method = getattr(self, u'to_{0}'.format(f))
field = self.fields[key] if self.fields else None
try:
fvalue = method(value, field=field, concept=self.concept,
process_multiple=False, **context)
if isinstance(fvalue, dict):
output.update(fvalue)
else:
output[self.field] = fvalue
break
except Exception:
if field and field not in self._errors:
self._errors[field] = None
log.warning(u'Single-value formatter error',
exc_info=True)
return output
class AggregationFormatter(HTMLFormatter):
'''
Formatter that aggregates 1-N relationships where the base model
is related to a PortalSubject
'''
model = None
order_by = None
field = None
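    # Subclasses configure the aggregation with:
    #   model    - related Django model to aggregate
    #   field    - attribute pulled from the related rows
    #   order_by - optional ordering applied before aggregation
    #   distinct - whether to de-duplicate values (default assumed below)
    distinct = False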
def _aggregate(self):
pass
def _aggregates_to_html(self):
aggregates = self._aggregate()
if aggregates:
return '<ul><li>{0}</li></ul>'.format(
'</li><li>'.join(str(v) for v in aggregates))
else:
return '<em> None Listed </em>'
    def _aggregates_to_csv(self):
        aggregates = self._aggregate()
        if aggregates:
            return ','.join(str(v) for v in aggregates)
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
return self._aggregates_to_csv()
def to_html(self, pk, **context):
self.pk = pk
self.context = context
return self._aggregates_to_html()
def __call__(self, values, preferred_formats=None, **context):
# Create a copy of the preferred formats since each set values may
# be processed slightly differently (e.g. mixed data type in column)
# which could cause exceptions that would not be present during
# processing of other values
if preferred_formats is None:
preferred_formats = self.default_formats
preferred_formats = list(preferred_formats) + ['raw']
# Create a OrderedDict of the values relative to the
# concept fields objects the values represent. This
# enables key-based access to the values rather than
# relying on position.
if not isinstance(values, OrderedDict):
# Wrap single values
if not isinstance(values, (list, tuple)):
values = [values]
values = OrderedDict(zip(self.keys, values))
# Iterate over all preferred formats and attempt to process the values.
# For formatter methods that process all values must be tracked and
# attempted only once. They are removed from the list once attempted.
# If no preferred multi-value methods succeed, each value is processed
# independently with the remaining formats
for f in iter(preferred_formats):
method = getattr(self, u'to_{0}'.format(f), None)
# This formatter does not support this format, remove it
# from the available list
if not method:
preferred_formats.pop(0)
continue
            # The implicit behavior when handling multiple values is to process
            # them independently since, in most cases, they are not dependent
            # on one another, but rather should be represented together since
            # the data is related. A formatter method can be flagged to process
            # all values together by setting the attribute
            # `process_multiple=True`. We must check whether that flag has been
            # set and simply pass through the values and context to the method
            # as is. If ``process_multiple`` is not set, each value is handled
            # independently.
if getattr(method, 'process_multiple', False):
try:
output = method(values, fields=self.fields, concept=self.concept, process_multiple=True, **context)
if not isinstance(output, dict):
return OrderedDict([(self.concept.name, output)])
return output
# Remove from the preferred formats list since it failed
except Exception:
if self.concept and self.concept not in self._errors:
self._errors[self.concept] = None
log.warning(u'Multi-value formatter error', exc_info=True)
preferred_formats.pop(0)
# The output is independent of the input. Formatters may output more
# or less values than what was entered.
output = OrderedDict()
# Attempt to process each
for i, (key, value) in enumerate(values.iteritems()):
for f in preferred_formats:
method = getattr(self, u'to_{0}'.format(f))
field = self.fields[key] if self.fields else None
try:
fvalue = method(value, field=field, concept=self.concept, process_multiple=False, **context)
if isinstance(fvalue, dict):
output.update(fvalue)
else:
# Override the key value so that CSV exports have the correct header name
output[self.field] = fvalue
break
                except Exception:
                    if field and field not in self._errors:
                        self._errors[field] = None
                    # log.warning(u'Single-value formatter error', exc_info=True)
                    raise
return output
# Model Specific Base Aggregators
class SubjectAggregationFormatter(AggregationFormatter):
    def _aggregate(self):
        if self.distinct:
            if self.order_by:
                aggregates = self.model.objects.filter(ehb_id=self.pk).order_by(self.order_by).distinct().values_list(self.field, flat=True)
            else:
                aggregates = self.model.objects.filter(ehb_id=self.pk).distinct().values_list(self.field, flat=True)
        else:
            if self.order_by:
                aggregates = self.model.objects.filter(ehb_id=self.pk).order_by(self.order_by).values_list(self.field, flat=True)
            else:
                aggregates = self.model.objects.filter(ehb_id=self.pk).values_list(self.field, flat=True)
        if None in aggregates:
            return None
        return aggregates
class AgeAtDiagAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
if diagnosis.age:
html += '<li>{0} Months</li>'.format(diagnosis.age)
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
if diagnosis.age:
_str += '{0}'.format(diagnosis.age)
else:
_str += ','
return _str
class AgeDescAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
if diagnosis.age:
html += '<li>{0}</li>'.format(diagnosis.age_description)
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
if diagnosis.age:
_str += '{0}'.format(diagnosis.age)
else:
_str += ','
return _str
class AgeYmdAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
if diagnosis.age:
html += '<li>{0}</li>'.format(diagnosis.age_ymd)
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
if diagnosis.age:
_str += '{0}'.format(diagnosis.age)
else:
_str += ','
return _str
class DiagnosisAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for each in aggregates:
html += '<li>{0}</li>'.format(getattr(each, self.field))
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for each in aggregates:
_str += '{0}'.format(getattr(each, self.field))
else:
_str += ','
return _str
class DiagnosisTypeAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('age').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
            try:
                last_dx = diagnosis.monthsbetweendx.months_last_diag
            except Exception:
                last_dx = None
if last_dx:
label = label + " ({0} months since last Dx)".format(diagnosis.monthsbetweendx.months_last_diag)
html += '<li>{0}</li><ul>'.format(label)
html += '<li>{0}</li>'.format(diagnosis.pathhistology_aggr)
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
_str += '{0};{1},'.format(diagnosis.diagnosis_type, diagnosis.pathhistology_aggr)
else:
_str += 'Diagnosis {0};{1}'.format(diag_count, diagnosis.pathhistology_aggr)
diag_count += 1
return _str
class UpdateAggregationFormatter(AggregationFormatter):
def _aggregate(self):
aggregates = PortalSubject.objects.get(ehb_id=self.pk).diagnosis_set.order_by('date_of_diagnosis').all()
return aggregates
def _aggregates_to_html(self):
html = '<ul>'
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
label = diagnosis.diagnosis_type
else:
label = 'Diagnosis {0}'.format(diag_count)
diag_count += 1
html += '<li>{0}</li><ul>'.format(label)
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for update in aggregates:
html += '<li>{0}</li>'.format(update.update_type)
field = getattr(update, self.field)
if field:
html += '<ul><li>{0}</li></ul>'.format(field)
else:
html += '<ul><li>{0}</li></ul>'.format('<em>Unknown</em>')
else:
html += '<li><em>None Listed</em></li>'
html += '</ul>'
html += '</ul>'
return html
def to_csv(self, pk, **context):
self.pk = pk
self.context = context
_str = ''
        diag_count = 1
        for diagnosis in self._aggregate():
if diagnosis.diagnosis_type:
_str += '{0},'.format(diagnosis.diagnosis_type)
else:
_str += 'Diagnosis {0};'.format(diag_count)
diag_count += 1
model_name = self.model._meta.object_name.lower()
aggregates = getattr(diagnosis, '{0}_set'.format(model_name)).all()
if aggregates:
for update in aggregates:
_str += '{0} Month Update,'.format(update.update_month)
field = getattr(update, self.field)
if field:
_str += '{0},'.format(field)
else:
_str += ','
else:
_str += ','
return _str
# Diagnosis Based Aggregations
class PathDiagFormatter(DiagnosisAggregationFormatter):
model = PathHistology
field = 'path_histology'
distinct = True
class MolecularTestsDoneFormatter(DiagnosisAggregationFormatter):
model = TumorOrMolecularTestsD
field = 'tumor_or_molecular_tests_d'
distinct = True
class MetasAtSubmitSiteFormatter(DiagnosisAggregationFormatter):
model = MetasAtSubmitSite
field = 'metas_at_submit_site'
distinct = True
class SubjectClinStatusFormatter(UpdateAggregationFormatter):
model = Update
field = 'clin_status'
distinct = True
# Portal Subject Based Aggregations
class FamilyHistoryFormatter(SubjectAggregationFormatter):
model = MedicalHistoryMain
field = 'family_history'
distinct = True
class TumorLocFormatter(SubjectAggregationFormatter):
model = TumorLocationIn
field = 'tumor_location_in'
distinct = True
class RaceFormatter(SubjectAggregationFormatter):
model = Race
field = 'race'
distinct = True
class RelapseNumberFormatter(SubjectAggregationFormatter):
model = Diagnosis
field = 'relapse_number2_7d6'
distinct = True
order_by = 'date_of_diagnosis'
class SiteOfProgressionFormatter(SubjectAggregationFormatter):
model = Diagnosis
field = 'site_prog'
distinct = True
order_by = 'date_of_diagnosis'
class DiagnosisTypeListFormatter(SubjectAggregationFormatter):
model = Diagnosis
field = 'diagnosis_type'
distinct = True
order_by = 'date_of_diagnosis'
class CancerPredispositionFormatter(SubjectAggregationFormatter):
model = CancPredispCondition
field = 'canc_predisp_condition'
distinct = True
class OtherMedConditionFormatter(SubjectAggregationFormatter):
model = OtherMedCondition
field = 'other_med_condition'
distinct = True
class LimsIDFormatter(SubjectAggregationFormatter):
model = NautilusSubject
field = 'sample_subject_id'
distinct = True
registry.register(PathologyReportFormatter, 'PathologyReportFormatter')
registry.register(OperativeReportFormatter, 'OperativeReportFormatter')
registry.register(AgeDescAggregationFormatter, 'AgeDescAggregationFormatter')
registry.register(AgeAtDiagAggregationFormatter, 'AgeAtDiagAggregationFormatter')
registry.register(AgeYmdAggregationFormatter, 'AgeYmdAggregationFormatter')
registry.register(PatientSummaryFormatter, 'PatientSummaryFormatter')
registry.register(LinkAggFormatter, 'LinkAggFormatter')
registry.register(AliquotAggFormatter, 'AliqAggFormatter')
registry.register(TumorLocFormatter, 'TumorLocFormatter')
registry.register(OtherMedConditionFormatter, 'OtherMedConditionFormatter')
registry.register(PathDiagFormatter, 'PathDiagFormatter')
registry.register(RaceFormatter, 'RaceFormatter')
registry.register(MolecularTestsDoneFormatter, 'MolecularTestsDoneFormatter')
registry.register(DiagnosisTypeListFormatter, 'DiagnosisTypeListFormatter')
registry.register(CancerPredispositionFormatter, 'CancerPredispositionFormatter')
registry.register(RelapseNumberFormatter, 'RelapseNumberFormatter')
registry.register(SiteOfProgressionFormatter, 'SiteOfProgressionFormatter')
registry.register(MetasAtSubmitSiteFormatter, 'MetasAtSubmitSiteFormatter')
registry.register(FamilyHistoryFormatter, 'FamilyHistoryFormatter')
registry.register(SubjectClinStatusFormatter, 'SubjectClinStatusFormatter')
registry.register(LimsIDFormatter, 'LimsIDFormatter')
registry.register(EnrollmentTypeFormatter, 'EnrollmentTypeFormatter')
registry.register(AltEnrollmentTypeFormatter, 'AltEnrollmentTypeFormatter')
registry.register(DiagnosisTypeAggregationFormatter, 'DiagnosisTypeAggregationFormatter')
registry.register(cBioLinkFormatter, 'cBioLinkFormatter')
| bsd-2-clause | 5,607,928,141,524,938,000 | 36.690947 | 256 | 0.574032 | false |
budurli/python-paytrail | paytrail/base.py | 1 | 2679 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import hmac
import base64
import hashlib
from requests import Request
from paytrail.settings import BASE_API_URL, PAYTRAIL_AUTH_KEY, PAYTRAIL_ID, PAYTRAIL_SECRET
class PaytrailConnectAPIRequest(Request):
def __init__(self, **kwargs):
self.merchant_id = kwargs.pop('merchant_id')
self.merchant_secret = kwargs.pop('merchant_secret')
super(PaytrailConnectAPIRequest, self).__init__(**kwargs)
self.headers['Timestamp'] = self.get_timestamp()
self.headers['Content-MD5'] = self.get_content_md5()
self.headers['Authorization'] = self.get_authorization_signature()
    def get_content_md5(self):
        return base64.b64encode(hashlib.md5(self.prepare().body).digest()).decode().strip()
@staticmethod
def get_timestamp():
return str(datetime.now().isoformat())
    def get_authorization_signature(self):
        # Assumption: the signed string covers the Content-MD5 header computed
        # in __init__, alongside the method, url, merchant id and timestamp.
        base_signature = '\n'.join([
            self.method,
            self.url,
            'PaytrailConnectAPI {merchant_id}'.format(merchant_id=self.merchant_id),
            self.headers['Timestamp'],
            self.headers['Content-MD5']
        ])
        digest = hmac.new(
            key=self.merchant_secret,
            msg=base_signature,
            digestmod=hashlib.sha256
        ).digest()
        signature = base64.b64encode(digest).decode()
        return 'PaytrailConnectAPI {merchant_id}:{signature}'.format(
            merchant_id=self.merchant_id, signature=signature)
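# A minimal usage sketch (illustrative only; the endpoint and payload are
# placeholders and real Paytrail credentials are required):
#
#     req = PaytrailConnectAPIRequest(
#         method='POST',
#         url=BASE_API_URL + '/connectapi/authorizations',
#         data='{}',
#         merchant_id=PAYTRAIL_ID,
#         merchant_secret=PAYTRAIL_SECRET,
#     )
#     prepared = req.prepare()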
class BasePaytrailClient(object):
URL_MAP = {
'authorization':
{
'url': '/connectapi/authorizations',
'method': 'POST'
},
'confirming_authorization':
{
'url': '/connectapi/authorizations/{id}/confirmation',
'method': 'POST'
},
'invalidatin_authorization':
{
'url': '/authorizations/{id}',
'method': 'POST'
            },
        'charging': '/connectapi/authorizations/{id}/charges',
        'fetching_payment_status': '/connectapi/authorizations/{id}/charges/{id}',
        'fetching_delivery_address': '/connectapi/authorizations/{id}/deliveryAddresses',
}
def __init__(self, base_url=BASE_API_URL, merchant_id=PAYTRAIL_ID, merchant_secret=PAYTRAIL_SECRET):
self.base_url = base_url
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
def authorize(self, auth_key=PAYTRAIL_AUTH_KEY):
pass
def confirm_authorization(self):
pass
test_client = BasePaytrailClient()
test_client.authorize() | mit | -6,063,210,949,374,005,000 | 29.804598 | 104 | 0.605077 | false |
jazzband/site | jazzband/projects/models.py | 1 | 5929 | import os
from datetime import datetime
from uuid import uuid4
from flask import current_app, safe_join
from flask_login import current_user
from sqlalchemy import func, orm
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy_utils import aggregated, generic_repr
from ..auth import current_user_is_roadie
from ..db import postgres as db
from ..members.models import User
from ..mixins import Syncable
@generic_repr("id", "name")
class Project(db.Model, Syncable):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False, index=True)
normalized_name = orm.column_property(func.normalize_pep426_name(name))
description = db.Column(db.Text)
html_url = db.Column(db.String(255))
subscribers_count = db.Column(db.SmallInteger, default=0, nullable=False)
stargazers_count = db.Column(db.SmallInteger, default=0, nullable=False)
forks_count = db.Column(db.SmallInteger, default=0, nullable=False)
open_issues_count = db.Column(db.SmallInteger, default=0, nullable=False)
is_active = db.Column(db.Boolean, default=True, nullable=False, index=True)
transfer_issue_url = db.Column(db.String(255))
membership = db.relationship("ProjectMembership", backref="project", lazy="dynamic")
credentials = db.relationship(
"ProjectCredential", backref="project", lazy="dynamic"
)
uploads = db.relationship(
"ProjectUpload",
backref="project",
lazy="dynamic",
order_by=lambda: ProjectUpload.ordering.desc().nullslast(),
)
created_at = db.Column(db.DateTime, nullable=True)
updated_at = db.Column(db.DateTime, nullable=True)
pushed_at = db.Column(db.DateTime, nullable=True)
__tablename__ = "projects"
__table_args__ = (
db.Index("release_name_idx", "name"),
db.Index("release_name_is_active_idx", "name", "is_active"),
)
def __str__(self):
return self.name
@aggregated("uploads", db.Column(db.SmallInteger))
def uploads_count(self):
return db.func.count("1")
@property
def current_user_is_member(self):
if not current_user:
return False
elif not current_user.is_authenticated:
return False
elif current_user_is_roadie():
return True
else:
return current_user.id in self.member_ids
@property
def member_ids(self):
return [member.user.id for member in self.membership.all()]
@property
def leads(self):
leads = self.membership.filter(
ProjectMembership.is_lead.is_(True),
ProjectMembership.user_id.in_(
User.active_members().options(orm.load_only("id"))
),
)
return [member.user for member in leads]
@property
def pypi_json_url(self):
return f"https://pypi.org/pypi/{self.normalized_name}/json" # noqa
@generic_repr("id", "project_id", "is_active", "key")
class ProjectCredential(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.Integer, db.ForeignKey("projects.id"))
is_active = db.Column(db.Boolean, default=True, nullable=False, index=True)
key = db.Column(UUID(as_uuid=True), default=uuid4)
__tablename__ = "project_credentials"
__table_args__ = (db.Index("release_key_is_active_idx", "key", "is_active"),)
def __str__(self):
return self.key.hex
@generic_repr("id", "user_id", "project_id", "is_lead")
class ProjectMembership(db.Model):
id = db.Column("id", db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
project_id = db.Column(db.Integer, db.ForeignKey("projects.id"))
joined_at = db.Column(db.DateTime, default=datetime.utcnow)
is_lead = db.Column(db.Boolean, default=False, nullable=False, index=True)
__tablename__ = "project_memberships"
def __str__(self):
return f"User: {self.user}, Project: {self.project}"
@generic_repr("id", "project_id", "filename")
class ProjectUpload(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.Integer, db.ForeignKey("projects.id"))
version = db.Column(db.Text, index=True)
path = db.Column(db.Text, unique=True, index=True)
filename = db.Column(db.Text, unique=True, index=True)
signaturename = orm.column_property(filename + ".asc")
size = db.Column(db.Integer)
md5_digest = db.Column(db.Text, unique=True, nullable=False)
sha256_digest = db.Column(db.Text, unique=True, nullable=False)
blake2_256_digest = db.Column(db.Text, unique=True, nullable=False)
uploaded_at = db.Column(db.DateTime, default=datetime.utcnow)
released_at = db.Column(db.DateTime, nullable=True)
notified_at = db.Column(db.DateTime, nullable=True, index=True)
form_data = db.Column(JSONB)
user_agent = db.Column(db.Text)
remote_addr = db.Column(db.Text)
ordering = db.Column(db.Integer, default=0)
__tablename__ = "project_uploads"
__table_args__ = (
db.CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"),
db.CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"),
db.Index("project_uploads_project_version", "project_id", "version"),
)
@property
def full_path(self):
# build storage path, e.g.
# /app/uploads/acme/2coffee12345678123123123123123123
return safe_join(current_app.config["UPLOAD_ROOT"], self.path)
@property
def signature_path(self):
return self.full_path + ".asc"
def __str__(self):
return self.filename
@db.event.listens_for(ProjectUpload, "after_delete")
def delete_upload_file(mapper, connection, target):
    # When a ProjectUpload row is deleted, remove its stored file (and the
    # detached signature file, if present) from disk as well.
os.remove(target.full_path)
if os.path.exists(target.signature_path):
os.remove(target.signature_path)
| mit | 7,964,328,019,318,782,000 | 34.933333 | 88 | 0.661326 | false |
olof/svtplay-dl | lib/svtplay_dl/service/raw.py | 1 | 1082 | from __future__ import absolute_import
import os
import re
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import Service
class Raw(Service):
def get(self):
filename = os.path.basename(self.url[: self.url.rfind("/")])
self.output["title"] = filename
streams = []
if re.search(".f4m", self.url):
self.output["ext"] = "flv"
streams.append(hdsparse(self.config, self.http.request("get", self.url, params={"hdcore": "3.7.0"}), self.url, output=self.output))
if re.search(".m3u8", self.url):
streams.append(hlsparse(self.config, self.http.request("get", self.url), self.url, output=self.output))
if re.search(".mpd", self.url):
streams.append(dashparse(self.config, self.http.request("get", self.url), self.url, output=self.output))
for stream in streams:
if stream:
for n in list(stream.keys()):
yield stream[n]
| mit | -1,032,772,615,640,684,500 | 33.903226 | 143 | 0.624769 | false |
redhat-cip/dci-ansible | callback/dci.py | 1 | 9044 | # Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.plugins.callback.default import CallbackModule as CM_default
from ansible.release import __version__ as ansible_version
from dciauth.version import __version__ as dciauth_version
from dciclient.v1.api import context as dci_context
from dciclient.v1.api import file as dci_file
from dciclient.v1.api import jobstate as dci_jobstate
from dciclient.version import __version__ as dciclient_version
COMPAT_OPTIONS = (('display_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS),
('display_ok_hosts', True),
('show_custom_stats', C.SHOW_CUSTOM_STATS),
('display_failed_stderr', False),
('check_mode_markers', False),
('show_per_host_start', False))
class CallbackModule(CM_default):
"""This callback module uploads the Ansible output to a DCI control
server."""
CALLBACK_VERSION = '2.0'
CALLBACK_TYPE = 'dci'
CALLBACK_NAME = 'dci'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self._real_display = self._display
self.verbosity = self._display.verbosity
self._display = self
self._jobstate_id = None
self._job_id = None
self._current_status = None
self._dci_context = self._build_dci_context()
self._explicit = False
self._backlog = []
self._name = None
self._content = ''
self._color = None
def get_option(self, name):
for key, val in COMPAT_OPTIONS:
if key == name:
return val
return False
@staticmethod
def _get_details():
"""Method that retrieves the appropriate credentials. """
login = os.getenv('DCI_LOGIN')
password = os.getenv('DCI_PASSWORD')
client_id = os.getenv('DCI_CLIENT_ID')
api_secret = os.getenv('DCI_API_SECRET')
url = os.getenv('DCI_CS_URL', 'https://api.distributed-ci.io')
return login, password, url, client_id, api_secret
def _build_dci_context(self):
login, password, url, client_id, api_secret = self._get_details()
user_agent = ('Ansible/%s (python-dciclient/%s, python-dciauth/%s)'
) % (ansible_version, dciclient_version, dciauth_version)
if login is not None and password is not None:
return dci_context.build_dci_context(url, login, password,
user_agent)
elif client_id is not None and api_secret is not None:
return dci_context.build_signature_context(url, client_id,
api_secret, user_agent)
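    # For reference, the context above is driven purely by environment
    # variables: either DCI_LOGIN/DCI_PASSWORD or DCI_CLIENT_ID/DCI_API_SECRET,
    # plus an optional DCI_CS_URL override (values are deployment specific).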
def display(self, msg, color=None, screen_only=False, *args, **kwargs):
if screen_only:
return
if color is not None:
self._color = color
self._content += msg + '\n'
def banner(self, msg):
# upload the previous content when we have a new banner (start
# of task/play/playbook...)
if self._name:
if self._color == C.COLOR_SKIP:
prefix = 'skipped/'
elif self._color == C.COLOR_UNREACHABLE:
prefix = "unreachable/"
elif self._color == C.COLOR_ERROR:
prefix = 'failed/'
else:
prefix = ''
self.create_file(prefix + self._name,
self._content if self._content != '' else ' ')
self._content = ''
self._name = msg
def deprecated(self, *args, **kwargs):
pass
def create_file(self, name, content):
if self._job_id is None:
self._backlog.append({'name': name, 'content': content})
else:
kwargs = {
'name': name,
'content': content and content.encode('UTF-8'),
'mime': 'application/x-ansible-output',
'job_id': self._job_id,
'jobstate_id': self._jobstate_id
}
dci_file.create(self._dci_context, **kwargs)
def create_jobstate(self, comment, status):
if self._explicit:
return
if not status or self._current_status == status:
return
self._current_status = status
r = dci_jobstate.create(
self._dci_context,
status=self._current_status,
comment=comment,
job_id=self._job_id
)
ns = r.json()
if 'jobstate' in ns and 'id' in ns['jobstate']:
self._jobstate_id = ns['jobstate']['id']
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
# do a fake call to banner to output the last content
self.banner('')
def v2_runner_on_ok(self, result, **kwargs):
"""Event executed after each command when it succeed. Get the output
of the command and create a file associated to the current
jobstate.
"""
# Store the jobstate id when the there is an explicit call to
# set it. Example in a playbook:
#
# dci_job:
# id: "{{ job_id }}"
# status: running
#
# switch to explicit mode (not reacting to the dci_status
# variable anymore).
if ("jobstate" in result._result and
"id" in result._result["jobstate"]):
self._jobstate_id = result._result["jobstate"]["id"]
self._explicit = True
        # Check if the task that just ran was the schedule of an upgrade
# job. If so, set self._job_id to the new job ID
if (result._task.action == 'dci_job' and (
result._result['invocation']['module_args']['upgrade'] or
result._result['invocation']['module_args']['update'])):
self._job_id = result._result['job']['id']
self.create_jobstate(
comment='starting the update/upgrade',
status='pre-run'
)
elif (result._task.action == 'set_fact' and
'ansible_facts' in result._result and
'job_id' in result._result['ansible_facts'] and
result._result['ansible_facts']['job_id'] is not None):
self._job_id = result._result['ansible_facts']['job_id']
self.create_jobstate(comment='start up', status='new')
for rec in self._backlog:
self.create_file(rec['name'],
rec['content'])
self._backlog = []
super(CallbackModule, self).v2_runner_on_ok(result, **kwargs)
def v2_playbook_on_play_start(self, play):
"""Event executed before each play. Create a new jobstate and save
the current jobstate id.
"""
def _get_comment(play):
""" Return the comment for the new jobstate
The order of priority is as follow:
* play/vars/dci_comment
* play/name
* '' (Empty String)
"""
if play.get_vars() and 'dci_comment' in play.get_vars():
comment = play.get_vars()['dci_comment']
# If no name has been specified to the play, play.name is equal
# to the hosts value
elif play.name and play.name not in play.hosts:
comment = play.name
else:
comment = ''
return comment
super(CallbackModule, self).v2_playbook_on_play_start(play)
if not self._job_id:
return
comment = _get_comment(play)
self.create_jobstate(
comment=comment,
status=play.get_vars().get('dci_status')
)
def task_name(self, result):
"""Ensure we alway return a string"""
name = result._task.get_name()
# add the included file name in the task's name
if name == 'include_tasks':
if hasattr(result._task, 'get_ds'):
if 'include_tasks' in result._task.get_ds():
name = '%s: %s' % (name, result._task.get_ds()['include_tasks']) # noqa
return name
def v2_runner_on_unreachable(self, result):
self.create_jobstate(comment=self.task_name(result), status='failure')
super(CallbackModule, self).v2_runner_on_unreachable(result)
def v2_runner_on_failed(self, result, ignore_errors=False):
"""Event executed after each command when it fails. Get the output
of the command and create a failure jobstate and a file associated.
"""
if not ignore_errors:
self.create_jobstate(comment=self.task_name(result),
status='failure')
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
| apache-2.0 | 2,457,845,636,947,778,000 | 34.466667 | 92 | 0.555617 | false |
kubeflow/pipelines | test/sample-test/run_sample_test.py | 1 | 8450 | # Copyright 2019-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
import os
import tarfile
import time
import utils
import yamale
import yaml
from datetime import datetime
from kfp import Client
from constants import CONFIG_DIR, DEFAULT_CONFIG, SCHEMA_CONFIG
class PySampleChecker(object):
def __init__(
self,
testname,
input,
output,
result,
experiment_name,
host,
namespace='kubeflow'
):
"""Util class for checking python sample test running results.
:param testname: test name.
:param input: The path of a pipeline file that will be submitted.
:param output: The path of the test output.
:param result: The path of the test result that will be exported.
:param host: The hostname of KFP API endpoint.
:param namespace: namespace of the deployed pipeline system. Default: kubeflow
:param experiment_name: Name of the experiment to monitor
"""
self._testname = testname
self._experiment_name = experiment_name
self._input = input
self._output = output
self._result = result
self._host = host
self._namespace = namespace
self._run_pipeline = None
self._test_timeout = None
self._test_cases = []
self._test_name = self._testname + ' Sample Test'
self._client = None
self._experiment_id = None
self._job_name = None
self._test_args = None
self._run_id = None
def run(self):
"""Run compiled KFP pipeline."""
###### Initialization ######
self._client = Client(host=self._host)
###### Check Input File ######
utils.add_junit_test(
self._test_cases, 'input generated yaml file',
os.path.exists(self._input), 'yaml file is not generated'
)
if not os.path.exists(self._input):
utils.write_junit_xml(
self._test_name, self._result, self._test_cases
)
print('Error: job not found.')
exit(1)
###### Create Experiment ######
response = self._client.create_experiment(self._experiment_name)
self._experiment_id = response.id
utils.add_junit_test(self._test_cases, 'create experiment', True)
###### Create Job ######
self._job_name = self._testname + '_sample'
###### Figure out arguments from associated config files. #######
self._test_args = {}
config_schema = yamale.make_schema(SCHEMA_CONFIG)
try:
with open(DEFAULT_CONFIG, 'r') as f:
raw_args = yaml.safe_load(f)
default_config = yamale.make_data(DEFAULT_CONFIG)
yamale.validate(
config_schema, default_config
) # If fails, a ValueError will be raised.
except yaml.YAMLError as yamlerr:
raise RuntimeError('Illegal default config:{}'.format(yamlerr))
except OSError as ose:
raise FileExistsError('Default config not found:{}'.format(ose))
else:
self._test_timeout = raw_args['test_timeout']
self._run_pipeline = raw_args['run_pipeline']
try:
config_file = os.path.join(
CONFIG_DIR, '%s.config.yaml' % self._testname
)
with open(config_file, 'r') as f:
raw_args = yaml.safe_load(f)
test_config = yamale.make_data(config_file)
yamale.validate(
config_schema, test_config
) # If fails, a ValueError will be raised.
except yaml.YAMLError as yamlerr:
print(
'No legit yaml config file found, use default args:{}'.
format(yamlerr)
)
except OSError as ose:
print(
'Config file with the same name not found, use default args:{}'.
format(ose)
)
else:
if 'arguments' in raw_args.keys() and raw_args['arguments']:
self._test_args.update(raw_args['arguments'])
if 'output' in self._test_args.keys(
): # output is a special param that has to be specified dynamically.
self._test_args['output'] = self._output
if 'test_timeout' in raw_args.keys():
self._test_timeout = raw_args['test_timeout']
if 'run_pipeline' in raw_args.keys():
self._run_pipeline = raw_args['run_pipeline']
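        # Illustrative shape of a per-test config file (validated against
        # SCHEMA_CONFIG; only the keys read above are shown, values are
        # placeholders):
        #
        #   test_timeout: 1800
        #   run_pipeline: True
        #   arguments:
        #     output: <output path>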
# TODO(numerology): Special treatment for TFX::OSS sample
if self._testname == 'parameterized_tfx_oss':
self._test_args['pipeline-root'] = os.path.join(
self._test_args['output'],
'tfx_taxi_simple_' + kfp.dsl.RUN_ID_PLACEHOLDER
)
del self._test_args['output']
# Submit for pipeline running.
if self._run_pipeline:
response = self._client.run_pipeline(
self._experiment_id, self._job_name, self._input,
self._test_args
)
self._run_id = response.id
utils.add_junit_test(self._test_cases, 'create pipeline run', True)
def check(self):
"""Check pipeline run results."""
if self._run_pipeline:
###### Monitor Job ######
try:
start_time = datetime.now()
response = self._client.wait_for_run_completion(
self._run_id, self._test_timeout
)
succ = (response.run.status.lower() == 'succeeded')
end_time = datetime.now()
elapsed_time = (end_time - start_time).seconds
utils.add_junit_test(
self._test_cases, 'job completion', succ,
'waiting for job completion failure', elapsed_time
)
finally:
###### Output Argo Log for Debugging ######
workflow_json = self._client._get_workflow_json(self._run_id)
workflow_id = workflow_json['metadata']['name']
print("Argo Workflow Name: ", workflow_id)
argo_log, _ = utils.run_bash_command(
'argo logs {} -n {}'.format(
workflow_id, self._namespace
)
)
print('=========Argo Workflow Log=========')
print(argo_log)
if not succ:
utils.write_junit_xml(
self._test_name, self._result, self._test_cases
)
exit(1)
###### Validate the results for specific test cases ######
if self._testname == 'xgboost_training_cm':
# For xgboost sample, check its confusion matrix.
cm_tar_path = './confusion_matrix.tar.gz'
utils.get_artifact_in_minio(
workflow_json, 'confusion-matrix', cm_tar_path,
'mlpipeline-ui-metadata'
)
with tarfile.open(cm_tar_path) as tar_handle:
file_handles = tar_handle.getmembers()
assert len(file_handles) == 1
with tar_handle.extractfile(file_handles[0]) as f:
cm_data = f.read()
utils.add_junit_test(
self._test_cases, 'confusion matrix format',
(len(cm_data) > 0),
'the confusion matrix file is empty'
)
###### Delete Job ######
#TODO: add deletion when the backend API offers the interface.
###### Write out the test result in junit xml ######
utils.write_junit_xml(self._test_name, self._result, self._test_cases)
| apache-2.0 | 896,296,228,944,902,400 | 37.761468 | 82 | 0.539645 | false |
BetaNYC/tree-one-one | app.py | 1 | 1276 | from flask import Flask
from flask import jsonify, render_template
#from flask_cors import CORS
import math
import pandas as pd
import os
import datetime
import json
app = Flask(__name__)
@app.route("/")
def default():
return render_template('index.html')
@app.route('/test')
@app.route('/test/<metric>')
def test(metric=None):
global sample
for s in sample:
s['color'] = s[metric+'_col']
return jsonify({'polygons':sample})
def popup_text(s):
return """percent alive: %s<br>
average size: %s<br>
number of species: %s<br>"""%(s['aliveness'],s['average_size'],s['diversity'])
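# Each record in data/square.json is expected to provide 'bounds', the
# per-metric colour columns used above (e.g. 'aliveness_col', 'dbh_col',
# 'species_col') and the 'aliveness', 'average_size' and 'diversity' values
# rendered in the popup.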
port = os.getenv('VCAP_APP_PORT', '5000')
if __name__ == "__main__":
# run the app
print 'loading the data...'
sample = json.load(open('data/square.json', 'r'))
for s in sample:
del s['']
try:
s['bounds'] = json.loads(s['bounds'])
s['size_col'] = s['dbh_col']
s['diversity_col'] = s['species_col']
s['size'] = s['average_size']
s['popup_text'] = popup_text(s)
except KeyError as e:
#print e, '||', s
continue
print '...done.'
app.run(debug = True)
#app.run(host='0.0.0.0', port=int(port))
| gpl-3.0 | 3,023,585,623,126,684,000 | 26.148936 | 92 | 0.552508 | false |
deepakgupta1313/models | video_prediction/prediction_train.py | 1 | 8670 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for training the prediction model."""
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from prediction_input import build_tfrecord_input
from prediction_model import construct_model
# How often to record tensorboard summaries.
SUMMARY_INTERVAL = 40
# How often to run a batch through the validation model.
VAL_INTERVAL = 200
# How often to save a model checkpoint
SAVE_INTERVAL = 2000
# tf record data location:
DATA_DIR = 'push/push_train'
# local output directory
OUT_DIR = '/tmp/data'
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', DATA_DIR, 'directory containing data.')
flags.DEFINE_string('output_dir', OUT_DIR, 'directory for model checkpoints.')
flags.DEFINE_string('event_log_dir', OUT_DIR, 'directory for writing summary.')
flags.DEFINE_integer('num_iterations', 100000, 'number of training iterations.')
flags.DEFINE_string('pretrained_model', '',
'filepath of a pretrained model to initialize from.')
flags.DEFINE_integer('sequence_length', 10,
'sequence length, including context frames.')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_integer('use_state', 1,
'Whether or not to give the state+action to the model')
flags.DEFINE_string('model', 'CDNA',
'model architecture to use - CDNA, DNA, or STP')
flags.DEFINE_integer('num_masks', 10,
'number of masks, usually 1 for DNA, 10 for CDNA, STN.')
flags.DEFINE_float('schedsamp_k', 900.0,
'The k hyperparameter for scheduled sampling,'
'-1 for no scheduled sampling.')
flags.DEFINE_float('train_val_split', 0.95,
'The percentage of files to use for the training set,'
' vs. the validation set.')
flags.DEFINE_integer('batch_size', 32, 'batch size for training')
flags.DEFINE_float('learning_rate', 0.001,
'the base learning rate of the generator')
## Helper functions
def peak_signal_to_noise_ratio(true, pred):
"""Image quality metric based on maximal signal power vs. power of the noise.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
peak signal to noise ratio (PSNR)
"""
return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)
def mean_squared_error(true, pred):
"""L2 distance between tensors true and pred.
Args:
true: the ground truth image.
pred: the predicted image.
Returns:
mean squared error between ground truth and predicted image.
"""
return tf.reduce_sum(tf.square(true - pred)) / tf.to_float(tf.size(pred))
class Model(object):
def __init__(self,
images=None,
actions=None,
states=None,
sequence_length=None,
reuse_scope=None):
if sequence_length is None:
sequence_length = FLAGS.sequence_length
self.prefix = prefix = tf.placeholder(tf.string, [])
self.iter_num = tf.placeholder(tf.float32, [])
summaries = []
# Split into timesteps.
actions = tf.split(1, actions.get_shape()[1], actions)
actions = [tf.squeeze(act) for act in actions]
states = tf.split(1, states.get_shape()[1], states)
states = [tf.squeeze(st) for st in states]
images = tf.split(1, images.get_shape()[1], images)
images = [tf.squeeze(img) for img in images]
if reuse_scope is None:
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
else: # If it's a validation or test model.
with tf.variable_scope(reuse_scope, reuse=True):
gen_images, gen_states = construct_model(
images,
actions,
states,
iter_num=self.iter_num,
k=FLAGS.schedsamp_k,
use_state=FLAGS.use_state,
num_masks=FLAGS.num_masks,
cdna=FLAGS.model == 'CDNA',
dna=FLAGS.model == 'DNA',
stp=FLAGS.model == 'STP',
context_frames=FLAGS.context_frames)
# L2 loss, PSNR for eval.
loss, psnr_all = 0.0, 0.0
for i, x, gx in zip(
range(len(gen_images)), images[FLAGS.context_frames:],
gen_images[FLAGS.context_frames - 1:]):
recon_cost = mean_squared_error(x, gx)
psnr_i = peak_signal_to_noise_ratio(x, gx)
psnr_all += psnr_i
summaries.append(
tf.scalar_summary(prefix + '_recon_cost' + str(i), recon_cost))
summaries.append(tf.scalar_summary(prefix + '_psnr' + str(i), psnr_i))
loss += recon_cost
for i, state, gen_state in zip(
range(len(gen_states)), states[FLAGS.context_frames:],
gen_states[FLAGS.context_frames - 1:]):
state_cost = mean_squared_error(state, gen_state) * 1e-4
summaries.append(
tf.scalar_summary(prefix + '_state_cost' + str(i), state_cost))
loss += state_cost
summaries.append(tf.scalar_summary(prefix + '_psnr_all', psnr_all))
self.psnr_all = psnr_all
self.loss = loss = loss / np.float32(len(images) - FLAGS.context_frames)
summaries.append(tf.scalar_summary(prefix + '_loss', loss))
self.lr = tf.placeholder_with_default(FLAGS.learning_rate, ())
self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)
self.summ_op = tf.merge_summary(summaries)
def main(unused_argv):
print 'Constructing models and inputs.'
with tf.variable_scope('model', reuse=None) as training_scope:
images, actions, states = build_tfrecord_input(training=True)
model = Model(images, actions, states, FLAGS.sequence_length)
with tf.variable_scope('val_model', reuse=None):
val_images, val_actions, val_states = build_tfrecord_input(training=False)
val_model = Model(val_images, val_actions, val_states,
FLAGS.sequence_length, training_scope)
print 'Constructing saver.'
# Make saver.
saver = tf.train.Saver(
tf.get_collection(tf.GraphKeys.VARIABLES), max_to_keep=0)
# Make training session.
sess = tf.InteractiveSession()
summary_writer = tf.train.SummaryWriter(
FLAGS.event_log_dir, graph=sess.graph, flush_secs=10)
if FLAGS.pretrained_model:
saver.restore(sess, FLAGS.pretrained_model)
tf.train.start_queue_runners(sess)
sess.run(tf.initialize_all_variables())
tf.logging.info('iteration number, cost')
# Run training.
for itr in range(FLAGS.num_iterations):
# Generate new batch of data.
feed_dict = {model.prefix: 'train',
model.iter_num: np.float32(itr),
model.lr: FLAGS.learning_rate}
cost, _, summary_str = sess.run([model.loss, model.train_op, model.summ_op],
feed_dict)
# Print info: iteration #, cost.
tf.logging.info(str(itr) + ' ' + str(cost))
if (itr) % VAL_INTERVAL == 2:
# Run through validation set.
feed_dict = {val_model.lr: 0.0,
val_model.prefix: 'val',
val_model.iter_num: np.float32(itr)}
_, val_summary_str = sess.run([val_model.train_op, val_model.summ_op],
feed_dict)
summary_writer.add_summary(val_summary_str, itr)
if (itr) % SAVE_INTERVAL == 2:
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model' + str(itr))
if (itr) % SUMMARY_INTERVAL:
summary_writer.add_summary(summary_str, itr)
tf.logging.info('Saving model.')
saver.save(sess, FLAGS.output_dir + '/model')
tf.logging.info('Training complete')
tf.logging.flush()
if __name__ == '__main__':
app.run()
| apache-2.0 | 3,539,439,255,335,017,500 | 33.819277 | 80 | 0.633218 | false |
yephper/django | tests/m2m_regress/tests.py | 1 | 4828 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import (
Entry, Line, Post, RegressionModelSplit, SelfRefer, SelfReferChild,
SelfReferChildSibling, Tag, TagCollection, Worksheet,
)
class M2MRegressionTests(TestCase):
def test_multiple_m2m(self):
# Multiple m2m references to model must be distinguished when
# accessing the relations through an instance attribute.
s1 = SelfRefer.objects.create(name='s1')
s2 = SelfRefer.objects.create(name='s2')
s3 = SelfRefer.objects.create(name='s3')
s1.references.add(s2)
s1.related.add(s3)
e1 = Entry.objects.create(name='e1')
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
e1.topics.add(t1)
e1.related.add(t2)
self.assertQuerysetEqual(s1.references.all(), ["<SelfRefer: s2>"])
self.assertQuerysetEqual(s1.related.all(), ["<SelfRefer: s3>"])
self.assertQuerysetEqual(e1.topics.all(), ["<Tag: t1>"])
self.assertQuerysetEqual(e1.related.all(), ["<Tag: t2>"])
def test_internal_related_name_not_in_error_msg(self):
# The secret internal related names for self-referential many-to-many
# fields shouldn't appear in the list when an error is made.
six.assertRaisesRegex(
self, FieldError,
"Choices are: id, name, references, related, selfreferchild, selfreferchildsibling$",
lambda: SelfRefer.objects.filter(porcupine='fred')
)
def test_m2m_inheritance_symmetry(self):
# Test to ensure that the relationship between two inherited models
# with a self-referential m2m field maintains symmetry
sr_child = SelfReferChild(name="Hanna")
sr_child.save()
sr_sibling = SelfReferChildSibling(name="Beth")
sr_sibling.save()
sr_child.related.add(sr_sibling)
self.assertQuerysetEqual(sr_child.related.all(), ["<SelfRefer: Beth>"])
self.assertQuerysetEqual(sr_sibling.related.all(), ["<SelfRefer: Hanna>"])
def test_m2m_pk_field_type(self):
# Regression for #11311 - The primary key for models in a m2m relation
# doesn't have to be an AutoField
w = Worksheet(id='abc')
w.save()
w.delete()
def test_add_m2m_with_base_class(self):
# Regression for #11956 -- You can add an object to a m2m with the
# base class without causing integrity errors
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
c1 = TagCollection.objects.create(name='c1')
c1.tags.set([t1, t2])
c1 = TagCollection.objects.get(name='c1')
self.assertQuerysetEqual(c1.tags.all(), ["<Tag: t1>", "<Tag: t2>"], ordered=False)
self.assertQuerysetEqual(t1.tag_collections.all(), ["<TagCollection: c1>"])
def test_manager_class_caching(self):
e1 = Entry.objects.create()
e2 = Entry.objects.create()
t1 = Tag.objects.create()
t2 = Tag.objects.create()
# Get same manager twice in a row:
self.assertIs(t1.entry_set.__class__, t1.entry_set.__class__)
self.assertIs(e1.topics.__class__, e1.topics.__class__)
# Get same manager for different instances
self.assertIs(e1.topics.__class__, e2.topics.__class__)
self.assertIs(t1.entry_set.__class__, t2.entry_set.__class__)
def test_m2m_abstract_split(self):
# Regression for #19236 - an abstract class with a 'split' method
# causes a TypeError in add_lazy_relation
m1 = RegressionModelSplit(name='1')
m1.save()
def test_assigning_invalid_data_to_m2m_doesnt_clear_existing_relations(self):
t1 = Tag.objects.create(name='t1')
t2 = Tag.objects.create(name='t2')
c1 = TagCollection.objects.create(name='c1')
c1.tags.set([t1, t2])
with self.assertRaises(TypeError):
c1.tags.set(7)
c1.refresh_from_db()
self.assertQuerysetEqual(c1.tags.order_by('name'), ["<Tag: t1>", "<Tag: t2>"])
def test_multiple_forwards_only_m2m(self):
# Regression for #24505 - Multiple ManyToManyFields to same "to"
# model with related_name set to '+'.
foo = Line.objects.create(name='foo')
bar = Line.objects.create(name='bar')
post = Post.objects.create()
post.primary_lines.add(foo)
post.secondary_lines.add(bar)
self.assertQuerysetEqual(post.primary_lines.all(), ['<Line: foo>'])
self.assertQuerysetEqual(post.secondary_lines.all(), ['<Line: bar>'])
| bsd-3-clause | -8,945,947,115,624,770,000 | 36.935484 | 97 | 0.617854 | false |
JavierGarciaD/banking | banking/credit/forecast.py | 1 | 8013 | # -*- coding: utf-8 -*-
import pandas as pd
from rates.models import InterestRateModel
from credit.prepayment import PrepaymentModel
from common.db_manager import DB
from sqlalchemy import select
from sqlalchemy import and_
from sqlalchemy import asc
def vintage_sett_manual(name, forecast, nper, sdate, repricing, rate_type,
rate_level, notional, scores, pay, prepay, w_off,
rolling, credit_type):
"""
Manual constructor of settings dictionary for a Credit Vintage.
All data must be provided, no conection to external databases.
:param name:
:param forecast:
:param nper:
:param sdate:
:param repricing:
:param rate_type:
:param rate_level:
:param notional:
:param scores:
:param pay:
:param prepay:
:param w_off:
:param rolling:
:param credit_type:
:return: dictionary
"""
ans_dict = dict()
ans_dict['name'] = name
ans_dict['forecast'] = forecast
ans_dict['nper'] = nper
ans_dict['sdate'] = sdate
ans_dict['repricing'] = repricing
ans_dict['rate_type'] = rate_type
ans_dict['notional'] = notional
ans_dict['index_rates_array'] = InterestRateModel.zero(nper = forecast,
sdate = sdate)
ans_dict['rate_spreads_array'] = InterestRateModel.fixed(nper = forecast,
sdate = sdate,
level =
rate_level)
ans_dict['prepay_array'] = PrepaymentModel.psa(nper = forecast,
ceil = 0.03,
stable_period = 12)
ans_dict['prepay_per_score'] = pd.Series(data = prepay, index = scores)
ans_dict['rolling_m'] = rolling
ans_dict['scores'] = scores
ans_dict['pay_per_score'] = pd.Series(data = pay, index = scores)
ans_dict['writeoff_per_score'] = pd.Series(data = w_off, index = scores)
ans_dict['credit_type'] = credit_type
return ans_dict
def get_contract_info(product_name):
"""
Get contract info from forecast database
    :param product_name: product name to look up
    :return: dict with nper, credit_type, rate_type, repricing and rate_spread
"""
db = DB('forecast')
########################################
# Query contract info
########################################
table = db.table('contract_info')
sql = select([table.c.nper,
table.c.credit_type,
table.c.rate_type,
table.c.repricing,
table.c.rate_spread]).where(
table.c.product_name == product_name)
# execute and fetch result
ans = db.query(sql).fetchone()
ans_dict = dict(nper = int(ans[0]),
credit_type = str(ans[1]),
rate_type = str(ans[2]),
repricing = int(ans[3]),
rate_spread = float(ans[4]))
return ans_dict
def get_credit_info(product_name):
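    """
    Get per-score credit behaviour info from forecast database.

    :param product_name: product name to look up
    :return: dict of pandas Series indexed by score: pay_per_score,
        prepay_per_score, provision_per_score and writeoff_per_score
    """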
db = DB('forecast')
table = db.table('credit_info')
scores = get_scores()
ans_dict = dict()
sql = select([table.c.payment,
table.c.prepayment,
table.c.provision,
table.c.writeoff]).where(table.c.product_name ==
product_name).order_by(asc('score'))
# Execute and fetch result
ans = db.query(sql)
pay = []
pre = []
prov = []
wo = []
for row in ans:
pay.append(row[0])
pre.append(row[1])
prov.append(row[2])
wo.append(row[3])
ans_dict['pay_per_score'] = pd.Series(data = pay, index = scores)
ans_dict['prepay_per_score'] = pd.Series(data = pre, index = scores)
ans_dict['provision_per_score'] = pd.Series(data = prov, index = scores)
ans_dict['writeoff_per_score'] = pd.Series(data = wo, index = scores)
return ans_dict
def get_scores():
"""
:return: list with available scores
"""
db = DB('forecast')
table = db.table('scores')
sql = select([table.c.score]).order_by(asc('score'))
ans = db.query(sql)
ret = []
for row in ans:
ret.append(int(row[0]))
return ret
def get_rolling(product_name):
"""
Get the rolling matrixes for a specific product
:param product_name:
:return: dict with rolling matrix for each month
"""
db = DB('forecast')
table = db.table('rolling')
scores = get_scores()
ans_dict = dict()
for each_month in range(12):
ret = []
for each_score in scores:
sql = select([table.c.roll0,
table.c.roll30,
table.c.roll60,
table.c.roll90,
table.c.roll120,
table.c.roll150,
table.c.roll180]).where(
and_(table.c.product_name == product_name,
table.c.month == each_month + 1,
table.c.score == each_score))
# Execute and fetch result
ans = list(db.query(sql).fetchone())
ret.append(ans)
ans_dict[each_month + 1] = ret
return ans_dict
def get_budget(product_name, sdate):
"""
Budget for a product, limited to data available at the database
:param product_name:
:param sdate: starting date
:return: pandas series
"""
db = DB('forecast')
table = db.table('budget')
sql = select([table.c.budget]).where(table.c.product_name ==
product_name).order_by(asc('month'))
ans = db.query(sql).fetchall()
ret = []
for row in ans:
ret.append(float(row[0]))
date_index = pd.date_range(start = sdate, periods = len(ret), freq = 'M')
return pd.Series(data = ret, index = date_index)
def vintage_sett_db(product_name, sdate, disburment, fore_length,
prepay_array, index_array):
# Gets information from forecast database about the contract_info:
contract_info = get_contract_info(product_name)
# Gets information from forecast database about the credit_info:
credit_info = get_credit_info(product_name)
# spread over index is fixed
spreads_array = InterestRateModel.fixed(nper = fore_length,
sdate = sdate,
level = contract_info[
'rate_spread'])
settings = dict(name = product_name,
nper = contract_info['nper'],
credit_type = contract_info['credit_type'],
rate_type = contract_info['rate_type'],
repricing = contract_info['repricing'],
forecast = int(fore_length),
scores = get_scores(),
sdate = pd.to_datetime(sdate),
notional = float(disburment),
index_rates_array = index_array,
rate_spreads_array=spreads_array,
prepay_array=prepay_array,
prepay_per_score=credit_info['prepay_per_score'],
rolling_m=get_rolling(product_name),
pay_per_score=credit_info['pay_per_score'],
writeoff_per_score=credit_info['writeoff_per_score']
)
return settings
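# Illustrative sketch only (not part of the original module): building the two model
# arrays that vintage_sett_db() expects. The rate level, prepayment ceiling and
# disbursement amount below are hypothetical; InterestRateModel and PrepaymentModel
# are already available in this module (they are used above).
def _example_vintage_settings(product_name, sdate, fore_length):
    index_array = InterestRateModel.fixed(nper = fore_length, sdate = sdate, level = 0.05)
    prepay_array = PrepaymentModel.psa(nper = fore_length, ceil = 0.03, stable_period = 12)
    return vintage_sett_db(product_name, sdate, 1000000.0, fore_length,
                           prepay_array, index_array)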
if __name__ == '__main__':
from pprint import pprint
# scr = get_contract_info('tarjeta de credito')
# pprint(scr)
# score = get_scores()
# print(score)
# x = get_rolling('tarjeta de credito')
# print(x)
bdg = get_budget('tarjeta de credito', '31-01-2017')
print(bdg)
# x = get_credit_info('tarjeta de credito')
# pprint(x)
| mit | 512,306,728,074,246,300 | 31.840164 | 79 | 0.527393 | false |
eirannejad/pyRevit | pyrevitlib/pyrevit/revit/selection.py | 1 | 9893 | from pyrevit import HOST_APP, DOCS, PyRevitException
from pyrevit import framework, DB, UI
from pyrevit.coreutils.logger import get_logger
from pyrevit.revit import ensure
from pyrevit.revit import query
__all__ = ('pick_element', 'pick_element_by_category',
'pick_elements', 'pick_elements_by_category',
'get_picked_elements', 'get_picked_elements_by_category',
'pick_edge', 'pick_edges',
'pick_face', 'pick_faces',
'pick_linked', 'pick_linkeds',
'pick_elementpoint', 'pick_elementpoints',
'pick_point', 'pick_rectangle', 'get_selection_category_set',
'get_selection')
#pylint: disable=W0703,C0302,C0103
mlogger = get_logger(__name__)
class ElementSelection:
def __init__(self, element_list=None):
if element_list is None:
if HOST_APP.uidoc:
self._refs = \
[x for x in HOST_APP.uidoc.Selection.GetElementIds()]
else:
self._refs = []
else:
self._refs = ElementSelection.get_element_ids(element_list)
def __len__(self):
return len(self._refs)
def __iter__(self):
for elref in self._refs:
yield DOCS.doc.GetElement(elref)
def __getitem__(self, index):
return self.elements[index]
def __contains__(self, item):
if isinstance(item, DB.Element):
elref = item.Id
elif isinstance(item, DB.ElementId):
elref = item
else:
elref = DB.ElementId.InvalidElementId
return elref in self._refs
@classmethod
def get_element_ids(cls, mixed_list):
return ensure.ensure_element_ids(mixed_list)
@property
def is_empty(self):
return len(self._refs) == 0
@property
def elements(self):
return [DOCS.doc.GetElement(x) for x in self._refs]
@property
def element_ids(self):
return self._refs
@property
def first(self):
if self._refs:
return DOCS.doc.GetElement(self._refs[0])
@property
def last(self):
if self._refs:
return DOCS.doc.GetElement(self._refs[-1])
def set_to(self, element_list):
self._refs = ElementSelection.get_element_ids(element_list)
HOST_APP.uidoc.Selection.SetElementIds(
framework.List[DB.ElementId](self._refs)
)
HOST_APP.uidoc.RefreshActiveView()
def append(self, element_list):
self._refs.extend(ElementSelection.get_element_ids(element_list))
self.set_to(self._refs)
def include(self, element_type):
refs = [x for x in self._refs
if isinstance(DOCS.doc.GetElement(x),
element_type)]
return ElementSelection(refs)
def exclude(self, element_type):
refs = [x for x in self._refs
if not isinstance(DOCS.doc.GetElement(x),
element_type)]
return ElementSelection(refs)
def no_views(self):
return self.exclude(DB.View)
def only_views(self):
return self.include(DB.View)
def expand_groups(self):
expanded_refs = []
for element in self.elements:
if isinstance(element, DB.Group):
expanded_refs.extend(element.GetMemberIds())
else:
expanded_refs.append(element.Id)
self._refs = expanded_refs
class PickByCategorySelectionFilter(UI.Selection.ISelectionFilter):
def __init__(self, category_id):
self.category_id = category_id
# standard API override function
def AllowElement(self, element):
if element.Category and self.category_id == element.Category.Id:
return True
else:
return False
# standard API override function
def AllowReference(self, refer, point): # pylint: disable=W0613
return False
def _pick_obj(obj_type, message, multiple=False, world=False, selection_filter=None):
refs = []
try:
mlogger.debug('Picking elements: %s '
'message: %s '
'multiple: %s '
'world: %s', obj_type, message, multiple, world)
# decide which picker method to use
picker_func = HOST_APP.uidoc.Selection.PickObject
if multiple:
picker_func = HOST_APP.uidoc.Selection.PickObjects
# call the correct signature of the picker function
# if selection filter is provided
if selection_filter:
pick_result = \
picker_func(
obj_type,
selection_filter,
message
)
else:
pick_result = \
picker_func(
obj_type,
message
)
# process the results
if multiple:
refs = list(pick_result)
else:
refs = []
refs.append(pick_result)
if not refs:
mlogger.debug('Nothing picked by user...Returning None')
return None
mlogger.debug('Picked elements are: %s', refs)
if obj_type == UI.Selection.ObjectType.Element:
return_values = \
[DOCS.doc.GetElement(ref)
for ref in refs]
elif obj_type == UI.Selection.ObjectType.PointOnElement:
if world:
return_values = [ref.GlobalPoint for ref in refs]
else:
return_values = [ref.UVPoint for ref in refs]
else:
return_values = \
[DOCS.doc.GetElement(ref)
.GetGeometryObjectFromReference(ref)
for ref in refs]
mlogger.debug('Processed return elements are: %s', return_values)
if len(return_values) > 1 or multiple:
return return_values
elif len(return_values) == 1:
return return_values[0]
else:
mlogger.error('Error processing picked elements. '
'return_values should be a list.')
except Exception:
return None
def pick_element(message=''):
return _pick_obj(UI.Selection.ObjectType.Element,
message)
def pick_element_by_category(cat_name_or_builtin, message=''):
category = query.get_category(cat_name_or_builtin)
if category:
pick_filter = PickByCategorySelectionFilter(category.Id)
return _pick_obj(UI.Selection.ObjectType.Element,
message,
selection_filter=pick_filter)
else:
raise PyRevitException("Can not determine category id from: {}"
.format(cat_name_or_builtin))
def pick_elementpoint(message='', world=False):
return _pick_obj(UI.Selection.ObjectType.PointOnElement,
message,
world=world)
def pick_edge(message=''):
return _pick_obj(UI.Selection.ObjectType.Edge,
message)
def pick_face(message=''):
return _pick_obj(UI.Selection.ObjectType.Face,
message)
def pick_linked(message=''):
return _pick_obj(UI.Selection.ObjectType.LinkedElement,
message)
def pick_elements(message=''):
return _pick_obj(UI.Selection.ObjectType.Element,
message,
multiple=True)
def pick_elements_by_category(cat_name_or_builtin, message=''):
category = query.get_category(cat_name_or_builtin)
if category:
pick_filter = PickByCategorySelectionFilter(category.Id)
return _pick_obj(UI.Selection.ObjectType.Element,
message,
multiple=True,
selection_filter=pick_filter)
else:
raise PyRevitException("Can not determine category id from: {}"
.format(cat_name_or_builtin))
def get_picked_elements(message=''):
picked_element = True
while picked_element:
picked_element = pick_element(message=message)
if not picked_element:
break
yield picked_element
def get_picked_elements_by_category(cat_name_or_builtin, message=''):
picked_element = True
while picked_element:
picked_element = pick_element_by_category(cat_name_or_builtin,
message=message)
if not picked_element:
break
yield picked_element
def pick_elementpoints(message='', world=False):
return _pick_obj(UI.Selection.ObjectType.PointOnElement,
message,
multiple=True, world=world)
def pick_edges(message=''):
return _pick_obj(UI.Selection.ObjectType.Edge,
message,
multiple=True)
def pick_faces(message=''):
return _pick_obj(UI.Selection.ObjectType.Face,
message,
multiple=True)
def pick_linkeds(message=''):
return _pick_obj(UI.Selection.ObjectType.LinkedElement,
message,
multiple=True)
def pick_point(message=''):
try:
return HOST_APP.uidoc.Selection.PickPoint(message)
except Exception:
return None
def pick_rectangle(message='', pick_filter=None):
if pick_filter:
return HOST_APP.uidoc.Selection.PickElementsByRectangle(pick_filter,
message)
else:
return HOST_APP.uidoc.Selection.PickElementsByRectangle(message)
def get_selection_category_set():
selection = ElementSelection()
cset = DB.CategorySet()
for element in selection:
cset.Insert(element.Category)
return cset
def get_selection():
return ElementSelection()
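# Illustrative usage sketch only (not part of the original module). The category name
# "Walls" and the prompt messages are assumptions made for the example.
def _example_usage():
    # elements currently selected in Revit, with view elements filtered out
    selection = get_selection().no_views()
    # interactive picks; each helper returns None if the user cancels
    wall = pick_element_by_category("Walls", message="Pick a wall")
    points = pick_elementpoints(message="Pick points on elements", world=True)
    return selection.elements, wall, points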
| gpl-3.0 | -2,571,831,693,091,678,700 | 28.978788 | 85 | 0.570403 | false |
pontikos/uclex_browser | lookups.py | 1 | 17567 | import re
from utils import *
import itertools
import pysam
import csv
#hpo lookup
import phizz
import random
import pickle
import hashlib
import pprint
import utils
import orm
SEARCH_LIMIT = 10000
# massive genes?
#UNSUPPORTED_QUERIES = ['TTN', 'ENSG00000155657', 'CMD1G', 'CMH9', 'CMPD4', 'FLJ32040', 'LGMD2J', 'MYLK5', 'TMD', u'ENST00000342175', u'ENST00000359218', u'ENST00000342992', u'ENST00000460472', u'ENST00000589042', u'ENST00000591111']
def xpos_to_pos(xpos): return int(xpos % 1e9)
def get_gene(db, gene_id):
print(gene_id)
for g in db.genes.find({'gene_id': gene_id}): print(g)
#return g
return db.genes.find_one({'gene_id': gene_id}, fields={'_id': False})
def get_gene_by_name(db, gene_name):
# try gene_name field first
gene = db.genes.find_one({'gene_name': gene_name}, fields={'_id': False})
if gene:
return gene
# if not, try gene['other_names']
return db.genes.find_one({'other_names': gene_name}, fields={'_id': False})
def get_transcript(db, transcript_id):
transcript = db.transcripts.find_one({'transcript_id': transcript_id}, fields={'_id': False})
if not transcript:
return None
transcript['exons'] = get_exons_in_transcript(db, transcript_id)
return transcript
def get_raw_variant(db, xpos, ref, alt, get_id=False):
return db.variants.find_one({'xpos': xpos, 'ref': ref, 'alt': alt}, fields={'_id': get_id})
def get_variant(db, variant_id):
return db.variants.find_one({'variant_id':variant_id})
def get_variant(db, xpos, ref, alt):
variant = get_raw_variant(db, xpos, ref, alt, False)
print(variant)
if variant is None or 'rsid' not in variant: return variant
if variant['rsid'] == '.' or variant['rsid'] is None:
rsid = db.dbsnp.find_one({'xpos': xpos})
if rsid:
variant['rsid'] = 'rs%s' % rsid['rsid']
return variant
def get_variants_from_dbsnp(db, rsid):
if not rsid.startswith('rs'):
return None
try:
rsid = int(rsid.lstrip('rs'))
except Exception, e:
return None
position = db.dbsnp.find_one({'rsid': rsid})
if position:
variants = list(db.variants.find({'xpos': {'$lte': position['xpos'], '$gte': position['xpos']}}, fields={'_id': False}))
if variants:
#add_consequence_to_variants(variants)
return variants
return []
def get_coverage_for_bases(db, xstart, xstop=None):
"""
Get the coverage for the list of bases given by xstart->xstop, inclusive
Returns list of coverage dicts
xstop can be None if just one base, but you'll still get back a list
"""
if xstop is None:
xstop = xstart
coverages = {
doc['xpos']: doc for doc in db.base_coverage.find(
{'xpos': {'$gte': xstart, '$lte': xstop}},
fields={'_id': False}
)
}
ret = []
for i in range(xstart, xstop+1):
if i in coverages:
ret.append(coverages[i])
else:
ret.append({'xpos': i, 'pos': xpos_to_pos(i)})
for item in ret:
item['has_coverage'] = 'mean' in item
del item['xpos']
print '+++++++++++++++++++++++++++'
temp = db.base_coverage.find({'xpos': {'$gte': xstart, '$lte': xstop}})
from bson.json_util import dumps
dumps(temp)
print xstart
print xstop
print '+++++++++++++++++++++++++++++'
return ret
def get_coverage_for_transcript(db, xstart, xstop=None):
"""
:param db:
:param genomic_coord_to_exon:
:param xstart:
:param xstop:
:return:
"""
coverage_array = get_coverage_for_bases(db, xstart, xstop)
# only return coverages that have coverage (if that makes any sense?)
# return coverage_array
#print '+++++++++++++++++++++++++'
#print coverage_array
#print '+++++++++++++++++++++++++'
covered = [c for c in coverage_array if c['has_coverage']]
for c in covered: del c['has_coverage']
return covered
def get_constraint_for_transcript(db, transcript):
return db.constraint.find_one({'transcript': transcript}, fields={'_id': False})
def get_awesomebar_suggestions(g, query):
"""
This generates autocomplete suggestions when user
query is the string that user types
If it is the prefix for a gene, return list of gene names
"""
regex = re.compile('^' + re.escape(query), re.IGNORECASE)
results = (r for r in g.autocomplete_strings if regex.match(r))
results = itertools.islice(results, 0, 20)
return list(results)
# 1:1-1000
R1 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)-(\d+)$')
R2 = re.compile(r'^(\d+|X|Y|M|MT)\s*:\s*(\d+)$')
R3 = re.compile(r'^(\d+|X|Y|M|MT)$')
R4 = re.compile(r'^(\d+|X|Y|M|MT)\s*[-:]\s*(\d+)-([ATCG]+)-([ATCG]+)$')
def get_awesomebar_result(db, query):
"""
    Similar to the above, but this is called after the user presses enter.
We need to figure out what they meant - could be gene, variant, region
Return tuple of (datatype, identifier)
Where datatype is one of 'gene', 'variant', or 'region'
And identifier is one of:
- ensembl ID for gene
- variant ID string for variant (eg. 1-1000-A-T)
- region ID string for region (eg. 1-1000-2000)
Follow these steps:
- if query is an ensembl ID, return it
- if a gene symbol, return that gene's ensembl ID
- if an RSID, return that variant's string
    Finally, note that we don't return the whole object here - only its identifier.
This could be important for performance later
"""
query = query.strip()
print 'Query: %s' % query
if query.startswith('HP:'):
description=phizz.query_hpo([query])
#description=hpo_db.hpo.find_one({'hpo_id':query})
return 'hpo', query
if query.startswith('MIM'):
disease=phizz.query_disease([query])
return 'mim', query
# Variant
variant = orm.get_variants_by_rsid(db, query.lower())
if variant:
if len(variant) == 1:
return 'variant', variant[0]['variant_id']
else:
return 'dbsnp_variant_set', variant[0]['rsid']
variant = get_variants_from_dbsnp(db, query.lower())
if variant:
return 'variant', variant[0]['variant_id']
# variant = get_variant(db, )
# TODO - https://github.com/brettpthomas/exac_browser/issues/14
gene = get_gene_by_name(db, query)
if gene:
return 'gene', gene['gene_id']
# From here out, all should be uppercase (gene, tx, region, variant_id)
query = query.upper()
gene = get_gene_by_name(db, query)
if gene:
return 'gene', gene['gene_id']
# Ensembl formatted queries
if query.startswith('ENS'):
# Gene
gene = get_gene(db, query)
if gene:
return 'gene', gene['gene_id']
# Transcript
transcript = get_transcript(db, query)
if transcript:
return 'transcript', transcript['transcript_id']
# From here on out, only region queries
if query.startswith('CHR'):
query = query.lstrip('CHR')
# Region
m = R1.match(query)
if m:
if int(m.group(3)) < int(m.group(2)):
return 'region', 'invalid'
return 'region', '{}-{}-{}'.format(m.group(1), m.group(2), m.group(3))
m = R2.match(query)
if m:
return 'region', '{}-{}-{}'.format(m.group(1), m.group(2), m.group(2))
m = R3.match(query)
if m:
return 'region', '{}'.format(m.group(1))
m = R4.match(query)
if m:
return 'variant', '{}-{}-{}-{}'.format(m.group(1), m.group(2), m.group(3), m.group(4))
return 'not_found', query
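# Illustrative examples only (not part of the original module) of the
# (datatype, identifier) tuples produced above; the concrete queries are hypothetical:
#   get_awesomebar_result(db, '1:1000-2000')  ->  ('region', '1-1000-2000')
#   get_awesomebar_result(db, '1-1000-A-T')   ->  ('variant', '1-1000-A-T')
#   get_awesomebar_result(db, 'HP:0000118')   ->  ('hpo', 'HP:0000118')
#   get_awesomebar_result(db, 'NOSUCHTHING')  ->  ('not_found', 'NOSUCHTHING')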
def get_genes_in_region(db, chrom, start, stop):
"""
Genes that overlap a region
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
genes = db.genes.find({ 'xstart': {'$lte': xstop}, 'xstop': {'$gte': xstart}, }, fields={'_id': False})
return list(genes)
def get_variants_in_region(db, chrom, start, stop):
"""
Variants that overlap a region
Unclear if this will include CNVs
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
variants = list(db.variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart}
}, fields={'_id': False}, limit=SEARCH_LIMIT))
#add_consequence_to_variants(variants)
return list(variants)
def remove_extraneous_information(variant):
return
del variant['genotype_depths']
del variant['genotype_qualities']
del variant['transcripts']
del variant['genes']
del variant['orig_alt_alleles']
del variant['xpos']
del variant['xstart']
del variant['xstop']
del variant['site_quality']
del variant['vep_annotations']
def get_transcripts_in_gene(db, gene_id):
"""
"""
return list(db.transcripts.find({'gene_id': gene_id}, fields={'_id': False}))
def get_exons_in_transcript(db, transcript_id):
# return sorted(
# [x for x in
# db.exons.find({'transcript_id': transcript_id}, fields={'_id': False})
# if x['feature_type'] != 'exon'],
# key=lambda k: k['start'])
return sorted(list(db.exons.find({'transcript_id': transcript_id, 'feature_type': { "$in": ['CDS', 'UTR', 'exon'] }}, fields={'_id': False})), key=lambda k: k['start'])
def get_hpo_patients(hpo_db, patients_db, hpo_id):
"""
Get patients with HPO term.
"""
patients = [p for p in patients_db.patients.find({'features.id':hpo_id}) for f in p['features'] if f['id']== hpo_id and f['observed']=='yes']
print(hpo_id,len(patients))
for r in hpo_db.hpo.find({'is_a':hpo_id}):
for i in r['id']: patients+=list(itertools.chain(get_hpo_patients(hpo_db,patients_db,i)))
#remove duplicates
patients={v['external_id']:v for v in patients}.values()
return patients
# return hpo terms found in people in which variant is found
def get_hpo(variant_str):
samples=get_samples(variant_str)
#chrom,pos,ref,alt,=str(variant_str.strip()).split('-')
d=csv.DictReader(file('/data/uclex_data/UCLexInfo/uclex-samples.csv','r'),delimiter=',')
hpo=[]
for r in d:
if r['sample'] not in samples: continue
pheno=r['phenotype']
print((r['sample'],pheno,))
if pheno.startswith('HP'):
hpo+=[phizz.query_hpo([pheno])]
elif pheno.startswith('MIM'):
hpo+=[phizz.query_disease([pheno])]
return(hpo)
def get_hpo_children(hpo_db, hpo_id):
hpo=[hpo_db.hpo.find_one({'id':hpo_id})]
for r in hpo_db.hpo.find({'is_a':hpo_id}):
for i in r['id']:
hpo+=list(itertools.chain(get_hpo_children(hpo_db,i)))
#remove duplicates
hpo={h['id'][0]:h for h in hpo}.values()
return hpo
def replace_hpo(hpo_db, hpo):
# some hpo_ids are obsolete.
record = hpo_db.hpo.find_one({'id':hpo[0]})
if not record:
print 'no record in replace_hpo'
print hpo
if 'replaced_by' in record:
new = hpo_db.hpo.find_one({'id':record['replaced_by'][0]})
return [new['id'][0], new['name'][0]]
else:
return hpo
def get_hpo_ancestors(hpo_db, hpo_id):
"""
Get HPO terms higher up in the hierarchy.
"""
h=hpo_db.hpo.find_one({'id':hpo_id})
#print(hpo_id,h)
if 'replaced_by' in h:
# not primary id, replace with primary id and try again
h = hpo_db.hpo.find_one({'id':h['replaced_by'][0]})
hpo=[h]
if 'is_a' not in h: return hpo
for hpo_parent_id in h['is_a']:
#p=hpo_db.hpo.find({'id':hpo_parent_id}):
hpo+=list(itertools.chain(get_hpo_ancestors(hpo_db,hpo_parent_id)))
#remove duplicates
hpo={h['id'][0]:h for h in hpo}.values()
return hpo
def get_hpo_ancestors_array(hpo_db, hpo_id):
# return an array of ids, instead of array of dicts
anc = get_hpo_ancestors(hpo_db, hpo_id)
result = []
for a in anc:
result.extend(a['id'])
return result
def get_hpo_size_freq(freq_file):
# read freq file
# result = {'HP:0000345':{size: 456, freq: 0.1, raw: 456/4500}}
hpo_freq = {}
inf = open(freq_file, 'r')
for l in inf:
l = l.rstrip().split('\t')
nums = l[1].split('/')
size = int(nums[0])
tot = float(nums[1])
hpo_freq[l[0]] = {'size': size, 'freq': size/tot, 'raw': l[1]}
return hpo_freq
def get_hpo_common_ancestors(hpo_db, h1, h2):
# return a list of hpo ids for h1 and h2's common ancestors
a1 = get_hpo_ancestors(hpo_db, h1)
a2 = get_hpo_ancestors(hpo_db,h2)
an1 = []
an2 = []
for a in a1:
an1.extend(a['id'])
for a in a2:
an2.extend(a['id'])
return list(set(an1) & set(an2))
def get_hpo_nearest_common_ancestors(hpo_db, h1, h2, hpo_freq):
# given hpo_freq, find out a list of nearest common ancestors
common_ans = get_hpo_common_ancestors(hpo_db, h1, h2)
freqs = [hpo_freq[h] for h in common_ans]
min_freq = min(freqs)
inds = [i for i, v in enumerate(freqs) if v == min_freq]
return [common_ans[i] for i in inds]
def hpo_minimum_set(hpo_db, hpo_ids=[]):
'''
minimize the hpo sets
results = {'HP:0000505': [ancestors]}
'''
hpo_ids = list(set(hpo_ids))
results = dict([(hpo_id, [ h['id'][0] for h in get_hpo_ancestors(hpo_db, hpo_id)],) for hpo_id in hpo_ids])
# minimise
bad_ids = []
for i in range(len(hpo_ids)):
for j in range(i+1,len(hpo_ids)):
if hpo_ids[i] in results[hpo_ids[j]]:
# i is j's ancestor, remove
bad_ids.append(hpo_ids[i])
break
if hpo_ids[j] in results[hpo_ids[i]]:
# j is i's ancestor, remove
bad_ids.append(hpo_ids[j])
return list(set(hpo_ids) - set(bad_ids))
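# Illustrative example only (not part of the original module): hpo_minimum_set() keeps
# the most specific terms of a list. Assuming 'HP:0000118' (Phenotypic abnormality) is
# an ancestor of 'HP:0000505' in the loaded ontology:
#   hpo_minimum_set(hpo_db, ['HP:0000118', 'HP:0000505'])  ->  ['HP:0000505']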
def get_patient_hpo(hpo_db,patients_db, patient_id,ancestors=True):
"""
Get complete hierarchy of HPO terms for patient.
"""
p=patients_db.patients.find_one({'external_id':patient_id})
if 'features' not in p: return []
if ancestors:
hpo_ancestors=[]
for hpo_ids in [f['id'] for f in p['features'] if f['observed']=='yes']:
hpo_ancestors+=get_hpo_ancestors(hpo_db,hpo_ids)
# remove duplicates
hpo_ancestors={h['id'][0]:h for h in hpo_ancestors}.values()
return hpo_ancestors
else:
return [ hpo_db.hpo.find_one({'id':f['id']}) for f in p['features'] if f['observed']=='yes']
def get_gene_hpo(hpo_db,gene_name,dot=True):
"""
    Get all HPO terms linked to gene name, including ancestors,
    and return as a dot string for plotting if dot is True.
"""
hpo_ids=[hpo['HPO-Term-ID'] for hpo in hpo_db.OMIM_ALL_FREQUENCIES_genes_to_phenotype.find({'entrez-gene-symbol':gene_name})]
if not hpo_ids:
hpo_ids=hpo_db.genes_pheno.find_one({'gene':gene_name})
# no hpo linked to gene
if hpo_ids is None: hpo_ids=[]
else: hpo_ids=hpo_ids['hpo']
hpo_ancestors=[get_hpo_ancestors(hpo_db,hid) for hid in hpo_ids]
hpo_ancestors=list(itertools.chain(*hpo_ancestors))
# remove duplicates
hpo_ancestors={h['id'][0]:h for h in hpo_ancestors}.values()
hpo_string="digraph {"
for h in hpo_ancestors:
hpo_id=h['id'][0]
hpo_label=h['name'][0]
#hpo_count=0
hpo_string+= '"{}" [style="filled", fixedsize="true", fontsize="15", shape="circle", width="0.75", fillcolor="powderblue", label="{}\n{}", color="transparent"];\n'.format(hpo_id,hpo_label,hpo_id)
for h in hpo_ancestors:
hpo_id=h['id'][0]
if 'is_a' not in h: continue
for anc in h['is_a']:
hpo_string+='"{}" -> "{}" [color="#000000", lty="solid"];\n'.format(anc,hpo_id)
hpo_string+= '}'
if dot:
return hpo_string
else:
return hpo_ancestors
# get hpo terms shared between patients
def common_hpo(hpo_db,patients_db,patient_ids):
terms_by_patient=[get_patient_hpo(hpo_db,patients_db,pid) for pid in patient_ids]
# intersection of lists
common_hpo_term_ids=frozenset.intersection(*[frozenset([y['id'][0] for y in x]) for x in terms_by_patient])
# remove ancestors
#get_hpo_ancestors(hpo_db, hpo_id):
# lookup hpo terms
common_hpo_terms=[hpo_db.hpo.find_one({'id':hpo_id}) for hpo_id in common_hpo_term_ids]
return common_hpo_terms
# get union of hpo terms seen in patients
def union_hpo(hpo_db,patients_db,patient_ids):
terms_by_patient=[get_patient_hpo(hpo_db,patients_db,pid) for pid in patient_ids]
#flatten lists
terms_by_patient=list(itertools.chain(*terms_by_patient))
# intersection of lists
terms_by_patient={h['id'][0]:h for h in terms_by_patient}.values()
return terms_by_patient
# VCF gene query
def variants_in_gene_vcf(gene_symbol):
import mygene
mg = mygene.MyGeneInfo()
g=mg.query('symbol:%s' % gene_symbol, fields='exons', species='human')
print g
exons=g['hits'][0]['exons']
for transcript in exons:
yield (transcript, exons[transcript],)
def get_patient_observed_hpo(patient, patient_db):
# returns [('HP:0000001', 'hell yeah')]
this_patient = patient_db.patients.find_one({'external_id':patient})
result = [(None, None)]
if not this_patient:
#print 'ERROR: %s not in patients db' % patient
pass
else:
if 'features' not in this_patient:
print 'WARNING: features not in ' + patient
p_features = this_patient.get('features', [{'id':'HP:0000001', 'label':'All', 'observed': 'yes' }])
result = [(f['id'], f['label']) for f in p_features if f['observed']=='yes']
return result
| mit | -3,338,054,858,156,872,700 | 33.51277 | 233 | 0.599989 | false |
partofthething/home-assistant | homeassistant/components/tado/binary_sensor.py | 1 | 7734 | """Support for Tado sensors for each zone."""
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_WINDOW,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
TYPE_AIR_CONDITIONING,
TYPE_BATTERY,
TYPE_HEATING,
TYPE_HOT_WATER,
TYPE_POWER,
)
from .entity import TadoDeviceEntity, TadoZoneEntity
_LOGGER = logging.getLogger(__name__)
DEVICE_SENSORS = {
TYPE_BATTERY: [
"battery state",
"connection state",
],
TYPE_POWER: [
"connection state",
],
}
ZONE_SENSORS = {
TYPE_HEATING: [
"power",
"link",
"overlay",
"early start",
"open window",
],
TYPE_AIR_CONDITIONING: [
"power",
"link",
"overlay",
"open window",
],
TYPE_HOT_WATER: ["power", "link", "overlay"],
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up the Tado sensor platform."""
tado = hass.data[DOMAIN][entry.entry_id][DATA]
devices = tado.devices
zones = tado.zones
entities = []
# Create device sensors
for device in devices:
if "batteryState" in device:
device_type = TYPE_BATTERY
else:
device_type = TYPE_POWER
entities.extend(
[
TadoDeviceBinarySensor(tado, device, variable)
for variable in DEVICE_SENSORS[device_type]
]
)
# Create zone sensors
for zone in zones:
zone_type = zone["type"]
if zone_type not in ZONE_SENSORS:
_LOGGER.warning("Unknown zone type skipped: %s", zone_type)
continue
entities.extend(
[
TadoZoneBinarySensor(tado, zone["name"], zone["id"], variable)
for variable in ZONE_SENSORS[zone_type]
]
)
if entities:
async_add_entities(entities, True)
class TadoDeviceBinarySensor(TadoDeviceEntity, BinarySensorEntity):
"""Representation of a tado Sensor."""
def __init__(self, tado, device_info, device_variable):
"""Initialize of the Tado Sensor."""
self._tado = tado
super().__init__(device_info)
self.device_variable = device_variable
self._unique_id = f"{device_variable} {self.device_id} {tado.home_id}"
self._state = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "device", self.device_id
),
self._async_update_callback,
)
)
self._async_update_device_data()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.device_name} {self.device_variable}"
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
if self.device_variable == "battery state":
return DEVICE_CLASS_BATTERY
if self.device_variable == "connection state":
return DEVICE_CLASS_CONNECTIVITY
return None
@callback
def _async_update_callback(self):
"""Update and write state."""
self._async_update_device_data()
self.async_write_ha_state()
@callback
def _async_update_device_data(self):
"""Handle update callbacks."""
try:
self._device_info = self._tado.data["device"][self.device_id]
except KeyError:
return
if self.device_variable == "battery state":
self._state = self._device_info["batteryState"] == "LOW"
elif self.device_variable == "connection state":
self._state = self._device_info.get("connectionState", {}).get(
"value", False
)
class TadoZoneBinarySensor(TadoZoneEntity, BinarySensorEntity):
"""Representation of a tado Sensor."""
def __init__(self, tado, zone_name, zone_id, zone_variable):
"""Initialize of the Tado Sensor."""
self._tado = tado
super().__init__(zone_name, tado.home_id, zone_id)
self.zone_variable = zone_variable
self._unique_id = f"{zone_variable} {zone_id} {tado.home_id}"
self._state = None
self._state_attributes = None
self._tado_zone_data = None
async def async_added_to_hass(self):
"""Register for sensor updates."""
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(
self._tado.home_id, "zone", self.zone_id
),
self._async_update_callback,
)
)
self._async_update_zone_data()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.zone_name} {self.zone_variable}"
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
if self.zone_variable == "early start":
return DEVICE_CLASS_POWER
if self.zone_variable == "link":
return DEVICE_CLASS_CONNECTIVITY
if self.zone_variable == "open window":
return DEVICE_CLASS_WINDOW
if self.zone_variable == "overlay":
return DEVICE_CLASS_POWER
if self.zone_variable == "power":
return DEVICE_CLASS_POWER
return None
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
@callback
def _async_update_callback(self):
"""Update and write state."""
self._async_update_zone_data()
self.async_write_ha_state()
@callback
def _async_update_zone_data(self):
"""Handle update callbacks."""
try:
self._tado_zone_data = self._tado.data["zone"][self.zone_id]
except KeyError:
return
if self.zone_variable == "power":
self._state = self._tado_zone_data.power == "ON"
elif self.zone_variable == "link":
self._state = self._tado_zone_data.link == "ONLINE"
elif self.zone_variable == "overlay":
self._state = self._tado_zone_data.overlay_active
if self._tado_zone_data.overlay_active:
self._state_attributes = {
"termination": self._tado_zone_data.overlay_termination_type
}
elif self.zone_variable == "early start":
self._state = self._tado_zone_data.preparation
elif self.zone_variable == "open window":
self._state = bool(
self._tado_zone_data.open_window
or self._tado_zone_data.open_window_detected
)
self._state_attributes = self._tado_zone_data.open_window_attr
| mit | -1,611,426,186,836,054,300 | 27.32967 | 80 | 0.571115 | false |
ciex/souma | synapse/electrical.py | 1 | 26338 | import datetime
import json
import logging
import requests
import iso8601
import os
from base64 import b64encode
from Crypto import Random
from dateutil.parser import parse as dateutil_parse
from gevent import Greenlet
from hashlib import sha256
from humanize import naturaltime
from operator import itemgetter
from nucleus import notification_signals, ERROR, create_session
from nucleus.models import Persona, Souma
from web_ui import app
API_VERSION = 0
API_VERSION_LONG = 0.1
class ElectricalSynapse(object):
"""
Handle connection to HTTP endpoints, specifically the Glia web service (Singleton)
Parameters:
host (String) The IP address of a Glia server to connect to
"""
_instance = None
def __new__(cls, *args, **kwargs):
"""Singleton pattern"""
if not cls._instance:
app.logger.error("Creating new ElectricalSynapse Singleton")
cls._instance = super(ElectricalSynapse, cls).__new__(cls, *args, **kwargs)
cls._instance._fresh = True
else:
cls._instance._fresh = False
return cls._instance
def __init__(self, parent=None, host=None):
from synapse import Synapse
self.logger = logging.getLogger('e-synapse')
self.logger.setLevel(app.config['LOG_LEVEL'])
if host is None:
host = app.config['LOGIN_SERVER']
if app.config["LOGIN_SERVER_SSL"] is True:
protocol = "https"
else:
protocol = "http"
self.host = "{proto}://{host}".format(proto=protocol, host=host)
# Core setup
if parent:
self.synapse = parent
else:
self.synapse = Synapse()
self.souma = Souma.query.get(app.config["SOUMA_ID"]) # The Souma which hosts this Synapse
self.session = requests.Session() # Session object to use for requests
self._peers = dict()
self._sessions = dict() # Holds session info for owned Personas (see _get_session(), _set_session()
self.rng = Random.new()
# Setup signals
notification_signals.signal('local-model-changed').connect(self.on_local_model_changed)
# Test connection to glia-server
try:
server_info, errors = self._request_resource("GET", [])
except requests.ConnectionError, e:
self.logger.error("Could not establish connection to glia server\n* {}".format(e))
raise
# Register souma if neccessary
if errors:
# Check for SOUMA_NOT_FOUND error code in server response
if server_info and ERROR["SOUMA_NOT_FOUND"](None)[0] in map(itemgetter(0), server_info["meta"]["errors"]):
if self.souma_register():
server_info, errors = self._request_resource("GET", [])
else:
self._log_errors("Error registering Souma", errors)
raise requests.ConnectionError()
else:
self._log_errors("Error connecting to Glia", errors)
raise requests.ConnectionError()
try:
self.logger.info(
"\n".join(["{:=^80}".format(" GLIA INFO "),
"{:>12}: {} ({})".format("status",
server_info["server_status"][0]["status_message"],
server_info["server_status"][0]["status_code"]),
"{:>12}: {}".format("server id", server_info["server_status"][0]["id"]),
"{:>12}: {}".format("personas", server_info["server_status"][0]["personas_registered"]),
"{:>12}: {}".format("vesicles", server_info["server_status"][0]["vesicles_stored"])
]))
except KeyError, e:
self.logger.warning("Received invalid server status: Missing {}".format(e))
def _get_session(self, persona):
"""
Return the current session id for persona or create a new session
Parameters:
persona (Persona): A persona object to be logged in
Returns:
dict A dictionary with keys `id`, containing a session id and
`timeout` containing a datetime object for the session's
timeout
"""
if persona.id in self._sessions:
return self._sessions[persona.id]
else:
return self.persona_login(persona)
def _set_session(self, persona, session_id, timeout):
"""
Store a new session id for persona
Args:
persona (persona)
session_id (str): New session id. If set to none, the session is removed
timeout (str): ISO formatted datetime of session timeout
"""
if session_id is None:
del self._sessions[persona.id]
else:
to = dateutil_parse(timeout)
self._sessions[persona.id] = {
'id': session_id,
'timeout': to
}
def _log_errors(self, msg, errors, level="error"):
"""
Log a list of errors to the logger
Args:
msg(str): A message describing the error source
errors(list): A list of error messages
Raises:
ValueError: If the specified log level is invalid
"""
if level not in ["debug", "info", "warning", "error"]:
raise ValueError("Invalid log level {}".format(level))
call = getattr(self.logger, level)
call("{msg}:\n{list}".format(msg=msg, list="\n* ".join(str(e) for e in errors)))
def _keepalive(self, persona):
"""
Keep @param persona's glia session alive by sending a keep-alive request
If there is no session yet for persona, she is logged in.
"""
self.logger.info("Sending keepalive for {}".format(persona))
session_id = self._get_session(persona)["id"]
resp, errors = self._request_resource("GET", ["sessions", session_id])
if errors:
self._log_errors("Error requesting keepalive for {}".format(persona), errors)
# Login if session is invalid
if ERROR["INVALID_SESSION"] in resp['meta']['errors']:
self.login(persona)
else:
session_id = resp['sessions'][0]['id']
timeout = resp['sessions'][0]['timeout']
self._set_session(persona, session_id, timeout)
self._queue_keepalive(persona)
def _queue_keepalive(self, persona, timeout=900):
"""
Queue keepalive for persona in @param timeout seconds (default 15 minutes)
"""
buf = 10 # seconds
remaining = (self._get_session(persona)['timeout'] - datetime.datetime.utcnow()).seconds - buf
if (remaining - buf) < 0:
remaining = 2
self.logger.debug("Next keepalive for {} queued in {} seconds".format(persona, remaining))
ping = Greenlet(self._keepalive, persona)
ping.start_later(remaining)
def _glia_auth(self, url, payload=None):
"""Returns headers with HTTP Glia Authentication for given request data
Args:
url: Requested URL
payload: Payload of the request if any
"""
if payload is None:
payload = str()
rand = self.rng.read(16)
headers = dict()
headers['Glia-Souma'] = self.souma.id
headers['Glia-Rand'] = b64encode(rand)
headers['Glia-Auth'] = self.souma.sign("".join([
str(self.souma.id),
rand,
str(url),
payload
]))
return headers
def _request_resource(self, method, endpoint, params=None, payload=None):
"""
Request a resource from the server
Args:
method (str): One of "GET", "POST", "PUT", "PATCH", "DELETE"
endpoint (list): A list of strings forming the path of the API endpoint
params (dict): Optional parameters to attach to the query strings
payload (object): Will be attached to the request JSON encoded
Returns:
A tuple of two elements:
[0] (object) The (decoded) response of the server
[1] (list) A list of error strings specified in the `errors` field of the response
"""
# Validate params
HTTP_METHODS_1 = ("GET", "DELETE")
HTTP_METHODS_2 = ("POST", "PUT", "PATCH") # have `data` parameter
if method not in HTTP_METHODS_1 and method not in HTTP_METHODS_2:
            raise ValueError("Invalid request method {}".format(method))
if payload:
if not isinstance(payload, dict):
raise ValueError("Payload must be a dictionary type")
try:
payload_json = json.dumps(payload)
except ValueError, e:
raise ValueError("Error encoding payload of {}:\n{}".format(self, e))
else:
payload_json = None
# Construct URL
url_elems = [self.host, "v" + str(API_VERSION)]
url_elems.extend(endpoint)
url = "/".join(url_elems) + "/"
# Heroku's SSL cert is bundled with Souma
cert = os.path.join(app.config["RUNTIME_DIR"], "static", "herokuapp.com.pem")
# Make request
errors = list()
parsing_failed = False
# Authenticate request
headers = self._glia_auth(url, payload_json)
call = getattr(self.session, method.lower())
try:
if method in HTTP_METHODS_1:
self.logger.debug("{} {}".format(method, url))
r = call(url, headers=headers, params=params, verify=cert)
else:
self.logger.debug("{} {}\n{}".format(method, url, payload_json))
headers['Content-Type'] = "application/json"
r = call(url, payload_json, headers=headers, params=params, verify=cert)
r.raise_for_status()
except requests.exceptions.RequestException, e:
errors.append(e)
# Try parsing the response
resp = None
try:
resp = r.json()
self.logger.debug("Received data:\n{}".format(resp))
except ValueError, e:
resp = None
parsing_failed = True
errors.append("Parsing JSON failed: {}".format(e))
except UnboundLocalError:
parsing_failed = True
errors.append("No data received")
error_strings = list()
if not parsing_failed and "meta" in resp and 'errors' in resp["meta"]:
for error in resp['meta']['errors']:
error_strings.append("{}: {}".format(error[0], error[1]))
# Don't return empty error_strings if parsing server errors has failed, return client-side errors instead
elif errors:
error_strings = errors
# Log all errors
if errors:
self.logger.error("{} {} / {} failed.\nParam: {}\nPayload: {}\nErrors:\n* {}".format(
method, endpoint, url, params, payload_json, "\n* ".join(str(e) for e in errors)))
return (resp, error_strings)
def _update_peer_list(self, persona):
"""
Retrieve current IPs of persona's peers
Args:
persona (persona): The Persona whose peers will be located
Returns:
list A list of error messages or None
"""
self.logger.info("Updating peerlist for {}".format(persona))
contacts = Persona.query.get(persona.id).contacts
peer_ids = list() # peers we want to look up
for p in contacts:
peer_ids.append(p.id)
if len(peer_ids) == 0:
self.logger.info("{} has no peers. Peerlist update cancelled.".format(persona))
else:
# ask glia server for peer info
resp, errors = self._request_resource("GET", ["sessions"], params={'ids': ",".join(peer_ids)})
# TODO: Remove peers that are no longer online
if errors:
self._log_errors("Error updating peer list", errors)
return errors
else:
offline = 0
for infodict in resp['sessions']:
p_id = infodict['id']
soumas = infodict['soumas']
if soumas:
for souma in soumas:
# self.souma_discovered.send(self._update_peer_list, message=souma)
pass
else:
offline += 1
self.logger.info("No online souma found for {}".format(contacts.get(p_id)))
self.logger.info("Updated peer list: {}/{} online".format(
len(resp["sessions"]) - offline, len(resp["sessions"])))
def get_persona(self, persona_id):
"""Returns a Persona object for persona_id, loading it from Glia if neccessary
Args:
persona_id (String): ID of the required Persona
Returns:
Persona: If a record was found
None: If no record was found
"""
persona = Persona.query.get(persona_id)
if persona:
return persona
else:
resp, errors = self.persona_info(persona_id)
if errors or (resp and "personas" in resp and len(resp["personas"]) == 0):
self._log_errors("Error requesting Persona {} from server".format(persona_id), errors)
return None
else:
persona = Persona.query.get(persona_id)
return persona
def find_persona(self, address):
"""
Find personas by their email address
Args:
address (string): Email address to search for
Returns:
list A list of dictionaries containing found profile information
Keys:
"persona_id",
"username",
"host",
"port_external",
"port_internal",
"crypt_public",
"sign_public",
"connectable"
"""
self.logger.info("Requesting persona record for '{}'".format(address))
payload = {
"email_hash": [sha256(address).hexdigest(), ]
}
return self._request_resource("POST", ["personas"], payload=payload)
def login_all(self):
"""
Login all personas with a non-empty private key
"""
persona_set = Persona.query.filter('sign_private != ""').all()
if len(persona_set) == 0:
self.logger.warning("No controlled Persona found.")
else:
self.logger.info("Logging in {} personas".format(len(persona_set)))
for p in persona_set:
self.persona_login(p)
def myelin_receive(self, recipient_id, interval=None):
"""
Request Vesicles directed at recipient from Myelin and pass them on to Synapse for handling.
Parameters:
recipient_id (String) The ID of the Persona for which to listen
interval (int) If set to an amount of seconds, the function will repeatedly be called again in this interval
"""
recipient = Persona.query.get(recipient_id)
if not recipient:
self.logger.error("Could not find Persona {}".format(recipient_id))
return
self.logger.debug("Updating Myelin of {} at {} second intervals".format(recipient, interval))
params = dict()
# Determine offset
offset = recipient.myelin_offset
if offset is not None:
self.logger.debug("Last Vesicle for {} received {} ({})".format(
recipient, naturaltime(datetime.datetime.utcnow() - offset), offset))
params["offset"] = str(recipient.myelin_offset)
resp, errors = self._request_resource("GET", ["myelin", "recipient", recipient.id], params, None)
if errors:
self._log_errors("Error receiving from Myelin", errors)
else:
for v in resp["vesicles"]:
vesicle = self.synapse.handle_vesicle(v)
if vesicle is not None:
myelin_modified = iso8601.parse_date(
resp["meta"]["myelin_modified"][vesicle.id]).replace(tzinfo=None)
if offset is None or myelin_modified > offset:
offset = myelin_modified
# Update recipient's offset if a more recent Vesicle has been received
if offset is not None:
if recipient.myelin_offset is None or offset > recipient.myelin_offset:
recipient.myelin_offset = offset
session = create_session()
session.add(recipient)
session.commit()
# session.close()
# Schedule this method to be called in again in interval seconds
if interval is not None:
update = Greenlet(self.myelin_receive, recipient_id, interval)
update.start_later(interval)
def myelin_store(self, vesicle):
"""
Store a Vesicle in Myelin
Parameters:
vesicle (Vesicle) The vesicle to be stored
Returns:
list List of error strings if such occurred
"""
data = {
"vesicles": [vesicle.json(), ]
}
resp, errors = self._request_resource("PUT", ["myelin", "vesicles", vesicle.id], payload=data)
if errors:
self._log_errors("Error transmitting {} to Myelin".format(vesicle), errors)
return errors
else:
self.logger.debug("Transmitted {} to Myelin".format(vesicle))
def on_local_model_changed(self, sender, message):
"""Check if Personas were changed and call register / unregister method"""
if message["object_type"] == "Persona":
persona = Persona.query.get(message["object_id"])
if message["action"] == "insert":
self.persona_register(persona)
elif message["action"] == "update":
self.logger.warning("Updating Persona profiles in Glia not yet supported")
elif message["action"] == "delete":
self.persona_unregister(persona)
def persona_info(self, persona_id):
"""
Return a dictionary containing info about a persona, storing it in the db as well.
Parameters:
persona_id (str): Persona ID
Returns:
tuple 0: persona's public profile and auth_token for authentication
as a dict, 1: list of errors while requesting from server
"""
resp, errors = self._request_resource("GET", ["personas", persona_id])
if errors:
self._log_errors("Error retrieving Persona profile for ID: {}".format(persona_id), errors)
else:
pinfo = resp["personas"][0]
modified = iso8601.parse_date(pinfo["modified"])
p = Persona.query.get(persona_id)
if p is None:
session = create_session()
try:
p = Persona(
_stub=True,
id=persona_id,
username=pinfo["username"],
email=pinfo.get("email"),
modified=modified,
sign_public=pinfo["sign_public"],
crypt_public=pinfo["crypt_public"]
)
session.add(p)
session.commit()
self.logger.info("Loaded {} from Glia server".format(p))
except KeyError, e:
self.logger.warning("Missing key in server response for storing new Persona: {}".format(e))
errors.append("Missing key in server response for storing new Persona: {}".format(e))
except:
session.rollback()
raise
finally:
# session.close()
pass
else:
# TODO: Update persona info
pass
return resp, errors
def persona_login(self, persona):
"""
Login a persona on the server, register if not existing. Start myelinated reception if activated.
Returns:
str -- new session id
"""
# Check current state
if persona.id in self._sessions:
            return self._get_session(persona)["id"]
# Obtain auth token
info, errors = self._request_resource("GET", ["personas", persona.id])
if errors:
self._log_errors("Error logging in", errors)
# Register persona if not existing
if ERROR["OBJECT_NOT_FOUND"](None)[0] in map(itemgetter(0), info["meta"]["errors"]):
errors = self.persona_register(persona)
if errors:
self.logger.error("Failed logging in / registering {}.".format(persona))
return None
else:
session = self._get_session(persona)
return session["id"]
try:
auth = info["personas"][0]["auth"]
except KeyError, e:
self.logger.warning("Server sent invalid response: Missing `{}` field.".format(e))
return None
# Send login request
data = {
"personas": [{
"id": persona.id,
'auth_signed': persona.sign(auth),
'reply_to': app.config["SYNAPSE_PORT"]
}]
}
resp, errors = self._request_resource("POST", ["sessions"], payload=data)
# Read response
if errors:
self._log_errors("Login failed", errors)
return None
else:
session_id = resp["sessions"][0]['id']
timeout = resp["sessions"][0]['timeout']
self.logger.info("Persona {} logged in until {}".format(persona, timeout))
self._set_session(persona, session_id, timeout)
self._queue_keepalive(persona)
self._update_peer_list(persona)
if app.config["ENABLE_MYELIN"]:
self.myelin_receive(persona.id, interval=app.config["MYELIN_POLLING_INTERVAL"])
return {
"id": session_id,
"timeout": timeout
}
def persona_logout(self, persona):
"""
Terminate persona's session on the host
Parameters:
persona -- persona to be logged out
Returns:
dict -- error_name:error_message
"""
self.logger.info("Logging out {}".format(persona))
ses = self._get_session(persona)
resp, errors = self._request_resource("DELETE", ["personas", persona.id, ses["id"]])
if errors:
self._log_errors("Error logging out", errors)
return errors
else:
self._set_session(persona, None, None)
self.logger.info("Logged out {}".format(persona))
def persona_register(self, persona):
"""
Register a persona on the server
Parameters:
persona -- persona to be registered
Returns:
list -- error messages
"""
# Create request
data = {
"personas": [{
'persona_id': persona.id,
'username': persona.username,
'modified': persona.modified.isoformat(),
'email_hash': persona.get_email_hash(),
'sign_public': persona.sign_public,
'crypt_public': persona.crypt_public,
'reply_to': app.config['SYNAPSE_PORT']
}, ]
}
response, errors = self._request_resource("PUT", ["personas", persona.id], payload=data)
if errors:
self._log_errors("Error creating glia profile for {}".format(persona), errors)
return errors
# Evaluate response
try:
session_id = response['sessions'][0]['id']
timeout = response['sessions'][0]['timeout']
except KeyError, e:
return ["Invalid server response: Missing key `{}`".format(e)]
self.logger.info("Registered {} with server.".format(persona))
self._set_session(persona, session_id, timeout)
self._update_peer_list(persona)
self._queue_keepalive(persona)
if app.config["ENABLE_MYELIN"]:
self.myelin_receive(persona.id, interval=app.config["MYELIN_POLLING_INTERVAL"])
def persona_unregister(self, persona):
"""
Remove persona's data from the glia server
Parameters:
persona (persona):persona to be unregistered
Returns:
dict error_name:error_message
"""
self.logger.info("Unregistering {}".format(persona))
response, errors = self._request_resource("DELETE", ["personas", persona.id])
if errors:
self._log_errors("Error unregistering persona {}".format(persona), errors)
else:
self.logger.info("Unregisterd persona {} from Glia server:\n{}".format(persona, response))
def souma_register(self):
"""
Register this Souma with the glia server
Returns:
bool -- True if successful
"""
self.logger.info("Registering local {} with Glia-server".format(self.souma))
data = {
"soumas": [{
"id": self.souma.id,
"crypt_public": self.souma.crypt_public,
"sign_public": self.souma.sign_public,
}, ]
}
response, errors = self._request_resource("POST", ["soumas"], payload=data)
if errors:
self._log_errors("Registering {} failed".format(self.souma), errors)
return False
else:
self.logger.info("Successfully registered {} with server".format(self.souma))
return True
def shutdown(self):
"""
Terminate connections and logout
"""
        # Iterate over a copy of the keys; persona_logout() removes entries from _sessions
        for persona_id in list(self._sessions.keys()):
            self.persona_logout(Persona.query.get(persona_id))
| apache-2.0 | 2,572,205,078,655,930,000 | 34.688347 | 120 | 0.549244 | false |
whiler/a-walk-in-python | spider/spider.py | 1 | 10172 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# file: spider.py
# author: whiler
# license: BSD
# Import the http.cookiejar module to provide cookie support
import http.cookiejar
# Import the logging module to record runtime logs
import logging
# Import the socket module to catch HTTP timeout errors
import socket
# Import the urllib.request module to handle HTTP connections and build HTTP requests
import urllib.request
# Import urllib.error to catch HTTPError errors
import urllib.error
# Import the urllib.parse module to handle URLs
import urllib.parse
# Import the third-party lxml.html package to parse HTML
import lxml.html
# Configure the basic logging format
logging.basicConfig(level=logging.NOTSET,
format='[%(levelname)s]\t%(asctime)s\t%(message)s',
datefmt='%Y-%m-%d %H:%M:%S %Z')
class Spider(object):
"""
    Define a Spider class that implements a simple web crawler
"""
def __init__(self, seeds, store):
"""
        :seeds: list of seed addresses
        :store: data store callable used to save results
"""
self.seeds = seeds
self.store = store
        # Request headers needed to masquerade as a normal browser
        self.headers = {
            # Pretend to be a Firefox browser
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:46.0) Gecko/20100101 Firefox/46.0',
            # Pretend these crawled pages were reached via http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/index.html
            'Referer': 'http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/index.html'
        }
        # Use a set to record the link addresses that have already been visited
        self.visited = set()
        # Cookie support
self.cookie = http.cookiejar.CookieJar()
self.processor = urllib.request.HTTPCookieProcessor(self.cookie)
self.opener = urllib.request.build_opener(self.processor)
def download(self, url):
"""
        Download the content of a link address
        :url: link address
        """
        # Log the address that is being downloaded
        logging.debug('downloading ' + url)
        # Build an HTTP request
        request = urllib.request.Request(url, headers=self.headers)
        # The content read defaults to empty
        raw = b''
        # Start catching exceptions
        try:
            # Open an HTTP connection with a 10 second timeout
            connection = self.opener.open(request, timeout=10.0)
            # Read the content from the connection
            raw = connection.read()
        except urllib.error.HTTPError:
            # If an HTTPError occurs, record the link address
            msg = 'download [' + url + '] raised urllib.request.HTTPError, skipped'
            logging.exception(msg)
        except socket.timeout:
            # If a timeout occurs, record the link address
            msg = 'download [' + url + '] raised socket.timeout, skipped'
            logging.exception(msg)
        except Exception:
            # If any other exception occurs, record the link address
            msg = 'download [' + url + '] failed, skipped'
            logging.exception(msg)
        else:
            # No exception occurred, so close the connection
            connection.close()
        # Decode the content that was read using the GB18030 character set
content = raw.decode('gb18030')
return content
def extract_urls(self, url, content):
"""
        Extract link addresses from the content
        :url: source address of the content
        :content: the content
        """
        # Use a list to store the link addresses
        urls = list()
        # In an HTML document the CSS selector for links is "a"
        selector = 'a'
        # Parse the HTML content with lxml.html.fromstring and build the node tree
        root = lxml.html.fromstring(content)
        # Iterate over every link node in the node tree
        for node in root.cssselect(selector):
            # Get the value of the link node's href attribute
            relative = node.attrib.get('href', '')
            if relative:
                # Use urllib.parse's urljoin function to convert the relative address to an absolute address
                real = urllib.parse.urljoin(url, relative)
                # Add the absolute address to the address list
urls.append(real)
return urls
def extract_something(self, url, content):
"""
        Extract the information of interest from the content
        :url: source address of the content
        :content: the content
        """
        # Parse the HTML content with lxml.html.fromstring and build the node tree
        root = lxml.html.fromstring(content)
        # Use a dictionary to store each region name and division code
        locations = dict()
        # Use the length of http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/ as the offset for computing the relative part of the current link address
        offset = len('http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/')
        # Take the part of the current link address after the offset as the relative address
        relative = url[offset:]
        # The number of "/" in the relative address gives the administrative level
        depth = relative.count('/')
        if 'index.html' == relative:
            # Provinces, autonomous regions, municipalities and special administrative regions
            # The link text in the table is the region name; the digits in the link address are the division code
            # CSS selector for the links inside the table cells
            selector = '.provincetr td a'
            # Iterate over every link node
            for node in root.cssselect(selector):
                # Get the link node's address, which is a relative address
                href = node.attrib.get('href', '')
                # Find the offset of "." in the link address
                offset = href.find('.')
                # Take the part of the link address before "." as the division code
                code = href[:offset]
                # Pad with trailing zeros to 12 digits
                code = code.ljust(12, '0')
                # The text content of the link is the region name
                name = node.text_content()
                # Associate the division code with its name
                locations[code] = name
elif depth < 4:
            # Cities, districts/counties, towns/townships, villages/communities
            # In each row the first cell holds the division code and the last cell holds the region name
            if 0 == depth:
                # The first level is cities
                selector = '.citytr'
            elif 1 == depth:
                # The second level is districts and counties
                selector = '.countytr'
            elif 2 == depth:
                # The third level is towns and townships
                selector = '.towntr'
            else:
                # The fourth level is villages and communities
                selector = '.villagetr'
            # Iterate over every node, processing the table row by row
            for node in root.cssselect(selector):
                # Get all the cells in this row
                cells = node.cssselect('td')
                # The content of the first cell is the division code
                code = cells[0].text_content()
                # The content of the last cell is the region name
                name = cells[-1].text_content()
                if code.isdigit():
                    # Only all-digit values are valid division codes
                    locations[code] = name
                else:
                    # Villager groups and community resident groups have no division code
logging.warn(url)
return locations
def dump(self, locations):
"""
        Save the crawled data
        :locations: dictionary of regions extracted from a single page
"""
return self.store(locations)
def filter(self, urls):
"""
        Filter link addresses
        :urls: list of link addresses
        """
        # Use a set to store the link addresses that are allowed to be crawled
        allowed = set()
        # Iterate over every address in the list
        for url in urls:
            if url in self.visited:
                # Addresses that have already been visited may not be crawled again
                continue
            elif not url.startswith('http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/'):
                # Do not crawl link addresses that do not start with http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/
                continue
            else:
                # Addresses that start with http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/ and have not been visited yet are added to the allowed set
allowed.add(url)
return allowed
def work(self):
"""
        Start working
        """
        # Iterate over every address in the seed list
        for url in self.seeds:
            # Crawl one address
self.crawl(url)
return 0
def crawl(self, url):
"""
        Crawl one address
        :url: link address
        """
        # Download the content for the link address
        content = self.download(url)
        if not content:
            # No content, stop crawling this address
            return
        # Add the link address to the set of visited addresses
        self.visited.add(url)
        # Extract the information of interest from the content
        data = self.extract_something(url, content)
        # Save the crawled data
        self.dump(data)
        # Extract link addresses from the content
        urls = self.extract_urls(url, content)
        # Filter the extracted link addresses
        allowed = self.filter(urls)
        # Iterate over these addresses
        for url in allowed:
            # Crawl one address
self.crawl(url)
def store(locations):
"""
    Save the parsed data; here it is simply printed out.
"""
for code, name in locations.items():
msg = '[' + code + ']:' + name
logging.debug(msg)
return 0
if '__main__' == __name__:
logging.info('begin')
    # List of seed addresses
    seeds = ['http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2014/index.html']
    # Create a Spider object
    spider = Spider(seeds, store)
    # Call the spider's work method to start crawling
spider.work()
logging.info('finish')
| cc0-1.0 | 5,744,109,774,003,198,000 | 24.326923 | 111 | 0.51468 | false |
karulont/combopt | project5/draw.py | 1 | 1977 | import turtle
OFFSET=200
MULTIPLE=10
def draw(n,paths):
cords=[(0,0),(n,0),(n,n),(0,n),(0,0)]
turtle.penup()
for c in cords:
turtle.setpos(getCoord(c[0]),getCoord(c[1]))
turtle.pendown()
## turtle.left(90)
## turtle.penup()
## turtle.goto(-OFFSET,-OFFSET)
## turtle.pendown()
prevz=-1
for path in paths:
turtle.penup()
for stepi in range(1,len(path)-1):
step=path[stepi]
if len(step)==2:
continue
x,y,z=step
turtle.pencolor(getrgb(z))
turtle.setpos(getCoord(x),getCoord(y))
print(stepi)
## if stepi==1 or stepi==(len(path)-2):
if prevz!=z or stepi==1 or stepi==(len(path)-2):
turtle.write(str(x)+","+str(y))
prevz=z
turtle.setpos(getCoord(x),getCoord(y))
turtle.pendown()
turtle.ht()
input()
def getCoord(x):
    # Map a grid coordinate to a screen coordinate
    return x*MULTIPLE-OFFSET
def getrgb(z):
    # Pure red/green/blue depending on the layer index z
    x=[0,0,0]
    x[z]=turtle.colormode()
    return tuple(x)
if __name__=="__main__":
draw(48,[[[21, 0], (21, 0, 0), (22, 0, 0), (23, 0, 0), (24, 0, 0), (25, 0, 0), (26, 0, 0), (27, 0, 0), (28, 0, 0), (29, 0, 0), (30, 0, 0), (31, 0, 0), (32, 0, 0), (33, 0, 0), (34, 0, 0), (35, 0, 0), (36, 0, 0), (37, 0, 0), (38, 0, 0), (39, 0, 0), (40, 0, 0), (41, 0, 0), [41, 0]],
[[34, 0], (34, 0, 1), (34, 1, 1), (34, 2, 1), (34, 3, 1), (34, 4, 1), (34, 5, 1), (34, 6, 1), (34, 7, 1), (34, 8, 1), (34, 9, 1), (34, 10, 1), (34, 11, 1), (34, 12, 1), (34, 13, 1), (34, 14, 1), (34, 15, 1), (34, 16, 1), (34, 17, 1), (34, 18, 1), (34, 19, 1), (34, 20, 1), (34, 21, 1), (34, 22, 1), (34, 23, 1), (34, 24, 1), (34, 25, 1), (34, 26, 1), (34, 27, 1), (34, 28, 1), (34, 28, 0), (35, 28, 0), (36, 28, 0), (37, 28, 0), (38, 28, 0), (39, 28, 0), (40, 28, 0), (41, 28, 0), (42, 28, 0), (43, 28, 0), (44, 28, 0), (45, 28, 0), (46, 28, 0), (47, 28, 0), [47, 28]]])
| mit | 3,498,382,939,808,764,000 | 38.54 | 579 | 0.438543 | false |
joaks1/PyMsBayes | pymsbayes/utils/functions.py | 1 | 4664 | #! /usr/bin/env python
import sys
import os
import errno
import random
import string
from pymsbayes.utils import GLOBAL_RNG
from pymsbayes.fileio import process_file_arg
def mkdr(path):
"""
Creates directory `path`, but suppresses error if `path` already exists.
"""
try:
os.makedirs(path)
except OSError, e:
if e.errno == errno.EEXIST:
pass
else:
raise e
def mk_new_dir(path):
attempt = -1
while True:
try:
if attempt < 0:
os.makedirs(path)
return path
else:
p = path.rstrip(os.path.sep) + '-' + str(attempt)
os.makedirs(p)
return p
except OSError, e:
if e.errno == errno.EEXIST:
attempt += 1
continue
else:
raise e
def get_new_path(path, max_attempts = 1000):
path = os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
if not os.path.exists(path):
f = open(path, 'w')
f.close()
return path
attempt = 0
while True:
p = '-'.join([path, str(attempt)])
if not os.path.exists(p):
f = open(p, 'w')
f.close()
return p
if attempt >= max_attempts:
raise Exception('failed to get unique path')
attempt += 1
def get_sublist_greater_than(values, threshold):
return [v for v in values if v > threshold]
def frange(start, stop, num_steps, include_end_point = False):
inc = (float(stop - start) / num_steps)
for i in range(num_steps):
yield start + (i * inc)
if include_end_point:
yield stop
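# Example (illustrative): frange yields `num_steps` evenly spaced values, e.g.
#   list(frange(0, 1, 4))                         -> [0.0, 0.25, 0.5, 0.75]
#   list(frange(0, 1, 4, include_end_point=True)) -> [0.0, 0.25, 0.5, 0.75, 1]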
def random_str(length=8,
char_pool=string.ascii_letters + string.digits):
return ''.join(random.choice(char_pool) for i in range(length))
def get_random_int(rng = GLOBAL_RNG):
return rng.randint(1, 999999999)
def get_indices_of_patterns(target_list, regex_list, sort=True):
indices = []
for regex in regex_list:
indices.extend([i for i, e in enumerate(target_list) if regex.match(e)])
if sort:
return sorted(indices)
return indices
def get_indices_of_strings(target_list, string_list, sort=True):
indices = []
for s in string_list:
indices.extend([i for i, e in enumerate(target_list) if s.strip() == e.strip()])
if sort:
return sorted(indices)
return indices
def list_splitter(l, n, by_size=False):
"""
Returns generator that yields list `l` as `n` sublists, or as `n`-sized
sublists if `by_size` is True.
"""
if n < 1:
raise StopIteration
elif by_size:
for i in range(0, len(l), n):
yield l[i:i+n]
else:
if n > len(l):
n = len(l)
step_size = len(l)/int(n)
if step_size < 1:
step_size = 1
# for i in range(0, len(l), step_size):
# yield l[i:i+step_size]
i = -step_size
for i in range(0, ((n-1)*step_size), step_size):
yield l[i:i+step_size]
yield l[i+step_size:]
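# Example (illustrative): with by_size=False the list is cut into `n` sublists, the last
# one absorbing any remainder; with by_size=True it is cut into chunks of size `n`:
#   list(list_splitter(range(10), 3))               -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
#   list(list_splitter(range(10), 3, by_size=True)) -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]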
def whereis(file_name):
"""
Returns the first absolute path to `file_name` encountered in $PATH.
Returns `None` if `file_name` is not found in $PATH.
"""
paths = os.environ.get('PATH', '').split(':')
for path in paths:
abs_path = os.path.join(path, file_name)
if os.path.exists(abs_path) and not os.path.isdir(abs_path):
return abs_path
break
return None
def is_file(path):
if not path:
return False
if not os.path.isfile(path):
return False
return True
def is_dir(path):
if not path:
return False
if not os.path.isdir(path):
return False
return True
def is_executable(path):
return is_file(path) and os.access(path, os.X_OK)
def which(exe):
if is_executable(exe):
return exe
name = os.path.basename(exe)
for p in os.environ['PATH'].split(os.pathsep):
p = p.strip('"')
exe_path = os.path.join(p, name)
if is_executable(exe_path):
return exe_path
return None
def long_division(dividend, diviser):
n, d = int(dividend), int(diviser)
quotient = n / d
remainder = n - (d * quotient)
return quotient, remainder
def get_tolerance(num_prior_samples, num_posterior_samples):
return num_posterior_samples / float(num_prior_samples)
def least_common_multiple(x):
y = [i for i in x]
while True:
if len(set(y)) == 1:
return y[0]
min_index = y.index(min(y))
y[min_index] += x[min_index]
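# Example (illustrative): least_common_multiple keeps bumping the smallest running
# multiple until all entries agree, e.g. least_common_multiple([4, 6]) -> 12;
# similarly, long_division(7, 2) -> (3, 1).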
| gpl-3.0 | -502,087,976,988,867,900 | 26.116279 | 88 | 0.566681 | false |
minlexx/skype_movie_bot | classes/yandex_translate.py | 1 | 2217 | # -*- coding: utf-8 -*-
import sys
import collections
# external libraries
import requests
import requests.exceptions
class YandexTranslate:
def __init__(self, yandex_api_key: str):
self._apikey = yandex_api_key
self._yt_url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'
def translate(self, q: str, src_lang: str, dst_lang: str, fmt: str = 'plain') -> str:
"""
Translates string using Yandex translation service
:param q: strint to translate
:param src_lang: source lang code ('jp')
:param dst_lang: dest lang code ('en')
:param fmt: text format: 'plain' or 'html'
:return: translated string
"""
retval = ''
if fmt not in ['plain', 'html']:
raise ValueError('fmt must be plain or html!')
params = collections.OrderedDict()
params['key'] = self._apikey
params['text'] = q
params['lang'] = src_lang + '-' + dst_lang
params['format'] = fmt
try:
r = requests.get(self._yt_url, params=params)
r.raise_for_status()
response = r.json()
            if type(response) == dict:
                if 'text' in response:
                    # 'text' is a list of translated strings; join it so the declared -> str return type holds
                    retval = ' '.join(response['text'])
except requests.exceptions.RequestException as re:
sys.stderr.write('Network error: {0}'.format(str(re)))
return retval
def test_yandextranslate(yandex_api_key: str):
yt = YandexTranslate(yandex_api_key)
res = yt.translate('はい', 'ja', 'en')
print(res)
res = yt.translate('少女', 'ja', 'en')
print(res)
res = yt.translate('カグラ使われが送るワイバーン生活 0日目(テスト動画)', 'ja', 'en')
print(res)
def yandex_translate_jp_en(text: str) -> str:
yt = YandexTranslate('trnsl.1.1.20160418T102823Z.888167e74b48bd0b.1c6431f34c3e545d654a8f77054d609de0a87ce3')
    return yt.translate(text, 'ja', 'en')
if __name__ == '__main__':
api = 'trnsl.1.1.20160418T102823Z.888167e74b48bd0b.1c6431f34c3e545d654a8f77054d609de0a87ce3'
test_yandextranslate(api)
| gpl-3.0 | -2,401,171,451,816,979,500 | 29.779412 | 112 | 0.576122 | false |
azizmb/django-activity-stream | actstream/templatetags/activity_tags.py | 1 | 6757 | from django.template import Variable, Library, Node, TemplateSyntaxError,\
VariableDoesNotExist
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from actstream.models import Follow
register = Library()
def _is_following_helper(context, actor):
return Follow.objects.is_following(context.get('user'), actor)
class DisplayActivityFollowLabel(Node):
def __init__(self, actor, follow, unfollow):
self.actor = Variable(actor)
self.follow = follow
self.unfollow = unfollow
def render(self, context):
actor_instance = self.actor.resolve(context)
if _is_following_helper(context, actor_instance):
return self.unfollow
return self.follow
def do_activity_follow_label(parser, tokens):
bits = tokens.contents.split()
if len(bits) != 4:
raise TemplateSyntaxError, "Accepted format {% activity_follow_label [instance] [follow_string] [unfollow_string] %}"
else:
return DisplayActivityFollowLabel(*bits[1:])
class DisplayActivityFollowUrl(Node):
def __init__(self, actor):
self.actor = Variable(actor)
def render(self, context):
actor_instance = self.actor.resolve(context)
content_type = ContentType.objects.get_for_model(actor_instance).pk
if _is_following_helper(context, actor_instance):
return reverse('actstream_unfollow', kwargs={'content_type_id': content_type, 'object_id': actor_instance.pk})
return reverse('actstream_follow', kwargs={'content_type_id': content_type, 'object_id': actor_instance.pk})
def do_activity_follow_url(parser, tokens):
bits = tokens.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError, "Accepted format {% activity_follow_url [instance] %}"
else:
return DisplayActivityFollowUrl(bits[1])
@register.simple_tag
def activity_followers_url(instance):
content_type = ContentType.objects.get_for_model(instance).pk
return reverse('actstream_followers',
kwargs={'content_type_id': content_type, 'object_id': instance.pk})
@register.simple_tag
def activity_followers_count(instance):
return Follow.objects.for_object(instance).count()
class AsNode(Node):
"""
Base template Node class for template tags that takes a predefined number
of arguments, ending in an optional 'as var' section.
"""
args_count = 1
@classmethod
def handle_token(cls, parser, token):
"""
Class method to parse and return a Node.
"""
bits = token.contents.split()
args_count = len(bits) - 1
if args_count >= 2 and bits[-2] == 'as':
as_var = bits[-1]
args_count -= 2
else:
as_var = None
if args_count != cls.args_count:
arg_list = ' '.join(['[arg]' * cls.args_count])
raise TemplateSyntaxError("Accepted formats {%% %(tagname)s "
"%(args)s %%} or {%% %(tagname)s %(args)s as [var] %%}" %
{'tagname': bits[0], 'args': arg_list})
args = [parser.compile_filter(token) for token in
bits[1:args_count + 1]]
return cls(args, varname=as_var)
def __init__(self, args, varname=None):
self.args = args
self.varname = varname
def render(self, context):
result = self.render_result(context)
if self.varname is not None:
context[self.varname] = result
return ''
return result
def render_result(self, context):
raise NotImplementedError("Must be implemented by a subclass")
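# A minimal sketch of how an AsNode-based tag is used in a template (assuming an
# `action` variable is available in the template context); handle_token() accepts
# both forms:
#
#   {% display_action action %}
#   {% display_action action as rendered_action %}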
class DisplayActionLabel(AsNode):
def render_result(self, context):
actor_instance = self.args[0].resolve(context)
try:
user = Variable("request.user").resolve(context)
except VariableDoesNotExist:
user = None
try:
if user and user == actor_instance.user:
result = " your "
else:
result = " %s's " % (actor_instance.user.get_full_name() or
actor_instance.user.username)
except ValueError:
result = ""
result += actor_instance.get_label()
return result
class DisplayAction(AsNode):
def render_result(self, context):
action_instance = self.args[0].resolve(context)
templates = [
'activity/%s/action.html' % action_instance.verb.replace(' ', '_'),
'activity/action.html',
]
return render_to_string(templates, {'action': action_instance},
context)
class DisplayActionShort(AsNode):
    def render_result(self, context):
        action_instance = self.args[0].resolve(context)
        templates = [
            'activity/%s/action.html' % action_instance.verb.replace(' ', '_'),
            'activity/action.html',
        ]
        return render_to_string(templates, {'action': action_instance,
            'hide_actor': True}, context)
class DisplayGroupedActions(AsNode):
def render(self, context):
actions_instance = self.args[0].resolve(context)
templates = [
'activity/%s/action.html' %
actions_instance.verb.replace(' ', '_'),
'activity/action.html',
]
return render_to_string(templates, {'actions': actions_instance},
context)
class UserContentTypeNode(Node):
def __init__(self, *args):
self.args = args
def render(self, context):
context[self.args[-1]] = ContentType.objects.get_for_model(User)
return ''
def display_action(parser, token):
return DisplayAction.handle_token(parser, token)
def display_action_short(parser, token):
return DisplayActionShort.handle_token(parser, token)
def display_grouped_actions(parser, token):
return DisplayGroupedActions.handle_token(parser, token)
def action_label(parser, token):
return DisplayActionLabel.handle_token(parser, token)
# TODO: remove this, it's heinous
def get_user_contenttype(parser, token):
return UserContentTypeNode(*token.split_contents())
def is_following(user, actor):
return Follow.objects.is_following(user, actor)
register.filter(is_following)
register.tag(display_action)
register.tag(display_action_short)
register.tag(display_grouped_actions)
register.tag(action_label)
register.tag(get_user_contenttype)
register.tag('activity_follow_url', do_activity_follow_url)
register.tag('activity_follow_label', do_activity_follow_label)
| bsd-3-clause | -565,753,785,344,135,000 | 31.485577 | 125 | 0.641261 | false |
qilicun/python | python2/PyMOTW-1.132/PyMOTW/zipimport/zipimport_get_data.py | 1 | 1323 | #!/usr/bin/env python
#
# Copyright 2007 Doug Hellmann.
#
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Doug
# Hellmann not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# DOUG HELLMANN DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Retrieving the data for a module within a zip archive.
"""
#end_pymotw_header
import sys
sys.path.insert(0, 'zipimport_example.zip')
import os
import example_package
print example_package.__file__
print example_package.__loader__.get_data('example_package/README.txt')
| gpl-3.0 | -5,497,708,476,643,799,000 | 34.756757 | 71 | 0.76644 | false |