Dataset schema: one row per source file, with the following columns.

| column | type | range / values |
|---|---|---|
| repo_name | string | lengths 5-92 |
| path | string | lengths 4-232 |
| copies | string | 19 distinct values |
| size | string | lengths 4-7 |
| content | string | lengths 721-1.04M |
| license | string | 15 distinct values |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 15 to 997 |
| alpha_frac | float64 | 0.25 to 0.97 |
| autogenerated | bool | 1 class |

Each row below is shown as a header line (repo_name | path | copies | size | license) followed by the file content.
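For readers who want to slice the table rather than scroll it, the column names above are enough to filter rows programmatically. The sketch below is illustrative only; it assumes the table has been exported to a local Parquet file, and the file name `code_files.parquet` is a placeholder.

```python
import pandas as pd

# Load a hypothetical local export of this table.
df = pd.read_parquet("code_files.parquet")

# Keep human-written files with moderate line lengths and a reasonable
# fraction of alphabetic characters, using the columns described above.
subset = df[(~df["autogenerated"]) & (df["line_max"] < 200) & (df["alpha_frac"] > 0.3)]

for _, row in subset.iterrows():
    print(row["repo_name"], row["path"], row["license"], len(row["content"]))
```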

jfhbrook/pyee | tests/test_async.py | copies: 1 | size: 3955 | license: mit

# -*- coding: utf-8 -*-
import pytest
import pytest_asyncio.plugin # noqa
from asyncio import Future, wait_for
try:
from asyncio.exceptions import TimeoutError
except ImportError:
from concurrent.futures import TimeoutError
from mock import Mock
from twisted.internet.defer import succeed
from pyee import AsyncIOEventEmitter, TwistedEventEmitter
class PyeeTestError(Exception):
pass
@pytest.mark.asyncio
async def test_asyncio_emit(event_loop):
"""Test that AsyncIOEventEmitter can handle wrapping
coroutines
"""
ee = AsyncIOEventEmitter(loop=event_loop)
should_call = Future(loop=event_loop)
@ee.on('event')
async def event_handler():
should_call.set_result(True)
ee.emit('event')
result = await wait_for(should_call, 0.1)
assert result is True
@pytest.mark.asyncio
async def test_asyncio_once_emit(event_loop):
"""Test that AsyncIOEventEmitter also wrap coroutines when
using once
"""
ee = AsyncIOEventEmitter(loop=event_loop)
should_call = Future(loop=event_loop)
@ee.once('event')
async def event_handler():
should_call.set_result(True)
ee.emit('event')
result = await wait_for(should_call, 0.1)
assert result is True
@pytest.mark.asyncio
async def test_asyncio_error(event_loop):
"""Test that AsyncIOEventEmitter can handle errors when
wrapping coroutines
"""
ee = AsyncIOEventEmitter(loop=event_loop)
should_call = Future(loop=event_loop)
@ee.on('event')
async def event_handler():
raise PyeeTestError()
@ee.on('error')
def handle_error(exc):
should_call.set_result(exc)
ee.emit('event')
result = await wait_for(should_call, 0.1)
assert isinstance(result, PyeeTestError)
@pytest.mark.asyncio
async def test_asyncio_cancellation(event_loop):
"""Test that AsyncIOEventEmitter can handle Future cancellations"""
cancel_me = Future(loop=event_loop)
should_not_call = Future(loop=event_loop)
ee = AsyncIOEventEmitter(loop=event_loop)
@ee.on('event')
async def event_handler():
cancel_me.cancel()
@ee.on('error')
def handle_error(exc):
should_not_call.set_result(None)
ee.emit('event')
try:
await wait_for(should_not_call, 0.1)
except TimeoutError:
pass
else:
raise PyeeTestError()
@pytest.mark.asyncio
async def test_sync_error(event_loop):
"""Test that regular functions have the same error handling as coroutines
"""
ee = AsyncIOEventEmitter(loop=event_loop)
should_call = Future(loop=event_loop)
@ee.on('event')
def sync_handler():
raise PyeeTestError()
@ee.on('error')
def handle_error(exc):
should_call.set_result(exc)
ee.emit('event')
result = await wait_for(should_call, 0.1)
assert isinstance(result, PyeeTestError)
def test_twisted_emit():
"""Test that TwistedEventEmitter can handle wrapping
coroutines
"""
ee = TwistedEventEmitter()
should_call = Mock()
@ee.on('event')
async def event_handler():
_ = await succeed('yes!')
should_call(True)
ee.emit('event')
should_call.assert_called_once()
def test_twisted_once():
"""Test that TwistedEventEmitter also wraps coroutines for
once
"""
ee = TwistedEventEmitter()
should_call = Mock()
@ee.once('event')
async def event_handler():
_ = await succeed('yes!')
should_call(True)
ee.emit('event')
should_call.assert_called_once()
def test_twisted_error():
"""Test that TwistedEventEmitters handle Failures when wrapping coroutines.
"""
ee = TwistedEventEmitter()
should_call = Mock()
@ee.on('event')
async def event_handler():
raise PyeeTestError()
@ee.on('failure')
def handle_error(e):
should_call(e)
ee.emit('event')
should_call.assert_called_once()
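# --- Illustrative sketch, not part of the original test module ---
# Minimal stand-alone version of the error-handling behaviour exercised above:
# a coroutine handler that raises is routed to the 'error' listener. Only the
# pyee API already imported in this file is assumed.
def _example_error_handling():
    import asyncio

    loop = asyncio.new_event_loop()
    ee = AsyncIOEventEmitter(loop=loop)

    @ee.on('event')
    async def handler():
        raise PyeeTestError()

    @ee.on('error')
    def on_error(exc):
        print('caught:', exc)

    ee.emit('event')
    # Give the scheduled coroutine a chance to run and fail.
    loop.run_until_complete(asyncio.sleep(0.1))
    loop.close()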

cedricbonhomme/pyAggr3g470r | newspipe/notifications/emails.py | copies: 1 | size: 3028 | license: agpl-3.0

#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Newspipe - A web news aggregator.
# Copyright (C) 2010-2021 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information: https://sr.ht/~cedric/newspipe
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email import charset
from newspipe.bootstrap import application
from newspipe.web.decorators import async_maker
logger = logging.getLogger(__name__)
@async_maker
def send_async_email(mfrom, mto, msg):
try:
s = smtplib.SMTP(application.config["NOTIFICATION_HOST"])
s.login(
application.config["NOTIFICATION_USERNAME"],
application.config["NOTIFICATION_PASSWORD"],
)
except Exception:
logger.exception("send_async_email raised:")
else:
s.sendmail(mfrom, mto, msg.as_bytes().decode(encoding="UTF-8"))
s.quit()
def send(*args, **kwargs):
"""
    This function enables sending email via different methods.
"""
send_smtp(**kwargs)
def send_smtp(to="", subject="", plaintext="", html=""):
"""
Send an email.
"""
# Create message container - the correct MIME type is multipart/alternative.
msg = MIMEMultipart("alternative")
ch = charset.add_charset("utf-8", charset.QP)
msg.set_charset(ch)
msg["Subject"] = subject
msg["From"] = application.config["MAIL_DEFAULT_SENDER"]
msg["To"] = to
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(plaintext, "plain", "utf-8")
# part2 = MIMEText(html, "html", "utf-8")
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
# msg.attach(part2)
try:
s = smtplib.SMTP(application.config["MAIL_SERVER"])
if application.config["MAIL_USERNAME"] is not None:
s.login(
application.config["MAIL_USERNAME"],
application.config["MAIL_PASSWORD"],
)
except Exception:
logger.exception("send_smtp raised:")
else:
s.sendmail(
application.config["MAIL_DEFAULT_SENDER"],
msg["To"],
msg.as_bytes().decode(encoding="UTF-8"),
)
s.quit()
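# --- Illustrative sketch, not part of the original module ---
# Typical invocation of the helper above. The address and subject are
# placeholders, and application.config must already contain the MAIL_*
# settings read by send_smtp().
def _example_send_notification():
    send(
        to="user@example.org",
        subject="Newspipe notification",
        plaintext="A new article matching your filters was published.",
    )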

vvinuv/pymorph | pymorph/yetbackfunc.py | copies: 1 | size: 3453 | license: gpl-2.0

import os
import sys
import numpy as np
import pymconvolve
import numpy.ma as ma
import fitsio
from mask_or_fit import GetSExObj
from runsexfunc import RunSex
def QuarterMask(z, zm, xcntr, ycntr, bbya, pa, quarter):
nxpts, nypts = z.shape
zmm = np.ones_like(z)
co = np.cos(pa * np.pi / 180.0)
si = np.sin(pa * np.pi / 180.0)
one_minus_eg_sq = (bbya)**2.0
x, y = np.meshgrid(np.arange(nxpts), np.arange(nypts))
xrot = (x - xcntr) * co + (y - ycntr) * si
xrot = xrot.T
xsq = xrot**2.0
yrot = (xcntr - x) * si + (y - ycntr) * co
yrot = yrot.T
ysq = yrot**2.0
r = np.sqrt(xsq + ysq / one_minus_eg_sq)
if quarter == 0:
condition = xrot > -1e5
if quarter == 1:
condition = (xrot - 0 >= 0) & (yrot - 0 >= 0)
if quarter == 2:
condition = (xrot - 0 < 0) & (yrot - 0 >= 0)
if quarter == 3:
condition = (xrot - 0 < 0) & (yrot - 0 < 0)
if quarter == 4:
condition = (xrot - 0 >= 0) & (yrot - 0 < 0)
zmm[condition] = 0
zmm = zm + zmm
zmm[np.where(zmm > 0)] = 1
return np.median(ma.masked_array(z, zmm).compressed())
def FindYetSky(fstring, sex_params, SEX_PATH, gimg, wimg, scat,
X0, Y0, check_fits, SEx_GAIN,
center_err=5., median_std=1.3, sconfig='seg', verbose=False):
#from astropy.io import fits
if verbose:
print(scat)
RunSex(sex_params, SEX_PATH, gimg, wimg, scat, SEx_GAIN,
check_fits=check_fits, sconfig='seg')
f = fitsio.FITS(gimg)
z = f[0].read()
f.close()
if verbose:
print(z.shape)
print(gimg)
fseg = fitsio.FITS(check_fits)
zm = fseg[0].read()
fseg.close()
#f = fits.open(gimg)
#z = f[0].data
#f.close()
#fseg = fits.open(check_fits)
#zm = fseg[0].data
#fseg.close()
if verbose:
print(zm.shape)
SexSky, SkyYet = 9999, 9999
SkyMed, SkyMin = 9999, 9999
SkyQua, SkySig = 9999, 9999
for l_s in open(scat):
v_s = [float(l) for l in l_s.split()]
obj = GetSExObj(NXPTS=None, NYPTS=None, values=v_s)
#sys.exit()
SexId = obj.sex_num
xcntr = obj.xcntr
ycntr = obj.ycntr
pa = obj.pos_ang
bbya = obj.bbya
a = obj.maj_axis
b = a * bbya
hr = obj.radius
sky = obj.sky
if np.abs(X0 - obj.xcntr) < center_err and np.abs(Y0 - obj.ycntr) < center_err:
boxcar = np.reshape(np.ones(3 * 3), (3, 3))
zm = pymconvolve.Convolve(zm, boxcar)
zm[np.where(zm > 0)] = 1
SkyQua = []
for ii in np.arange(1, 5):
SkyQua.append(QuarterMask(z, zm,
obj.xcntr - 1.0, obj.ycntr - 1.0,
bbya, pa, ii))
SkyQua = np.array(SkyQua)
SexSky = obj.sky
tmpstd = np.std(ma.masked_array(z, zm).compressed())
tmpmed = np.median(ma.masked_array(z, zm).compressed())
zm[np.where((z - tmpmed) > median_std * tmpstd)] = 1
SkyYet = np.median(ma.masked_array(z, zm).compressed())
SkyMed = np.median(SkyQua)
SkyMin = np.min(SkyQua)
SkySig = np.std(ma.masked_array(z, zm).compressed())
# os.system('rm -f SegCat.cat default_seg.sex seg.fits')
break
return SexSky, SkyYet, SkyMed, SkyMin, SkyQua, SkySig

sylsaint/computation | chapter1/spec/nfa_spec.py | copies: 1 | size: 1757 | license: mit

#!/usr/bin/env python
#!coding:utf-8
import unittest
import sys
import set_path
from nfa import NFA
class TestNFA(unittest.TestCase):
EPSILON = 'epsilon'
start = 'q0'
final = {'q0'}
sigma = ['0', '1', 'epsilon']
states = ['q0', 'q1', 'q2']
ttable = [[{}, {'q1'}, {'q2'}],
[{'q1', 'q2'}, {'q2'}, {}],
[{'q0'}, {}, {}]]
nfa = NFA(states, sigma, ttable, start, final)
def test_e_transition(self):
self.nfa.transition('q0', self.EPSILON)
self.assertEqual(self.nfa.is_accepted(), True)
def test_1_transition(self):
self.nfa.transition('q0', '1')
self.assertEqual(self.nfa.is_accepted(), False)
def test_multi_transition(self):
self.nfa.transitions({'q0', 'q2'}, '1')
self.assertEqual(self.nfa.is_accepted(), False)
self.assertSetEqual(self.nfa.current, {'q1'})
def test_sequence_110(self):
self.nfa.reset()
self.nfa.handle('110')
self.assertEqual(self.nfa.is_accepted(), True)
self.assertSetEqual(self.nfa.current, {'q0', 'q2'})
def test_sequence_100(self):
self.nfa.reset()
self.nfa.handle('100')
self.assertEqual(self.nfa.is_accepted(), True)
self.assertSetEqual(self.nfa.current, {'q0', 'q1', 'q2'})
def test_sequence_000(self):
self.nfa.reset()
self.nfa.handle('000')
self.assertEqual(self.nfa.is_accepted(), True)
self.assertSetEqual(self.nfa.current, {'q0', 'q2'})
def test_sequence_111(self):
self.nfa.reset()
self.nfa.handle('111')
self.assertEqual(self.nfa.is_accepted(), False)
self.assertSetEqual(self.nfa.current, set())
if __name__ == "__main__":
unittest.main()
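# --- Illustrative sketch, not part of the original spec ---
# Driving the NFA from the tests directly: rows of the transition table are
# states, columns follow sigma ('0', '1', 'epsilon'), and each cell holds the
# set of successor states. Only the NFA API imported above is assumed.
def _example_run_nfa():
    states = ['q0', 'q1', 'q2']
    sigma = ['0', '1', 'epsilon']
    ttable = [[{}, {'q1'}, {'q2'}],
              [{'q1', 'q2'}, {'q2'}, {}],
              [{'q0'}, {}, {}]]
    nfa = NFA(states, sigma, ttable, 'q0', {'q0'})
    nfa.handle('110')
    return nfa.is_accepted()  # True for this machine, per test_sequence_110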

DataDog/integrations-core | network/datadog_checks/network/network.py | copies: 1 | size: 47373 | license: bsd-3-clause

# (C) Datadog, Inc. 2010-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
"""
Collects network metrics.
"""
import array
import distutils.spawn
import os
import re
import socket
import struct
from collections import defaultdict
import psutil
from six import PY3, iteritems, itervalues
from datadog_checks.base import AgentCheck, ConfigurationError, is_affirmative
from datadog_checks.base.utils.common import pattern_filter
from datadog_checks.base.utils.platform import Platform
from datadog_checks.base.utils.subprocess_output import SubprocessOutputEmptyError, get_subprocess_output
try:
import datadog_agent
except ImportError:
from datadog_checks.base.stubs import datadog_agent
try:
import fcntl
except ImportError:
fcntl = None
if PY3:
long = int
BSD_TCP_METRICS = [
(re.compile(r"^\s*(\d+) data packets \(\d+ bytes\) retransmitted\s*$"), 'system.net.tcp.retrans_packs'),
(re.compile(r"^\s*(\d+) packets sent\s*$"), 'system.net.tcp.sent_packs'),
(re.compile(r"^\s*(\d+) packets received\s*$"), 'system.net.tcp.rcv_packs'),
]
SOLARIS_TCP_METRICS = [
(re.compile(r"\s*tcpRetransSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.retrans_segs'),
(re.compile(r"\s*tcpOutDataSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.in_segs'),
(re.compile(r"\s*tcpInSegs\s*=\s*(\d+)\s*"), 'system.net.tcp.out_segs'),
]
# constants for extracting ethtool data via ioctl
SIOCETHTOOL = 0x8946
ETHTOOL_GSTRINGS = 0x0000001B
ETHTOOL_GSSET_INFO = 0x00000037
ETHTOOL_GSTATS = 0x0000001D
ETH_SS_STATS = 0x1
ETH_GSTRING_LEN = 32
# ENA metrics that we're collecting
# https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-Agent-network-performance.html
ENA_METRIC_PREFIX = "aws.ec2."
ENA_METRIC_NAMES = [
"bw_in_allowance_exceeded",
"bw_out_allowance_exceeded",
"conntrack_allowance_exceeded",
"linklocal_allowance_exceeded",
"pps_allowance_exceeded",
]
class Network(AgentCheck):
SOURCE_TYPE_NAME = 'system'
PSUTIL_TYPE_MAPPING = {socket.SOCK_STREAM: 'tcp', socket.SOCK_DGRAM: 'udp'}
PSUTIL_FAMILY_MAPPING = {socket.AF_INET: '4', socket.AF_INET6: '6'}
def check(self, instance):
if instance is None:
instance = {}
self._excluded_ifaces = instance.get('excluded_interfaces', [])
if not isinstance(self._excluded_ifaces, list):
raise ConfigurationError(
"Expected 'excluded_interfaces' to be a list, got '{}'".format(type(self._excluded_ifaces).__name__)
)
self._collect_cx_state = instance.get('collect_connection_state', False)
self._collect_cx_queues = instance.get('collect_connection_queues', False)
self._collect_rate_metrics = instance.get('collect_rate_metrics', True)
self._collect_count_metrics = instance.get('collect_count_metrics', False)
self._collect_ena_metrics = instance.get('collect_aws_ena_metrics', False)
if fcntl is None and self._collect_ena_metrics:
raise ConfigurationError("fcntl not importable, collect_aws_ena_metrics should be disabled")
# This decides whether we should split or combine connection states,
# along with a few other things
self._setup_metrics(instance)
self._exclude_iface_re = None
exclude_re = instance.get('excluded_interface_re', None)
if exclude_re:
self.log.debug("Excluding network devices matching: %s", exclude_re)
self._exclude_iface_re = re.compile(exclude_re)
if Platform.is_linux():
self._check_linux(instance)
elif Platform.is_bsd():
self._check_bsd(instance)
elif Platform.is_solaris():
self._check_solaris(instance)
elif Platform.is_windows():
self._check_psutil(instance)
def _setup_metrics(self, instance):
self._combine_connection_states = instance.get('combine_connection_states', True)
if self._combine_connection_states:
self.cx_state_gauge = {
('udp4', 'connections'): 'system.net.udp4.connections',
('udp6', 'connections'): 'system.net.udp6.connections',
('tcp4', 'established'): 'system.net.tcp4.established',
('tcp4', 'opening'): 'system.net.tcp4.opening',
('tcp4', 'closing'): 'system.net.tcp4.closing',
('tcp4', 'listening'): 'system.net.tcp4.listening',
('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
('tcp6', 'established'): 'system.net.tcp6.established',
('tcp6', 'opening'): 'system.net.tcp6.opening',
('tcp6', 'closing'): 'system.net.tcp6.closing',
('tcp6', 'listening'): 'system.net.tcp6.listening',
('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
}
self.tcp_states = {
"ss": {
"ESTAB": "established",
"SYN-SENT": "opening",
"SYN-RECV": "opening",
"FIN-WAIT-1": "closing",
"FIN-WAIT-2": "closing",
"TIME-WAIT": "time_wait",
"UNCONN": "closing",
"CLOSE-WAIT": "closing",
"LAST-ACK": "closing",
"LISTEN": "listening",
"CLOSING": "closing",
},
"netstat": {
"ESTABLISHED": "established",
"SYN_SENT": "opening",
"SYN_RECV": "opening",
"FIN_WAIT1": "closing",
"FIN_WAIT2": "closing",
"TIME_WAIT": "time_wait",
"CLOSE": "closing",
"CLOSE_WAIT": "closing",
"LAST_ACK": "closing",
"LISTEN": "listening",
"CLOSING": "closing",
},
"psutil": {
psutil.CONN_ESTABLISHED: "established",
psutil.CONN_SYN_SENT: "opening",
psutil.CONN_SYN_RECV: "opening",
psutil.CONN_FIN_WAIT1: "closing",
psutil.CONN_FIN_WAIT2: "closing",
psutil.CONN_TIME_WAIT: "time_wait",
psutil.CONN_CLOSE: "closing",
psutil.CONN_CLOSE_WAIT: "closing",
psutil.CONN_LAST_ACK: "closing",
psutil.CONN_LISTEN: "listening",
psutil.CONN_CLOSING: "closing",
psutil.CONN_NONE: "connections", # CONN_NONE is always returned for udp connections
},
}
else:
self.cx_state_gauge = {
('udp4', 'connections'): 'system.net.udp4.connections',
('udp6', 'connections'): 'system.net.udp6.connections',
('tcp4', 'estab'): 'system.net.tcp4.estab',
('tcp4', 'syn_sent'): 'system.net.tcp4.syn_sent',
('tcp4', 'syn_recv'): 'system.net.tcp4.syn_recv',
('tcp4', 'fin_wait_1'): 'system.net.tcp4.fin_wait_1',
('tcp4', 'fin_wait_2'): 'system.net.tcp4.fin_wait_2',
('tcp4', 'time_wait'): 'system.net.tcp4.time_wait',
('tcp4', 'unconn'): 'system.net.tcp4.unconn',
('tcp4', 'close'): 'system.net.tcp4.close',
('tcp4', 'close_wait'): 'system.net.tcp4.close_wait',
('tcp4', 'closing'): 'system.net.tcp4.closing',
('tcp4', 'listen'): 'system.net.tcp4.listen',
('tcp4', 'last_ack'): 'system.net.tcp4.time_wait',
('tcp6', 'estab'): 'system.net.tcp6.estab',
('tcp6', 'syn_sent'): 'system.net.tcp6.syn_sent',
('tcp6', 'syn_recv'): 'system.net.tcp6.syn_recv',
('tcp6', 'fin_wait_1'): 'system.net.tcp6.fin_wait_1',
('tcp6', 'fin_wait_2'): 'system.net.tcp6.fin_wait_2',
('tcp6', 'time_wait'): 'system.net.tcp6.time_wait',
('tcp6', 'unconn'): 'system.net.tcp6.unconn',
('tcp6', 'close'): 'system.net.tcp6.close',
('tcp6', 'close_wait'): 'system.net.tcp6.close_wait',
('tcp6', 'closing'): 'system.net.tcp6.closing',
('tcp6', 'listen'): 'system.net.tcp6.listen',
('tcp6', 'last_ack'): 'system.net.tcp6.time_wait',
}
self.tcp_states = {
"ss": {
"ESTAB": "estab",
"SYN-SENT": "syn_sent",
"SYN-RECV": "syn_recv",
"FIN-WAIT-1": "fin_wait_1",
"FIN-WAIT-2": "fin_wait_2",
"TIME-WAIT": "time_wait",
"UNCONN": "unconn",
"CLOSE-WAIT": "close_wait",
"LAST-ACK": "last_ack",
"LISTEN": "listen",
"CLOSING": "closing",
},
"netstat": {
"ESTABLISHED": "estab",
"SYN_SENT": "syn_sent",
"SYN_RECV": "syn_recv",
"FIN_WAIT1": "fin_wait_1",
"FIN_WAIT2": "fin_wait_2",
"TIME_WAIT": "time_wait",
"CLOSE": "close",
"CLOSE_WAIT": "close_wait",
"LAST_ACK": "last_ack",
"LISTEN": "listen",
"CLOSING": "closing",
},
"psutil": {
psutil.CONN_ESTABLISHED: "estab",
psutil.CONN_SYN_SENT: "syn_sent",
psutil.CONN_SYN_RECV: "syn_recv",
psutil.CONN_FIN_WAIT1: "fin_wait_1",
psutil.CONN_FIN_WAIT2: "fin_wait_2",
psutil.CONN_TIME_WAIT: "time_wait",
psutil.CONN_CLOSE: "close",
psutil.CONN_CLOSE_WAIT: "close_wait",
psutil.CONN_LAST_ACK: "last_ack",
psutil.CONN_LISTEN: "listen",
psutil.CONN_CLOSING: "closing",
psutil.CONN_NONE: "connections", # CONN_NONE is always returned for udp connections
},
}
def _submit_netmetric(self, metric, value, tags=None):
if self._collect_rate_metrics:
self.rate(metric, value, tags=tags)
if self._collect_count_metrics:
self.monotonic_count('{}.count'.format(metric), value, tags=tags)
def _submit_devicemetrics(self, iface, vals_by_metric, tags):
if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
# Skip this network interface.
return False
# adding the device to the tags as device_name is deprecated
metric_tags = [] if tags is None else tags[:]
metric_tags.append('device:{}'.format(iface))
expected_metrics = [
'bytes_rcvd',
'bytes_sent',
'packets_in.count',
'packets_in.error',
'packets_out.count',
'packets_out.error',
]
for m in expected_metrics:
assert m in vals_by_metric
assert len(vals_by_metric) == len(expected_metrics)
count = 0
for metric, val in iteritems(vals_by_metric):
self.rate('system.net.%s' % metric, val, tags=metric_tags)
count += 1
self.log.debug("tracked %s network metrics for interface %s", count, iface)
def _submit_ena_metrics(self, iface, vals_by_metric, tags):
if iface in self._excluded_ifaces or (self._exclude_iface_re and self._exclude_iface_re.match(iface)):
# Skip this network interface.
return False
metric_tags = [] if tags is None else tags[:]
metric_tags.append('device:{}'.format(iface))
allowed = [ENA_METRIC_PREFIX + m for m in ENA_METRIC_NAMES]
for m in vals_by_metric:
assert m in allowed
count = 0
for metric, val in iteritems(vals_by_metric):
self.gauge('system.net.%s' % metric, val, tags=metric_tags)
count += 1
self.log.debug("tracked %s network ena metrics for interface %s", count, iface)
def _parse_value(self, v):
try:
return long(v)
except ValueError:
return 0
def _submit_regexed_values(self, output, regex_list, tags):
lines = output.splitlines()
for line in lines:
for regex, metric in regex_list:
value = re.match(regex, line)
if value:
self._submit_netmetric(metric, self._parse_value(value.group(1)), tags=tags)
def _is_collect_cx_state_runnable(self, proc_location):
"""
Determine if collect_connection_state is set and can effectively run.
If self._collect_cx_state is True and a custom proc_location is provided, the system cannot
run `ss` or `netstat` over a custom proc_location
:param proc_location: str
:return: bool
"""
if self._collect_cx_state is False:
return False
if proc_location != "/proc":
# If we have `ss`, we're fine with a non-standard `/proc` location
if distutils.spawn.find_executable("ss") is None:
self.warning(
"Cannot collect connection state: `ss` cannot be found and "
"currently with a custom /proc path: %s",
proc_location,
)
return False
else:
return True
return True
def _check_linux(self, instance):
"""
_check_linux can be run inside a container and still collects the network metrics from the host
For that procfs_path can be set to something like "/host/proc"
When a custom procfs_path is set, the collect_connection_state option is ignored
"""
proc_location = datadog_agent.get_config('procfs_path')
if not proc_location:
proc_location = '/proc'
proc_location = proc_location.rstrip('/')
custom_tags = instance.get('tags', [])
net_proc_base_location = self._get_net_proc_base_location(proc_location)
if self._is_collect_cx_state_runnable(net_proc_base_location):
try:
self.log.debug("Using `ss` to collect connection state")
# Try using `ss` for increased performance over `netstat`
ss_env = {"PROC_ROOT": net_proc_base_location}
                # By providing the environment variables in ss_env, the PATH will be overridden. In CentOS,
# datadog-agent PATH is "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", while sh PATH
# will be '/usr/local/bin:/usr/bin'. In CentOS, ss is located in /sbin and /usr/sbin, not
# in the sh PATH, which will result in network metric collection failure.
#
# The line below will set sh PATH explicitly as the datadog-agent PATH to fix that issue.
if "PATH" in os.environ:
ss_env["PATH"] = os.environ["PATH"]
metrics = self._get_metrics()
for ip_version in ['4', '6']:
# Call `ss` for each IP version because there's no built-in way of distinguishing
# between the IP versions in the output
# Also calls `ss` for each protocol, because on some systems (e.g. Ubuntu 14.04), there is a
# bug that print `tcp` even if it's `udp`
# The `-H` flag isn't available on old versions of `ss`.
cmd = "ss --numeric --tcp --all --ipv{} | cut -d ' ' -f 1 | sort | uniq -c".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
# 7624 CLOSE-WAIT
# 72 ESTAB
# 9 LISTEN
# 1 State
# 37 TIME-WAIT
lines = output.splitlines()
self._parse_short_state_lines(lines, metrics, self.tcp_states['ss'], ip_version=ip_version)
cmd = "ss --numeric --udp --all --ipv{} | wc -l".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
metric = self.cx_state_gauge[('udp{}'.format(ip_version), 'connections')]
metrics[metric] = int(output) - 1 # Remove header
if self._collect_cx_queues:
cmd = "ss --numeric --tcp --all --ipv{}".format(ip_version)
output, _, _ = get_subprocess_output(["sh", "-c", cmd], self.log, env=ss_env)
for (state, recvq, sendq) in self._parse_queues("ss", output):
self.histogram('system.net.tcp.recv_q', recvq, custom_tags + ["state:" + state])
self.histogram('system.net.tcp.send_q', sendq, custom_tags + ["state:" + state])
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
except OSError as e:
self.log.info("`ss` invocation failed: %s. Using `netstat` as a fallback", str(e))
output, _, _ = get_subprocess_output(["netstat", "-n", "-u", "-t", "-a"], self.log)
lines = output.splitlines()
# Active Internet connections (w/o servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State
# tcp 0 0 46.105.75.4:80 79.220.227.193:2032 SYN_RECV
# tcp 0 0 46.105.75.4:143 90.56.111.177:56867 ESTABLISHED
# tcp 0 0 46.105.75.4:50468 107.20.207.175:443 TIME_WAIT
# tcp6 0 0 46.105.75.4:80 93.15.237.188:58038 FIN_WAIT2
# tcp6 0 0 46.105.75.4:80 79.220.227.193:2029 ESTABLISHED
# udp 0 0 0.0.0.0:123 0.0.0.0:*
# udp6 0 0 :::41458 :::*
metrics = self._parse_linux_cx_state(lines[2:], self.tcp_states['netstat'], 5)
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
if self._collect_cx_queues:
for (state, recvq, sendq) in self._parse_queues("netstat", output):
self.histogram('system.net.tcp.recv_q', recvq, custom_tags + ["state:" + state])
self.histogram('system.net.tcp.send_q', sendq, custom_tags + ["state:" + state])
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection states.")
proc_dev_path = "{}/net/dev".format(net_proc_base_location)
try:
with open(proc_dev_path, 'r') as proc:
lines = proc.readlines()
except IOError:
# On Openshift, /proc/net/snmp is only readable by root
self.log.debug("Unable to read %s.", proc_dev_path)
lines = []
# Inter-| Receive | Transmit
# face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed # noqa: E501
# lo:45890956 112797 0 0 0 0 0 0 45890956 112797 0 0 0 0 0 0 # noqa: E501
# eth0:631947052 1042233 0 19 0 184 0 1206 1208625538 1320529 0 0 0 0 0 0 # noqa: E501
# eth1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 # noqa: E501
for line in lines[2:]:
cols = line.split(':', 1)
x = cols[1].split()
# Filter inactive interfaces
if self._parse_value(x[0]) or self._parse_value(x[8]):
iface = cols[0].strip()
metrics = {
'bytes_rcvd': self._parse_value(x[0]),
'bytes_sent': self._parse_value(x[8]),
'packets_in.count': self._parse_value(x[1]),
'packets_in.error': self._parse_value(x[2]) + self._parse_value(x[3]),
'packets_out.count': self._parse_value(x[9]),
'packets_out.error': self._parse_value(x[10]) + self._parse_value(x[11]),
}
self._submit_devicemetrics(iface, metrics, custom_tags)
# read ENA metrics, if configured and available
if self._collect_ena_metrics:
ena_metrics = self._collect_ena(iface)
if ena_metrics:
self._submit_ena_metrics(iface, ena_metrics, custom_tags)
netstat_data = {}
for f in ['netstat', 'snmp']:
proc_data_path = "{}/net/{}".format(net_proc_base_location, f)
try:
with open(proc_data_path, 'r') as netstat:
while True:
n_header = netstat.readline()
if not n_header:
break # No more? Abort!
n_data = netstat.readline()
h_parts = n_header.strip().split(' ')
h_values = n_data.strip().split(' ')
ns_category = h_parts[0][:-1]
netstat_data[ns_category] = {}
# Turn the data into a dictionary
for idx, hpart in enumerate(h_parts[1:]):
netstat_data[ns_category][hpart] = h_values[idx + 1]
except IOError:
# On Openshift, /proc/net/snmp is only readable by root
self.log.debug("Unable to read %s.", proc_data_path)
nstat_metrics_names = {
'Tcp': {
'RetransSegs': 'system.net.tcp.retrans_segs',
'InSegs': 'system.net.tcp.in_segs',
'OutSegs': 'system.net.tcp.out_segs',
},
'TcpExt': {
'ListenOverflows': 'system.net.tcp.listen_overflows',
'ListenDrops': 'system.net.tcp.listen_drops',
'TCPBacklogDrop': 'system.net.tcp.backlog_drops',
'TCPRetransFail': 'system.net.tcp.failed_retransmits',
},
'Udp': {
'InDatagrams': 'system.net.udp.in_datagrams',
'NoPorts': 'system.net.udp.no_ports',
'InErrors': 'system.net.udp.in_errors',
'OutDatagrams': 'system.net.udp.out_datagrams',
'RcvbufErrors': 'system.net.udp.rcv_buf_errors',
'SndbufErrors': 'system.net.udp.snd_buf_errors',
'InCsumErrors': 'system.net.udp.in_csum_errors',
},
}
# Skip the first line, as it's junk
for k in nstat_metrics_names:
for met in nstat_metrics_names[k]:
if met in netstat_data.get(k, {}):
self._submit_netmetric(
nstat_metrics_names[k][met], self._parse_value(netstat_data[k][met]), tags=custom_tags
)
# Get the conntrack -S information
conntrack_path = instance.get('conntrack_path')
use_sudo_conntrack = is_affirmative(instance.get('use_sudo_conntrack', True))
if conntrack_path is not None:
self._add_conntrack_stats_metrics(conntrack_path, use_sudo_conntrack, custom_tags)
# Get the rest of the metric by reading the files. Metrics available since kernel 3.6
conntrack_files_location = os.path.join(proc_location, 'sys', 'net', 'netfilter')
        # By default, only max and count are reported. However, if the blacklist is set,
        # the whitelist loses its default value.
blacklisted_files = instance.get('blacklist_conntrack_metrics')
whitelisted_files = instance.get('whitelist_conntrack_metrics')
if blacklisted_files is None and whitelisted_files is None:
whitelisted_files = ['max', 'count']
available_files = []
# Get the metrics to read
try:
for metric_file in os.listdir(conntrack_files_location):
if (
os.path.isfile(os.path.join(conntrack_files_location, metric_file))
and 'nf_conntrack_' in metric_file
):
available_files.append(metric_file[len('nf_conntrack_') :])
except Exception as e:
self.log.debug("Unable to list the files in %s. %s", conntrack_files_location, e)
filtered_available_files = pattern_filter(
available_files, whitelist=whitelisted_files, blacklist=blacklisted_files
)
for metric_name in filtered_available_files:
metric_file_location = os.path.join(conntrack_files_location, 'nf_conntrack_{}'.format(metric_name))
try:
with open(metric_file_location, 'r') as conntrack_file:
# Checking it's an integer
try:
value = int(conntrack_file.read().rstrip())
self.gauge('system.net.conntrack.{}'.format(metric_name), value, tags=custom_tags)
except ValueError:
self.log.debug("%s is not an integer", metric_name)
except IOError as e:
self.log.debug("Unable to read %s, skipping %s.", metric_file_location, e)
@staticmethod
def _get_net_proc_base_location(proc_location):
if Platform.is_containerized() and proc_location != "/proc":
net_proc_base_location = "%s/1" % proc_location
else:
net_proc_base_location = proc_location
return net_proc_base_location
def _add_conntrack_stats_metrics(self, conntrack_path, use_sudo_conntrack, tags):
"""
Parse the output of conntrack -S
Add the parsed metrics
"""
try:
cmd = [conntrack_path, "-S"]
if use_sudo_conntrack:
cmd.insert(0, "sudo")
output, _, _ = get_subprocess_output(cmd, self.log)
# conntrack -S sample:
# cpu=0 found=27644 invalid=19060 ignore=485633411 insert=0 insert_failed=1 \
# drop=1 early_drop=0 error=0 search_restart=39936711
# cpu=1 found=21960 invalid=17288 ignore=475938848 insert=0 insert_failed=1 \
# drop=1 early_drop=0 error=0 search_restart=36983181
lines = output.splitlines()
for line in lines:
cols = line.split()
cpu_num = cols[0].split('=')[-1]
cpu_tag = ['cpu:{}'.format(cpu_num)]
cols = cols[1:]
for cell in cols:
metric, value = cell.split('=')
self.monotonic_count('system.net.conntrack.{}'.format(metric), int(value), tags=tags + cpu_tag)
except SubprocessOutputEmptyError:
self.log.debug("Couldn't use %s to get conntrack stats", conntrack_path)
def _get_metrics(self):
return {val: 0 for val in itervalues(self.cx_state_gauge)}
def _parse_short_state_lines(self, lines, metrics, tcp_states, ip_version):
for line in lines:
value, state = line.split()
proto = "tcp{0}".format(ip_version)
if state in tcp_states:
metric = self.cx_state_gauge[proto, tcp_states[state]]
metrics[metric] += int(value)
def _parse_linux_cx_state(self, lines, tcp_states, state_col, protocol=None, ip_version=None):
"""
Parse the output of the command that retrieves the connection state (either `ss` or `netstat`)
Returns a dict metric_name -> value
"""
metrics = self._get_metrics()
for l in lines:
cols = l.split()
if cols[0].startswith('tcp') or protocol == 'tcp':
proto = "tcp{0}".format(ip_version) if ip_version else ("tcp4", "tcp6")[cols[0] == "tcp6"]
if cols[state_col] in tcp_states:
metric = self.cx_state_gauge[proto, tcp_states[cols[state_col]]]
metrics[metric] += 1
elif cols[0].startswith('udp') or protocol == 'udp':
proto = "udp{0}".format(ip_version) if ip_version else ("udp4", "udp6")[cols[0] == "udp6"]
metric = self.cx_state_gauge[proto, 'connections']
metrics[metric] += 1
return metrics
def _check_bsd(self, instance):
netstat_flags = ['-i', '-b']
custom_tags = instance.get('tags', [])
# FreeBSD's netstat truncates device names unless you pass '-W'
if Platform.is_freebsd():
netstat_flags.append('-W')
try:
output, _, _ = get_subprocess_output(["netstat"] + netstat_flags, self.log)
lines = output.splitlines()
# Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll
# lo0 16384 <Link#1> 318258 0 428252203 318258 0 428252203 0
# lo0 16384 localhost fe80:1::1 318258 - 428252203 318258 - 428252203 -
# lo0 16384 127 localhost 318258 - 428252203 318258 - 428252203 -
# lo0 16384 localhost ::1 318258 - 428252203 318258 - 428252203 -
# gif0* 1280 <Link#2> 0 0 0 0 0 0 0
# stf0* 1280 <Link#3> 0 0 0 0 0 0 0
# en0 1500 <Link#4> 04:0c:ce:db:4e:fa 20801309 0 13835457425 15149389 0 11508790198 0
# en0 1500 seneca.loca fe80:4::60c:ceff: 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 192.168.1 192.168.1.63 20801309 - 13835457425 15149389 - 11508790198 -
# en0 1500 2001:470:1f 2001:470:1f07:11d 20801309 - 13835457425 15149389 - 11508790198 -
# p2p0 2304 <Link#5> 06:0c:ce:db:4e:fa 0 0 0 0 0 0 0
# ham0 1404 <Link#6> 7a:79:05:4d:bf:f5 30100 0 6815204 18742 0 8494811 0
# ham0 1404 5 5.77.191.245 30100 - 6815204 18742 - 8494811 -
# ham0 1404 seneca.loca fe80:6::7879:5ff: 30100 - 6815204 18742 - 8494811 -
# ham0 1404 2620:9b::54 2620:9b::54d:bff5 30100 - 6815204 18742 - 8494811 -
headers = lines[0].split()
# Given the irregular structure of the table above, better to parse from the end of each line
# Verify headers first
# -7 -6 -5 -4 -3 -2 -1
for h in ("Ipkts", "Ierrs", "Ibytes", "Opkts", "Oerrs", "Obytes", "Coll"):
if h not in headers:
self.log.error("%s not found in %s; cannot parse", h, headers)
return False
current = None
for l in lines[1:]:
# Another header row, abort now, this is IPv6 land
if "Name" in l:
break
x = l.split()
if len(x) == 0:
break
iface = x[0]
if iface.endswith("*"):
iface = iface[:-1]
if iface == current:
# skip multiple lines of same interface
continue
else:
current = iface
# Filter inactive interfaces
if self._parse_value(x[-5]) or self._parse_value(x[-2]):
iface = current
metrics = {
'bytes_rcvd': self._parse_value(x[-5]),
'bytes_sent': self._parse_value(x[-2]),
'packets_in.count': self._parse_value(x[-7]),
'packets_in.error': self._parse_value(x[-6]),
'packets_out.count': self._parse_value(x[-4]),
'packets_out.error': self._parse_value(x[-3]),
}
self._submit_devicemetrics(iface, metrics, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection stats.")
try:
netstat, _, _ = get_subprocess_output(["netstat", "-s", "-p" "tcp"], self.log)
# 3651535 packets sent
# 972097 data packets (615753248 bytes)
# 5009 data packets (2832232 bytes) retransmitted
# 0 resends initiated by MTU discovery
# 2086952 ack-only packets (471 delayed)
# 0 URG only packets
# 0 window probe packets
# 310851 window update packets
# 336829 control packets
# 0 data packets sent after flow control
# 3058232 checksummed in software
# 3058232 segments (571218834 bytes) over IPv4
# 0 segments (0 bytes) over IPv6
# 4807551 packets received
# 1143534 acks (for 616095538 bytes)
# 165400 duplicate acks
# ...
self._submit_regexed_values(netstat, BSD_TCP_METRICS, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting TCP stats.")
proc_location = self.agentConfig.get('procfs_path', '/proc').rstrip('/')
net_proc_base_location = self._get_net_proc_base_location(proc_location)
if self._is_collect_cx_state_runnable(net_proc_base_location):
try:
self.log.debug("Using `netstat` to collect connection state")
output_TCP, _, _ = get_subprocess_output(["netstat", "-n", "-a", "-p", "tcp"], self.log)
output_UDP, _, _ = get_subprocess_output(["netstat", "-n", "-a", "-p", "udp"], self.log)
lines = output_TCP.splitlines() + output_UDP.splitlines()
# Active Internet connections (w/o servers)
# Proto Recv-Q Send-Q Local Address Foreign Address State
# tcp 0 0 46.105.75.4:80 79.220.227.193:2032 SYN_RECV
# tcp 0 0 46.105.75.4:143 90.56.111.177:56867 ESTABLISHED
# tcp 0 0 46.105.75.4:50468 107.20.207.175:443 TIME_WAIT
# tcp6 0 0 46.105.75.4:80 93.15.237.188:58038 FIN_WAIT2
# tcp6 0 0 46.105.75.4:80 79.220.227.193:2029 ESTABLISHED
# udp 0 0 0.0.0.0:123 0.0.0.0:*
# udp6 0 0 :::41458 :::*
metrics = self._parse_linux_cx_state(lines[2:], self.tcp_states['netstat'], 5)
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting connection states.")
def _check_solaris(self, instance):
# Can't get bytes sent and received via netstat
# Default to kstat -p link:0:
custom_tags = instance.get('tags', [])
try:
netstat, _, _ = get_subprocess_output(["kstat", "-p", "link:0:"], self.log)
metrics_by_interface = self._parse_solaris_netstat(netstat)
for interface, metrics in iteritems(metrics_by_interface):
self._submit_devicemetrics(interface, metrics, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting kstat stats.")
try:
netstat, _, _ = get_subprocess_output(["netstat", "-s", "-P" "tcp"], self.log)
# TCP: tcpRtoAlgorithm= 4 tcpRtoMin = 200
# tcpRtoMax = 60000 tcpMaxConn = -1
# tcpActiveOpens = 57 tcpPassiveOpens = 50
# tcpAttemptFails = 1 tcpEstabResets = 0
# tcpCurrEstab = 0 tcpOutSegs = 254
# tcpOutDataSegs = 995 tcpOutDataBytes =1216733
# tcpRetransSegs = 0 tcpRetransBytes = 0
# tcpOutAck = 185 tcpOutAckDelayed = 4
# ...
self._submit_regexed_values(netstat, SOLARIS_TCP_METRICS, custom_tags)
except SubprocessOutputEmptyError:
self.log.exception("Error collecting TCP stats.")
def _parse_solaris_netstat(self, netstat_output):
"""
Return a mapping of network metrics by interface. For example:
{ interface:
{'bytes_sent': 0,
'bytes_rcvd': 0,
'bytes_rcvd': 0,
...
}
}
"""
# Here's an example of the netstat output:
#
# link:0:net0:brdcstrcv 527336
# link:0:net0:brdcstxmt 1595
# link:0:net0:class net
# link:0:net0:collisions 0
# link:0:net0:crtime 16359935.2637943
# link:0:net0:ierrors 0
# link:0:net0:ifspeed 10000000000
# link:0:net0:ipackets 682834
# link:0:net0:ipackets64 682834
# link:0:net0:link_duplex 0
# link:0:net0:link_state 1
# link:0:net0:multircv 0
# link:0:net0:multixmt 1595
# link:0:net0:norcvbuf 0
# link:0:net0:noxmtbuf 0
# link:0:net0:obytes 12820668
# link:0:net0:obytes64 12820668
# link:0:net0:oerrors 0
# link:0:net0:opackets 105445
# link:0:net0:opackets64 105445
# link:0:net0:rbytes 113983614
# link:0:net0:rbytes64 113983614
# link:0:net0:snaptime 16834735.1607669
# link:0:net0:unknowns 0
# link:0:net0:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# link:0:net1:brdcstrcv 4947620
# link:0:net1:brdcstxmt 1594
# link:0:net1:class net
# link:0:net1:collisions 0
# link:0:net1:crtime 16359935.2839167
# link:0:net1:ierrors 0
# link:0:net1:ifspeed 10000000000
# link:0:net1:ipackets 4947620
# link:0:net1:ipackets64 4947620
# link:0:net1:link_duplex 0
# link:0:net1:link_state 1
# link:0:net1:multircv 0
# link:0:net1:multixmt 1594
# link:0:net1:norcvbuf 0
# link:0:net1:noxmtbuf 0
# link:0:net1:obytes 73324
# link:0:net1:obytes64 73324
# link:0:net1:oerrors 0
# link:0:net1:opackets 1594
# link:0:net1:opackets64 1594
# link:0:net1:rbytes 304384894
# link:0:net1:rbytes64 304384894
# link:0:net1:snaptime 16834735.1613302
# link:0:net1:unknowns 0
# link:0:net1:zonename 53aa9b7e-48ba-4152-a52b-a6368c3d9e7c
# A mapping of solaris names -> datadog names
metric_by_solaris_name = {
'rbytes64': 'bytes_rcvd',
'obytes64': 'bytes_sent',
'ipackets64': 'packets_in.count',
'ierrors': 'packets_in.error',
'opackets64': 'packets_out.count',
'oerrors': 'packets_out.error',
}
lines = [l for l in netstat_output.splitlines() if len(l) > 0]
metrics_by_interface = {}
for l in lines:
# Parse the metric & interface.
cols = l.split()
link, n, iface, name = cols[0].split(":")
assert link == "link"
# Get the datadog metric name.
ddname = metric_by_solaris_name.get(name, None)
if ddname is None:
continue
# Add it to this interface's list of metrics.
metrics = metrics_by_interface.get(iface, {})
metrics[ddname] = self._parse_value(cols[1])
metrics_by_interface[iface] = metrics
return metrics_by_interface
def _check_psutil(self, instance):
"""
Gather metrics about connections states and interfaces counters
using psutil facilities
"""
custom_tags = instance.get('tags', [])
if self._collect_cx_state:
self._cx_state_psutil(tags=custom_tags)
self._cx_counters_psutil(tags=custom_tags)
def _cx_state_psutil(self, tags=None):
"""
Collect metrics about connections state using psutil
"""
metrics = defaultdict(int)
tags = [] if tags is None else tags
for conn in psutil.net_connections():
protocol = self._parse_protocol_psutil(conn)
status = self.tcp_states['psutil'].get(conn.status)
metric = self.cx_state_gauge.get((protocol, status))
if metric is None:
self.log.warning('Metric not found for: %s,%s', protocol, status)
else:
metrics[metric] += 1
for metric, value in iteritems(metrics):
self.gauge(metric, value, tags=tags)
def _cx_counters_psutil(self, tags=None):
"""
Collect metrics about interfaces counters using psutil
"""
tags = [] if tags is None else tags
for iface, counters in iteritems(psutil.net_io_counters(pernic=True)):
metrics = {
'bytes_rcvd': counters.bytes_recv,
'bytes_sent': counters.bytes_sent,
'packets_in.count': counters.packets_recv,
'packets_in.error': counters.errin,
'packets_out.count': counters.packets_sent,
'packets_out.error': counters.errout,
}
self._submit_devicemetrics(iface, metrics, tags)
def _parse_protocol_psutil(self, conn):
"""
Returns a string describing the protocol for the given connection
in the form `tcp4`, 'udp4` as in `self.cx_state_gauge`
"""
protocol = self.PSUTIL_TYPE_MAPPING.get(conn.type, '')
family = self.PSUTIL_FAMILY_MAPPING.get(conn.family, '')
return '{}{}'.format(protocol, family)
def _parse_queues(self, tool, ss_output):
"""
for each line of `ss_output`, returns a triplet with:
* a connection state (`established`, `listening`)
* the receive queue size
* the send queue size
"""
for line in ss_output.splitlines():
fields = line.split()
if len(fields) < (6 if tool == "netstat" else 3):
continue
state_column = 0 if tool == "ss" else 5
try:
state = self.tcp_states[tool][fields[state_column]]
except KeyError:
continue
yield (state, fields[1], fields[2])
def _collect_ena(self, iface):
"""
Collect ENA metrics for given interface.
ENA metrics are collected via the ioctl SIOCETHTOOL call. At the time of writing
this method, there are no maintained Python libraries that do this. The solution
is based on:
* https://github.com/safchain/ethtool
* https://gist.github.com/yunazuno/d7cd7e1e127a39192834c75d85d45df9
"""
ethtool_socket = None
try:
ethtool_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
return self._get_ena_metrics(iface, ethtool_socket)
except OSError as e:
# this will happen for interfaces that don't support SIOCETHTOOL - e.g. loopback or docker
self.log.debug('OSError while trying to collect ENA metrics for interface %s: %s', iface, str(e))
except Exception:
self.log.exception('Unable to collect ENA metrics for interface %s', iface)
finally:
if ethtool_socket is not None:
ethtool_socket.close()
return {}
def _send_ethtool_ioctl(self, iface, sckt, data):
"""
Send an ioctl SIOCETHTOOL call for given interface with given data.
"""
ifr = struct.pack('16sP', iface.encode('utf-8'), data.buffer_info()[0])
fcntl.ioctl(sckt.fileno(), SIOCETHTOOL, ifr)
def _get_ethtool_gstringset(self, iface, sckt):
"""
Retrieve names of all ethtool stats for given interface.
"""
sset_info = array.array('B', struct.pack('IIQI', ETHTOOL_GSSET_INFO, 0, 1 << ETH_SS_STATS, 0))
self._send_ethtool_ioctl(iface, sckt, sset_info)
sset_mask, sset_len = struct.unpack('8xQI', sset_info)
if sset_mask == 0:
sset_len = 0
strings = array.array('B', struct.pack('III', ETHTOOL_GSTRINGS, ETH_SS_STATS, sset_len))
strings.extend([0] * sset_len * ETH_GSTRING_LEN)
self._send_ethtool_ioctl(iface, sckt, strings)
all_names = []
for i in range(sset_len):
offset = 12 + ETH_GSTRING_LEN * i
s = strings[offset : offset + ETH_GSTRING_LEN]
s = s.tobytes() if PY3 else s.tostring()
s = s.partition(b'\x00')[0].decode('utf-8')
all_names.append(s)
return all_names
def _get_ena_metrics(self, iface, sckt):
"""
Get all ENA metrics specified in ENA_METRICS_NAMES list and their values from ethtool.
"""
stats_names = list(self._get_ethtool_gstringset(iface, sckt))
stats_count = len(stats_names)
stats = array.array('B', struct.pack('II', ETHTOOL_GSTATS, stats_count))
# we need `stats_count * (length of uint64)` for the result
stats.extend([0] * len(struct.pack('Q', 0)) * stats_count)
self._send_ethtool_ioctl(iface, sckt, stats)
metrics = {}
for i, stat_name in enumerate(stats_names):
if stat_name in ENA_METRIC_NAMES:
offset = 8 + 8 * i
value = struct.unpack('Q', stats[offset : offset + 8])[0]
metrics[ENA_METRIC_PREFIX + stat_name] = value
return metrics
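# --- Illustrative sketch, not part of the shipped check ---
# Stand-alone version of the /proc/net/dev parsing done in _check_linux above,
# handy for inspecting the raw per-interface counters outside the Agent. The
# column indices mirror the ones used by the check; the real code is more
# defensive about non-numeric fields.
def _example_read_proc_net_dev(path='/proc/net/dev'):
    results = {}
    with open(path) as f:
        lines = f.readlines()
    for line in lines[2:]:  # skip the two header lines
        iface, data = line.split(':', 1)
        x = data.split()
        results[iface.strip()] = {
            'bytes_rcvd': int(x[0]),
            'packets_in.count': int(x[1]),
            'packets_in.error': int(x[2]) + int(x[3]),
            'bytes_sent': int(x[8]),
            'packets_out.count': int(x[9]),
            'packets_out.error': int(x[10]) + int(x[11]),
        }
    return results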

artwr/airflow | tests/hooks/test_oracle_hook.py | copies: 2 | size: 10269 | license: apache-2.0

# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
from datetime import datetime
try:
import cx_Oracle
except ImportError:
cx_Oracle = None
import mock
import numpy
from airflow.hooks.oracle_hook import OracleHook
from airflow.models.connection import Connection
@unittest.skipIf(cx_Oracle is None, 'cx_Oracle package not present')
class TestOracleHookConn(unittest.TestCase):
def setUp(self):
super(TestOracleHookConn, self).setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
port=1521
)
self.db_hook = OracleHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_host(self, mock_connect):
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['user'], 'login')
self.assertEqual(kwargs['password'], 'password')
self.assertEqual(kwargs['dsn'], 'host')
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_sid(self, mock_connect):
dsn_sid = {'dsn': 'dsn', 'sid': 'sid'}
self.connection.extra = json.dumps(dsn_sid)
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['dsn'],
cx_Oracle.makedsn(dsn_sid['dsn'],
self.connection.port, dsn_sid['sid']))
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_service_name(self, mock_connect):
dsn_service_name = {'dsn': 'dsn', 'service_name': 'service_name'}
self.connection.extra = json.dumps(dsn_service_name)
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['dsn'], cx_Oracle.makedsn(
dsn_service_name['dsn'], self.connection.port,
service_name=dsn_service_name['service_name']))
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_encoding_without_nencoding(self, mock_connect):
self.connection.extra = json.dumps({'encoding': 'UTF-8'})
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['encoding'], 'UTF-8')
self.assertEqual(kwargs['nencoding'], 'UTF-8')
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_encoding_with_nencoding(self, mock_connect):
self.connection.extra = json.dumps({'encoding': 'UTF-8', 'nencoding': 'gb2312'})
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['encoding'], 'UTF-8')
self.assertEqual(kwargs['nencoding'], 'gb2312')
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_nencoding(self, mock_connect):
self.connection.extra = json.dumps({'nencoding': 'UTF-8'})
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertNotIn('encoding', kwargs)
self.assertEqual(kwargs['nencoding'], 'UTF-8')
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_mode(self, mock_connect):
mode = {
'sysdba': cx_Oracle.SYSDBA,
'sysasm': cx_Oracle.SYSASM,
'sysoper': cx_Oracle.SYSOPER,
'sysbkp': cx_Oracle.SYSBKP,
'sysdgd': cx_Oracle.SYSDGD,
'syskmt': cx_Oracle.SYSKMT,
}
first = True
for m in mode:
self.connection.extra = json.dumps({'mode': m})
self.db_hook.get_conn()
if first:
mock_connect.assert_called_once()
first = False
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['mode'], mode.get(m))
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_threaded(self, mock_connect):
self.connection.extra = json.dumps({'threaded': True})
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['threaded'], True)
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_events(self, mock_connect):
self.connection.extra = json.dumps({'events': True})
self.db_hook.get_conn()
mock_connect.assert_called_once()
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['events'], True)
@mock.patch('airflow.hooks.oracle_hook.cx_Oracle.connect')
def test_get_conn_purity(self, mock_connect):
purity = {
'new': cx_Oracle.ATTR_PURITY_NEW,
'self': cx_Oracle.ATTR_PURITY_SELF,
'default': cx_Oracle.ATTR_PURITY_DEFAULT
}
first = True
for p in purity:
self.connection.extra = json.dumps({'purity': p})
self.db_hook.get_conn()
if first:
mock_connect.assert_called_once()
first = False
args, kwargs = mock_connect.call_args
self.assertEqual(args, ())
self.assertEqual(kwargs['purity'], purity.get(p))
@unittest.skipIf(cx_Oracle is None, 'cx_Oracle package not present')
class TestOracleHook(unittest.TestCase):
def setUp(self):
super(TestOracleHook, self).setUp()
self.cur = mock.MagicMock()
self.conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestOracleHook(OracleHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = UnitTestOracleHook()
def test_run_without_parameters(self):
sql = 'SQL'
self.db_hook.run(sql)
self.cur.execute.assert_called_once_with(sql)
self.conn.commit.assert_called()
def test_run_with_parameters(self):
sql = 'SQL'
param = ('p1', 'p2')
self.db_hook.run(sql, parameters=param)
self.cur.execute.assert_called_once_with(sql, param)
self.conn.commit.assert_called()
def test_insert_rows_with_fields(self):
rows = [("'basestr_with_quote", None, numpy.NAN,
numpy.datetime64('2019-01-24T01:02:03'),
datetime(2019, 1, 24), 1, 10.24, 'str')]
target_fields = ['basestring', 'none', 'numpy_nan', 'numpy_datetime64',
'datetime', 'int', 'float', 'str']
self.db_hook.insert_rows('table', rows, target_fields)
self.cur.execute.assert_called_once_with(
"INSERT /*+ APPEND */ INTO table "
"(basestring, none, numpy_nan, numpy_datetime64, datetime, int, float, str) "
"VALUES ('''basestr_with_quote',NULL,NULL,'2019-01-24T01:02:03',"
"to_date('2019-01-24 00:00:00','YYYY-MM-DD HH24:MI:SS'),1,10.24,'str')")
def test_insert_rows_without_fields(self):
rows = [("'basestr_with_quote", None, numpy.NAN,
numpy.datetime64('2019-01-24T01:02:03'),
datetime(2019, 1, 24), 1, 10.24, 'str')]
self.db_hook.insert_rows('table', rows)
self.cur.execute.assert_called_once_with(
"INSERT /*+ APPEND */ INTO table "
" VALUES ('''basestr_with_quote',NULL,NULL,'2019-01-24T01:02:03',"
"to_date('2019-01-24 00:00:00','YYYY-MM-DD HH24:MI:SS'),1,10.24,'str')")
def test_bulk_insert_rows_with_fields(self):
rows = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
target_fields = ['col1', 'col2', 'col3']
self.db_hook.bulk_insert_rows('table', rows, target_fields)
self.cur.prepare.assert_called_once_with(
"insert into table (col1, col2, col3) values (:1, :2, :3)")
self.cur.executemany.assert_called_once_with(None, rows)
def test_bulk_insert_rows_with_commit_every(self):
rows = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
target_fields = ['col1', 'col2', 'col3']
self.db_hook.bulk_insert_rows('table', rows, target_fields, commit_every=2)
self.cur.prepare.assert_called_with(
"insert into table (col1, col2, col3) values (:1, :2, :3)")
self.cur.executemany.assert_called_with(None, rows[2:])
def test_bulk_insert_rows_without_fields(self):
rows = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]
self.db_hook.bulk_insert_rows('table', rows)
self.cur.prepare.assert_called_once_with(
"insert into table values (:1, :2, :3)")
self.cur.executemany.assert_called_once_with(None, rows)
def test_bulk_insert_rows_no_rows(self):
rows = []
self.assertRaises(ValueError, self.db_hook.bulk_insert_rows, 'table', rows)
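# --- Illustrative sketch, not part of the original test module ---
# The DSN strings asserted above come straight from cx_Oracle.makedsn, built
# from the connection's host/port and the 'sid' or 'service_name' extras:
def _example_expected_dsns():
    sid_dsn = cx_Oracle.makedsn('dsn', 1521, 'sid')
    service_dsn = cx_Oracle.makedsn('dsn', 1521, service_name='service_name')
    return sid_dsn, service_dsn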

pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/dynamic/slalom_hyq_interpKino05.py | copies: 1 | size: 4188 | license: lgpl-3.0

#Importing helper class for RBPRM
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.gepetto import Viewer
#calling script darpa_hyq_path to compute root path
import slalom_hyq_pathKino05 as tp
from os import environ
ins_dir = environ['DEVEL_DIR']
db_dir = ins_dir+"/install/share/hyq-rbprm/database/hyq_"
pathId = tp.ps.numberPaths()-1
packageName = "hyq_description"
meshPackageName = "hyq_description"
rootJointType = "freeflyer"
# Information to retrieve urdf and srdf files.
urdfName = "hyq"
urdfSuffix = ""
srdfSuffix = ""
# This time we load the full body model of HyQ
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.client.basic.robot.setDimensionExtraConfigSpace(tp.extraDof)
fullBody.setJointBounds ("base_joint_xyz", [-6,6, -2.5, 2.5, 0.0, 1.])
# Setting a number of sample configurations used
nbSamples = 20000
dynamic=True
ps = tp.ProblemSolver(fullBody)
ps.client.problem.setParameter("aMax",tp.aMax)
ps.client.problem.setParameter("vMax",tp.vMax)
r = tp.Viewer (ps,viewerClient=tp.r.client)
rootName = 'base_joint_xyz'
def addLimbDb(limbId, heuristicName, loadValues = True, disableEffectorCollision = False):
fullBody.addLimbDatabase(str(db_dir+limbId+'.db'), limbId, heuristicName,loadValues, disableEffectorCollision)
rLegId = 'rfleg'
lLegId = 'lhleg'
rarmId = 'rhleg'
larmId = 'lfleg'
addLimbDb(rLegId, "manipulability")
addLimbDb(lLegId, "manipulability")
addLimbDb(rarmId, "manipulability")
addLimbDb(larmId, "manipulability")
q_0 = fullBody.getCurrentConfig();
q_init = fullBody.getCurrentConfig(); q_init[0:7] = tp.ps.configAtParam(0,0.01)[0:7] # use this to get the correct orientation
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = tp.ps.configAtParam(pathId,tp.ps.pathLength(pathId))[0:7]
dir_init = tp.ps.configAtParam(pathId,0.01)[7:10]
acc_init = tp.ps.configAtParam(pathId,0.01)[10:13]
dir_goal = tp.ps.configAtParam(pathId,tp.ps.pathLength(pathId))[7:10]
acc_goal = tp.ps.configAtParam(pathId,tp.ps.pathLength(pathId))[10:13]
configSize = fullBody.getConfigSize() -fullBody.client.basic.robot.getDimensionExtraConfigSpace()
# copy extraconfig for start and init configurations
q_init[configSize:configSize+3] = dir_init[::]
q_init[configSize+3:configSize+6] = acc_init[::]
q_goal[configSize:configSize+3] = dir_goal[::]
q_goal[configSize+3:configSize+6] = acc_goal[::]
fullBody.setStaticStability(False)
# Randomly generating a contact configuration at q_init
fullBody.setCurrentConfig (q_init)
q_init = fullBody.generateContacts(q_init,dir_init,acc_init,2)
# Randomly generating a contact configuration at q_end
fullBody.setCurrentConfig (q_goal)
q_goal = fullBody.generateContacts(q_goal, dir_goal,acc_goal,2)
# specifying the full body configurations as start and goal state of the problem
fullBody.setStartState(q_init,[larmId,rLegId,rarmId,lLegId])
fullBody.setEndState(q_goal,[larmId,rLegId,rarmId,lLegId])
r(q_init)
# computing the contact sequence
configs = fullBody.interpolate(0.08,pathId=pathId,robustnessTreshold = 0, filterStates = True)
print "number of configs =", len(configs)
r(configs[-1])
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
import fullBodyPlayer
player = fullBodyPlayer.Player(fullBody,pp,tp,configs,draw=True,optim_effector=False,use_velocity=dynamic,pathId = pathId)
#player.displayContactPlan()
r(configs[5])
player.interpolate(5,99)
#player.play()
"""
camera = [0.5681925415992737,
-6.707448482513428,
2.5206544399261475,
0.8217507600784302,
0.5693002343177795,
0.020600343123078346,
0.01408931240439415]
r.client.gui.setCameraTransform(0,camera)
"""
"""
import hpp.corbaserver.rbprm.tools.cwc_trajectory
import hpp.corbaserver.rbprm.tools.path_to_trajectory
import hpp.corbaserver.rbprm.tools.cwc_trajectory_helper
reload(hpp.corbaserver.rbprm.tools.cwc_trajectory)
reload(hpp.corbaserver.rbprm.tools.path_to_trajectory)
reload(hpp.corbaserver.rbprm.tools.cwc_trajectory_helper)
reload(fullBodyPlayer)
"""
| lgpl-3.0 | -8,926,711,783,031,606,000 | 28.914286 | 126 | 0.776027 | false |
endolith/scikit-image | skimage/restoration/_denoise.py | 1 | 10008 | # coding: utf-8
import numpy as np
from .. import img_as_float
from ..restoration._denoise_cy import _denoise_bilateral, _denoise_tv_bregman
from .._shared.utils import _mode_deprecations
def denoise_bilateral(image, win_size=5, sigma_range=None, sigma_spatial=1,
bins=10000, mode='constant', cval=0):
"""Denoise image using bilateral filter.
This is an edge-preserving and noise reducing denoising filter. It averages
pixels based on their spatial closeness and radiometric similarity.
    Spatial closeness is measured by the Gaussian function of the Euclidean
    distance between two pixels and a certain standard deviation
    (`sigma_spatial`).
    Radiometric similarity is measured by the Gaussian function of the Euclidean
    distance between two color values and a certain standard deviation
    (`sigma_range`).
Parameters
----------
image : ndarray, shape (M, N[, 3])
Input image, 2D grayscale or RGB.
win_size : int
Window size for filtering.
sigma_range : float
Standard deviation for grayvalue/color distance (radiometric
similarity). A larger value results in averaging of pixels with larger
radiometric differences. Note, that the image will be converted using
the `img_as_float` function and thus the standard deviation is in
respect to the range ``[0, 1]``. If the value is ``None`` the standard
deviation of the ``image`` will be used.
sigma_spatial : float
Standard deviation for range distance. A larger value results in
averaging of pixels with larger spatial differences.
bins : int
Number of discrete values for gaussian weights of color filtering.
A larger value results in improved accuracy.
mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}
How to handle values outside the image borders. See
`numpy.pad` for detail.
    cval : float
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
denoised : ndarray
Denoised image.
References
----------
.. [1] http://users.soe.ucsc.edu/~manduchi/Papers/ICCV98.pdf
Example
-------
>>> from skimage import data, img_as_float
>>> astro = img_as_float(data.astronaut())
>>> astro = astro[220:300, 220:320]
>>> noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
>>> noisy = np.clip(noisy, 0, 1)
>>> denoised = denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15)
"""
mode = _mode_deprecations(mode)
return _denoise_bilateral(image, win_size, sigma_range, sigma_spatial,
bins, mode, cval)
def denoise_tv_bregman(image, weight, max_iter=100, eps=1e-3, isotropic=True):
"""Perform total-variation denoising using split-Bregman optimization.
    Total-variation denoising (also known as total-variation regularization)
tries to find an image with less total-variation under the constraint
of being similar to the input image, which is controlled by the
regularization parameter.
Parameters
----------
image : ndarray
        Input data to be denoised (converted using `img_as_float`).
weight : float
Denoising weight. The smaller the `weight`, the more denoising (at
the expense of less similarity to the `input`). The regularization
parameter `lambda` is chosen as `2 * weight`.
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when::
SUM((u(n) - u(n-1))**2) < eps
max_iter : int, optional
Maximal number of iterations used for the optimization.
isotropic : boolean, optional
Switch between isotropic and anisotropic TV denoising.
Returns
-------
u : ndarray
Denoised image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Total_variation_denoising
.. [2] Tom Goldstein and Stanley Osher, "The Split Bregman Method For L1
Regularized Problems",
ftp://ftp.math.ucla.edu/pub/camreport/cam08-29.pdf
.. [3] Pascal Getreuer, "Rudin–Osher–Fatemi Total Variation Denoising
using Split Bregman" in Image Processing On Line on 2012–05–19,
http://www.ipol.im/pub/art/2012/g-tvd/article_lr.pdf
.. [4] http://www.math.ucsb.edu/~cgarcia/UGProjects/BregmanAlgorithms_JacquelineBush.pdf
"""
return _denoise_tv_bregman(image, weight, max_iter, eps, isotropic)
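# A minimal usage sketch for ``denoise_tv_bregman`` (illustrative only: the
# synthetic noisy image below is an assumption, not part of this module):
#
#     >>> import numpy as np
#     >>> noisy = np.clip(np.eye(64) + 0.2 * np.random.randn(64, 64), 0, 1)
#     >>> clean = denoise_tv_bregman(noisy, weight=5.0)
#
# Note that a *smaller* ``weight`` denoises more aggressively here, which is
# the opposite convention to ``denoise_tv_chambolle`` below.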
def _denoise_tv_chambolle_nd(im, weight=0.1, eps=2.e-4, n_iter_max=200):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
im : ndarray
n-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
ndim = im.ndim
p = np.zeros((im.ndim, ) + im.shape, dtype=im.dtype)
g = np.zeros_like(p)
d = np.zeros_like(im)
i = 0
while i < n_iter_max:
if i > 0:
# d will be the (negative) divergence of p
d = -p.sum(0)
slices_d = [slice(None), ] * ndim
slices_p = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_d[ax] = slice(1, None)
slices_p[ax+1] = slice(0, -1)
slices_p[0] = ax
d[slices_d] += p[slices_p]
slices_d[ax] = slice(None)
slices_p[ax+1] = slice(None)
out = im + d
else:
out = im
E = (d ** 2).sum()
# g stores the gradients of out along each axis
# e.g. g[0] is the first order finite difference along axis 0
slices_g = [slice(None), ] * (ndim + 1)
for ax in range(ndim):
slices_g[ax+1] = slice(0, -1)
slices_g[0] = ax
g[slices_g] = np.diff(out, axis=ax)
slices_g[ax+1] = slice(None)
norm = np.sqrt((g ** 2).sum(axis=0))[np.newaxis, ...]
E += weight * norm.sum()
tau = 1. / (2.*ndim)
norm *= tau / weight
norm += 1.
p -= tau * g
p /= norm
E /= float(im.size)
if i == 0:
E_init = E
E_previous = E
else:
if np.abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
def denoise_tv_chambolle(im, weight=0.1, eps=2.e-4, n_iter_max=200,
multichannel=False):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
im : ndarray of ints, uints or floats
Input data to be denoised. `im` can be of any numeric type,
but it is cast into an ndarray of floats for the computation
of the denoised image.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that
determines the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
n_iter_max : int, optional
Maximal number of iterations used for the optimization.
multichannel : bool, optional
Apply total-variation denoising separately for each channel. This
option should be true for color images, otherwise the denoising is
also applied in the channels dimension.
Returns
-------
out : ndarray
Denoised image.
Notes
-----
Make sure to set the multichannel parameter appropriately for color images.
The principle of total variation denoising is explained in
http://en.wikipedia.org/wiki/Total_variation_denoising
The principle of total variation denoising is to minimize the
total variation of the image, which can be roughly described as
the integral of the norm of the image gradient. Total variation
denoising tends to produce "cartoon-like" images, that is,
piecewise-constant images.
This code is an implementation of the algorithm of Rudin, Fatemi and Osher
that was proposed by Chambolle in [1]_.
References
----------
.. [1] A. Chambolle, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
Examples
--------
2D example on astronaut image:
>>> from skimage import color, data
>>> img = color.rgb2gray(data.astronaut())[:50, :50]
>>> img += 0.5 * img.std() * np.random.randn(*img.shape)
>>> denoised_img = denoise_tv_chambolle(img, weight=60)
3D example on synthetic data:
>>> x, y, z = np.ogrid[0:20, 0:20, 0:20]
>>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(np.float)
>>> mask += 0.2*np.random.randn(*mask.shape)
>>> res = denoise_tv_chambolle(mask, weight=100)
"""
im_type = im.dtype
if not im_type.kind == 'f':
im = img_as_float(im)
if multichannel:
out = np.zeros_like(im)
for c in range(im.shape[-1]):
out[..., c] = _denoise_tv_chambolle_nd(im[..., c], weight, eps,
n_iter_max)
else:
out = _denoise_tv_chambolle_nd(im, weight, eps, n_iter_max)
return out
| bsd-3-clause | -854,628,907,179,249,500 | 34.211268 | 92 | 0.6072 | false |
itu-oss-project-team/oss-github-analysis-project | github_analysis_tool/analyzer/analysis_utilities.py | 1 | 11551 | import os.path
import sys
import numpy as np
import pandas as pd
import random
import collections
from sklearn.feature_selection import SelectKBest, chi2
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from github_analysis_tool.services.database_service import DatabaseService
from github_analysis_tool.services.db_column_constants import Columns
from github_analysis_tool import OssConstants
class AnalysisUtilities:
def __init__(self):
self.__database_service = DatabaseService()
def drop_0_std_columns(self, df, in_place=False):
# Drop columns with 0 standard deviation
return df.drop(df.std()[(df.std() == 0)].index, axis=1, inplace=in_place)
def normalize_df(self, df_):
df = self.drop_0_std_columns(df_) # This prevents divide by zero errors on 0 std columns
return (df-df.min())/(df.max()-df.min())
def decompose_df(self, df):
headers = df.columns.values # fetch headers
indexes = df.index.values # fetch repos
values = df._get_values # fetch features.
return headers, indexes, values
def __generate_repo_stats_csv(self):
repos = self.__database_service.get_all_repos()
repos = [repo[Columns.Repo.full_name] for repo in repos]
repo_stats = {}
for repo in repos:
repo_stat = self.__database_service.get_repo_stats(repo)
if repo_stat is not None:
repo_stats[repo] = repo_stat
repo_stats_df = pd.DataFrame().from_dict(repo_stats, orient='index')
repo_stats_df.to_csv(OssConstants.REPO_STATS_PATH, sep=";")
return repo_stats_df
def get_repo_stats_df(self, refresh=False):
if not os.path.exists(OssConstants.REPO_STATS_PATH) or refresh:
return self.__generate_repo_stats_csv() # There is no repo stats CSV or we want newest stats
else:
return pd.read_csv(OssConstants.REPO_STATS_PATH, sep=";", index_col=0)
def merge_dfs_on_indexes(self, left_df, right_df, left_suffix="l", right_suffix="r"):
"""
        This function merges two data frames on their indexes by appending columns,
        discarding indexes that do not exist in both data frames
:param left_df: A pandas data frame
:param right_df: A pandas data frame
:param left_suffix: Suffix to identify left dfs columns when overlapping
:param right_suffix: Suffix to identify right dfs columns when overlapping
:return: A pandas data frame which is merge of left_df and right_df
"""
return left_df.join(right_df, how='inner', lsuffix=left_suffix, rsuffix=right_suffix)
def __count_classes(self, labels):
# count number of instances in each class
class_counts = {}
for label in labels:
if label not in class_counts:
class_counts[label] = 1
else:
class_counts[label] += 1
return class_counts
def split_data(self, observations, labels, row_labels):
"""
:param observations: Features matrix to be split
:param labels: List of labels
        :param row_labels: Dict of {<key>: <label>} pairs
        :return: training and test sets and their labels, returned as lists
        whose elements keep the same row order as the input observations
"""
training_set = []
test_set = []
training_labels = []
test_labels = []
class_counts = self.__count_classes(labels)
        # compute split sizes (fixed train/test ratios) for each class
split_sizes = {}
for class_label in class_counts:
label_count = class_counts[class_label]
if 1 < label_count < 10:
split_size = np.rint(0.50 * label_count)
elif 10 <= label_count < 20:
split_size = np.rint(0.50 * label_count)
elif label_count >= 20:
split_size = np.rint(0.60 * label_count)
else:
split_size = label_count
split_sizes[class_label] = split_size
# split data to test-train according to split_sizes.
class_counters = {}
i = 0
for repo in row_labels:
class_label = row_labels[repo]
if class_label not in class_counters:
class_counters[class_label] = 1
else:
class_counters[class_label] += 1
if class_counters[class_label] <= split_sizes[class_label]:
training_labels.append(labels[i])
training_set.append(observations[i])
else:
test_labels.append(labels[i])
test_set.append(observations[i])
i += 1
return training_set, test_set, training_labels, test_labels
def drop_rows(self, pd_data, ignored_indexes):
# Let's make sure that we will not try to remove non-existing indexes in pd_data
to_drop_indexes = [index for index in ignored_indexes if index in pd_data.index]
return pd_data.drop(to_drop_indexes)
def get_biasing_labels(self, labels, threshold = 0.50):
biasing_labels = []
class_counts = self.__count_classes(labels)
total_labels = len(labels)
for class_label in class_counts:
if class_counts[class_label] > threshold*total_labels:
biasing_labels.append(class_label)
return biasing_labels
def undersampling(self, dataset, labels, biasing_labels, size, seed):
if len(biasing_labels) == 0:
return dataset, labels
random.seed(seed) # seed the random
observation_label_pair_list = list(zip(dataset, labels)) # associate dataset rows with labels.
        sampling_list = {label: [] for label in biasing_labels}  # create a separate list for each biasing_label.
non_sampling_list = []
for observation_label_pair in observation_label_pair_list:
if observation_label_pair[1] in biasing_labels: # if this label is in biasing_labels
sampling_list[observation_label_pair[1]].append(observation_label_pair) #add to sampling list
else:
non_sampling_list.append(observation_label_pair) #add to nonsampling list
if len(non_sampling_list) != 0:
dataset, labels = zip(*non_sampling_list) #unzip back the values which will not be eliminated.
else:
dataset = ()
labels = ()
for biasing_label in sampling_list:
random.shuffle(sampling_list[biasing_label]) # shuffle the list
sampling_list_dataset, sampling_list_labels = zip(*sampling_list[biasing_label])
            # take the first `size` elements of each shuffled class
dataset = sampling_list_dataset[:size] + dataset
labels = sampling_list_labels[:size] + labels
return dataset, labels
def export_confusion_matrix(self, out_file_pre_path, conf_matrix, label_names, success, fail):
out_file_path = out_file_pre_path + "_confusionMatrix.csv"
with open(out_file_path, "w") as output_file:
output_file.write(";")
for i in range(0, len(label_names)):
output_file.write(str(label_names[i]) + ";")
output_file.write("Correct;Misccorect;Ratio")
output_file.write("\n")
for row in range(0, len(conf_matrix)):
output_file.write(str(label_names[row]) + ";")
false_guess = 0
true_guess = 0
for col in range(0, len(conf_matrix[row])):
if row == col:
true_guess += conf_matrix[row][col]
else:
false_guess += conf_matrix[row][col]
output_file.write(str(conf_matrix[row][col]) + ";")
output_file.write(str(true_guess) + ";")
output_file.write(str(false_guess) + ";")
output_file.write(str(true_guess/(true_guess + false_guess)))
output_file.write("\n")
output_file.write("Total;")
for col in range(0, len(conf_matrix[0])):
output_file.write(";")
output_file.write(str(success) + ";" + str(fail) + ";")
output_file.write(str(success/(success+fail)))
def sum_matrices(self, matrices_list):
if not matrices_list:
return []
row, col = matrices_list[0].shape
total_conf_matrix = np.zeros((row, col), dtype=np.int32)
for conf_matrix in matrices_list:
total_conf_matrix = np.add(total_conf_matrix, conf_matrix)
return total_conf_matrix
def compute_total_confusion_matrix(self, conf_matrices, out_file_pre_path, label_names, sampled_scores):
print("------> Total")
total_success = 0
total_fail = 0
for sampled_score in sampled_scores:
total_success += sampled_score[0]
total_fail += sampled_score[1]
total_conf_matrix = self.sum_matrices(conf_matrices)
accuracy = total_success/(total_success+total_fail)
print(total_success, total_fail, accuracy)
self.export_confusion_matrix(out_file_pre_path, total_conf_matrix,
label_names, total_success, total_fail)
return total_success, total_fail, accuracy
def export_best_feature_names(self, df, labels, out_folder_path, k):
columns, repos, observations = self.decompose_df(df)
feature_scores = SelectKBest(chi2, k=k).fit(observations, labels).scores_
feature_scores = np.nan_to_num(feature_scores)
k_best_features = np.argpartition(feature_scores.ravel(), (-1) * k)[(-1) * k:]
k_best_feature_names = columns[k_best_features]
out_file_path = os.path.join(out_folder_path, "feature_selection.txt")
with open(out_file_path, "w") as output_file:
for feature_name in k_best_feature_names:
output_file.write(feature_name + "\n")
def find_sampling_size(self, biasing_labels, labels):
"""find the biggest class size after removing biasing labels."""
labels_except_biasing_labels = [x for x in labels if x not in biasing_labels]
label_names, label_counts = np.unique(labels_except_biasing_labels, return_counts=True)
if len(label_counts) == 0:
_, label_counts = np.unique(labels, return_counts=True)
size = int(np.min(label_counts))
else:
size = np.max(label_counts)
return size
def export_report(self, score, out_folder_path, name_of_classification):
report_file_path = os.path.join(out_folder_path, "result_report.csv")
# Dictionary for a score (Correct, Miscorrect, Ratio)
data = {
"Correct": score[0],
"Miscorrect": score[1],
"Ratio": score[2]
}
if os.path.exists(report_file_path): # if file has been created earlier
df = pd.read_csv(report_file_path, sep=";", index_col=0)
df = df[~df.index.duplicated(keep="last")] # Remove duplicate rows
new_df = pd.DataFrame(data=data, index=[name_of_classification]) # create new row
df = df.append(new_df) # append it
else:
df = pd.DataFrame(data=data, index=[name_of_classification])
df.sort_values(["Ratio"], axis=0, ascending=False, inplace=True) # sort before exporting
df.to_csv(report_file_path, sep=";")
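# Hedged example (not part of the original project): a self-contained sketch of
# how split_data is expected to be called. The repository names, labels and
# feature values are invented placeholders, and constructing AnalysisUtilities
# assumes DatabaseService can be instantiated in the local environment.
def _example_split_data():
    utils = AnalysisUtilities()
    # OrderedDict keeps the row order aligned with the observations/labels lists.
    row_labels = collections.OrderedDict(
        [("repo_a", "web"), ("repo_b", "web"), ("repo_c", "db"), ("repo_d", "db")])
    observations = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6], [0.7, 0.8]]
    labels = [row_labels[repo] for repo in row_labels]
    # Returns plain lists: training set, test set, then their label lists.
    return utils.split_data(observations, labels, row_labels)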
| mit | 8,226,618,677,649,915,000 | 40.851449 | 109 | 0.598996 | false |
JMMolenaar/cadnano2.5 | cadnano/part/removeallstrandscmd.py | 1 | 1893 | from cadnano.cnproxy import UndoCommand
class RemoveAllStrandsCommand(UndoCommand):
"""
1. Remove all strands. Emits strandRemovedSignal for each.
2. Remove all oligos.
"""
def __init__(self, part):
super(RemoveAllStrandsCommand, self).__init__("remove all strands")
self._part = part
self._vhs = vhs = part.getVirtualHelices()
self._strand_sets = []
for vh in self._vhs:
x = vh.getStrandSets()
self._strand_sets.append(x[0])
self._strand_sets.append(x[1])
self._strandSetListCopies = \
[[y for y in x._strand_list] for x in self._strand_sets]
self._oligos = set(part.oligos())
# end def
def redo(self):
part = self._part
# Remove the strand
        for s_set in self._strand_sets:
s_list = s_set._strand_list
for strand in s_list:
s_set.removeStrand(strand)
# end for
s_set._strand_list = []
#end for
for vh in self._vhs:
# for updating the Slice View displayed helices
part.partStrandChangedSignal.emit(part, vh)
# end for
self._oligos.clear()
# end def
def undo(self):
part = self._part
# Remove the strand
sListCopyIterator = iter(self._strandSetListCopies)
for s_set in self._strand_sets:
s_list = next(sListCopyIterator)
for strand in s_list:
s_set.strandsetStrandAddedSignal.emit(s_set, strand)
# end for
s_set._strand_list = s_list
#end for
for vh in self._vhs:
# for updating the Slice View displayed helices
part.partStrandChangedSignal.emit(part, vh)
# end for
for olg in self._oligos:
part.addOligo(olg)
# end def
# end class | mit | 4,979,934,590,713,284,000 | 32.22807 | 76 | 0.555203 | false |
cthit/CodeIT | behaviours/Jump.py | 1 | 1201 | import random
import pygame
from behaviours.Behaviour import Behaviour
from behaviours.Collide import Collide
from src.GameMethods import GameMethods
class Jump(Behaviour):
def __init__(self, jump_velocity=10, jump_key=None):
self.jump_velocity = jump_velocity
self.can_jump = False
self.jump_key = jump_key
self._game_methods = None
def update(self, delta_time, keys, config, game_methods: GameMethods):
self._game_methods = game_methods
c = self.owner.get_behaviour(Collide)
self.can_jump = False
if len(c.check_bottom(0.05)) > 0:
if self.owner.velocity.y >= 0:
self.can_jump = True
if self.jump_key is not None and keys[self.jump_key]:
self.jump_if_possible()
def jump_if_possible(self):
if self.can_jump:
self._game_methods.play_sound(random.choice([
"jump-00.wav",
"jump-01.wav",
"jump-02.wav",
"jump-03.wav"]))
self.owner.velocity.y = -self.jump_velocity
self.can_jump = False
def bind_to_key(self, keyboard_key):
self.jump_key = keyboard_key
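# Hedged usage sketch (not from the original repo): wiring this behaviour to an
# owner entity and the space bar. `player` and `add_behaviour` are assumed
# names for whatever entity/registration API the game actually uses.
#
#     jump = Jump(jump_velocity=12, jump_key=pygame.K_SPACE)
#     player.add_behaviour(jump)       # hypothetical registration call
#     # ...or choose the key later:
#     jump.bind_to_key(pygame.K_SPACE)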
| mit | -5,081,456,756,045,194,000 | 29.794872 | 74 | 0.592007 | false |
openstack-packages/DLRN | dlrn/tests/test_driver_git.py | 1 | 5008 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import sh
import shutil
import tempfile
from six.moves import configparser
from dlrn.config import ConfigOptions
from dlrn.drivers.gitrepo import GitRepoDriver
from dlrn.tests import base
def _mocked_environ(*args, **kwargs):
return 'myuser'
def _mocked_exists(path):
return True
class TestDriverGit(base.TestCase):
def setUp(self):
super(TestDriverGit, self).setUp()
config = configparser.RawConfigParser()
config.read("projects.ini")
config.set("DEFAULT", "pkginfo_driver",
"dlrn.drivers.gitrepo.GitRepoDriver")
self.config = ConfigOptions(config)
self.config.datadir = tempfile.mkdtemp()
self.config.gitrepo_dirs = ['/openstack']
def tearDown(self):
super(TestDriverGit, self).tearDown()
shutil.rmtree(self.config.datadir)
@mock.patch.object(sh.Command, '__call__', autospec=True)
@mock.patch('dlrn.drivers.gitrepo.refreshrepo')
def test_getinfo(self, refresh_mock, sh_mock):
refresh_mock.return_value = [None, None, None]
driver = GitRepoDriver(cfg_options=self.config)
package = {'upstream': 'test', 'name': 'test'}
info = driver.getinfo(package=package, project="test", dev_mode=True)
self.assertEqual(info, [])
@mock.patch.object(sh.Command, '__call__', autospec=True)
@mock.patch('os.listdir')
def test_getpackages(self, listdir_mock, sh_mock):
listdir_mock.return_value = []
driver = GitRepoDriver(cfg_options=self.config)
packages = driver.getpackages(dev_mode=True)
self.assertEqual(packages, [])
@mock.patch('os.path.exists', side_effect=_mocked_exists)
@mock.patch('os.environ.get', side_effect=['myuser'])
@mock.patch('sh.renderspec', create=True)
@mock.patch('sh.env', create=True)
@mock.patch('os.listdir')
def test_custom_preprocess(self, ld_mock, env_mock, rs_mock, get_mock,
pth_mock):
self.config.custom_preprocess = ['/bin/true']
driver = GitRepoDriver(cfg_options=self.config)
driver.preprocess(package_name='foo')
directory = '%s/package_info/openstack/foo' % self.config.datadir
expected = [mock.call(
['DLRN_PACKAGE_NAME=foo',
'DLRN_DISTGIT=%s' % directory,
'DLRN_SOURCEDIR=%s/foo' % self.config.datadir,
'DLRN_USER=myuser',
'/bin/true'],
_cwd=directory,
_env={'LANG': 'C'})]
self.assertEqual(env_mock.call_args_list, expected)
self.assertEqual(env_mock.call_count, 1)
@mock.patch('os.path.exists', side_effect=_mocked_exists)
@mock.patch('os.environ.get', side_effect=_mocked_environ)
@mock.patch('sh.renderspec', create=True)
@mock.patch('sh.env', create=True)
@mock.patch('os.listdir')
def test_custom_preprocess_multiple_commands(self, ld_mock, env_mock,
rs_mock, get_mock, pth_mock):
self.config.custom_preprocess = ['/bin/true', '/bin/false']
driver = GitRepoDriver(cfg_options=self.config)
driver.preprocess(package_name='foo')
directory = '%s/package_info/openstack/foo' % self.config.datadir
expected = [mock.call(
['DLRN_PACKAGE_NAME=foo',
'DLRN_DISTGIT=%s' % directory,
'DLRN_SOURCEDIR=%s/foo' % self.config.datadir,
'DLRN_USER=myuser',
'/bin/true'],
_cwd=directory,
_env={'LANG': 'C'}),
mock.call(
['DLRN_PACKAGE_NAME=foo',
'DLRN_DISTGIT=%s' % directory,
'DLRN_SOURCEDIR=%s/foo' % self.config.datadir,
'DLRN_USER=myuser',
'/bin/false'],
_cwd=directory,
_env={'LANG': 'C'})
]
self.assertEqual(env_mock.call_args_list, expected)
self.assertEqual(env_mock.call_count, 2)
@mock.patch('sh.renderspec', create=True)
@mock.patch('os.listdir')
def test_custom_preprocess_fail(self, ld_mock, rs_mock):
self.config.custom_preprocess = ['/bin/nonexistingcommand']
driver = GitRepoDriver(cfg_options=self.config)
os.makedirs(os.path.join(self.config.datadir,
'package_info/openstack/foo'))
self.assertRaises(RuntimeError, driver.preprocess, package_name='foo')
| apache-2.0 | -2,759,557,301,672,723,500 | 36.096296 | 78 | 0.621006 | false |
tyiannak/inf_teiste_info_theory_lab | compressText.py | 1 | 2472 | import ITlib, sys, bitarray, cPickle, os
if __name__ == '__main__':
mode, inputFilePath, outputFilePath = (sys.argv[1], sys.argv[2], sys.argv[3])
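    # Example invocations (file names are placeholders, not from the repository):
    #   python compressText.py compress input.txt compressed.bin H    # Huffman
    #   python compressText.py compress input.txt compressed.bin SF   # Shannon-Fano
    #   python compressText.py uncompress compressed.bin restored.txt
    # Compression also writes <output>_code holding the pickled code table;
    # uncompress expects to find that file next to its input.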
if mode == "compress":
method = sys.argv[4]
f = open(inputFilePath) # read the input file
text = f.read()
f.close()
charCounts = ITlib.getTextCountsUnique(text) # get character counts
if method == "H" or method == "Huffman":
code, length = ITlib.generateHuffmanCode(charCounts) # generate huffman code
elif method == "SF" or method == "Shannon-Fano":
code, length = ITlib.generateShannonFanoCode(charCounts) # generate shannon-fano code
else:
raise ValueError('Method argument must be either Huffman\
(or H) or Shannon-Fano (or SF)')
        etext = ITlib.encode(code, text) # encode using the generated code
etext = "".join(etext) # convert list to string
etextBits = bitarray.bitarray(etext) # convert to bitarray type
with open(outputFilePath,"wb") as f: # write bits to binary file
etextBits.tofile(f)
cPickle.dump(code, open(outputFilePath+"_code", "wb" ) ) # write code to file
inFSize = os.stat(inputFilePath).st_size
outFSize = os.stat(outputFilePath).st_size
codeFSize = os.stat(outputFilePath+"_code").st_size
print "Original file size is %d bytes" % inFSize
print "Compressed file size is %d bytes \
(%d for encoded text and %d for the code itself)" % \
(outFSize + codeFSize, outFSize, codeFSize)
print "Compression ratio is %.3f" % \
(float(inFSize) / (outFSize + codeFSize))
elif mode == "uncompress":
etextBits = bitarray.bitarray()
with open(inputFilePath,"r") as f: # load bits from comrpessed file
etextBits.fromfile(f)
code = cPickle.load(open(inputFilePath+"_code", "r" ) ) # load code from file
text_n = ITlib.decode(code, etextBits.to01()) # decode the text
with open(outputFilePath, "w") as f: # write decoded text to file
f.write(text_n)
f.close() | apache-2.0 | -2,836,186,854,288,598,000 | 60.825 | 112 | 0.529531 | false |
mbaldessari/pcp | src/python/setup.py | 1 | 2568 | """ Build script for the PCP python package """
#
# Copyright (C) 2012-2014 Red Hat.
# Copyright (C) 2009-2012 Michael T. Werner
#
# This file is part of the "pcp" module, the python interfaces for the
# Performance Co-Pilot toolkit.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
from distutils.core import setup, Extension
setup(name = 'pcp',
version = '1.0',
description = 'Python package for Performance Co-Pilot',
license = 'GPLv2+',
author = 'Performance Co-Pilot Development Team',
author_email = '[email protected]',
url = 'http://www.performancecopilot.org',
packages = ['pcp'],
ext_modules = [
Extension('cpmapi', ['pmapi.c'], libraries = ['pcp']),
Extension('cpmda', ['pmda.c'], libraries = ['pcp_pmda', 'pcp']),
Extension('cpmgui', ['pmgui.c'], libraries = ['pcp_gui']),
Extension('cpmi', ['pmi.c'], libraries = ['pcp_import']),
Extension('cmmv', ['mmv.c'], libraries = ['pcp_mmv']),
],
platforms = [ 'Windows', 'Linux', 'FreeBSD', 'Solaris', 'Mac OS X', 'AIX' ],
long_description =
'PCP provides services to support system-level performance monitoring',
classifiers = [
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: AIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Networking :: Monitoring',
'Topic :: Software Development :: Libraries',
],
)
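# Typical distutils invocations for this package (standard commands, listed as
# a hint rather than project-mandated steps):
#   python setup.py build_ext --inplace   # compile the C extension modules
#   python setup.py install               # install the pcp python package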
| lgpl-2.1 | 4,887,346,861,633,221,000 | 41.098361 | 80 | 0.630841 | false |
Blazemeter/apiritif | tests/test_transactions.py | 1 | 4566 | import logging
import time
import unittest
import threading
import apiritif
from apiritif import http, transaction, transaction_logged, smart_transaction
target = http.target('https://httpbin.org')
target.keep_alive(True)
target.auto_assert_ok(False)
target.use_cookies(True)
class TestRequests(unittest.TestCase):
# will produce test-case sample with one sub-sample
def test_1_single_request(self):
target.get('/')
# will produce test-case sample with two sub-samples
def test_2_multiple_requests(self):
target.get('/')
target.get('/2')
# won't produce test-case sample, only transaction
def test_3_toplevel_transaction(self):
with transaction("Transaction"):
target.get('/')
target.get('/2')
# won't produce test-case sample, only "Tran Name"
# will also will skip "GET /" request, as it's not in the transaction.
def test_4_mixed_transaction(self):
target.get('/')
with transaction("Transaction"):
target.get('/2')
# won't produce test-case sample, two separate ones
def test_5_multiple_transactions(self):
with transaction("Transaction 1"):
target.get('/')
target.get('/2')
with transaction("Transaction 2"):
target.get('/')
target.get('/2')
def test_6_transaction_obj(self):
tran = transaction("Label")
tran.start()
time.sleep(0.5)
tran.finish()
def test_7_transaction_fail(self):
with transaction("Label") as tran:
tran.fail("Something went wrong")
def test_8_transaction_attach(self):
with transaction("Label") as tran:
user_input = "YO"
tran.set_request("Request body")
tran.set_response("Response body")
tran.set_response_code(201)
tran.attach_extra("user", user_input)
def test_9_transaction_logged(self):
with transaction_logged("Label") as tran:
logging.warning("TODO: capture logging to assert for result")
class ControllerMock(object):
class CurrentSampleMock:
def __init__(self, index):
self.test_case = 'TestCase %d' % index
self.test_suite = 'TestSuite %d' % index
def __init__(self, index):
self.tran_mode = True
self.test_info = {}
self.current_sample = self.CurrentSampleMock(index)
def beforeTest(self):
pass
def startTest(self):
pass
def stopTest(self, is_transaction):
pass
def addError(self, name, msg, trace, is_transaction):
pass
def afterTest(self, is_transaction):
pass
class TransactionThread(threading.Thread):
def __init__(self, index):
self.index = index
self.driver = 'Driver %d' % self.index
self.controller = ControllerMock(self.index)
self.thread_name = 'Transaction %d' % self.index
self.exception_message = 'Thread %d failed' % self.index
super(TransactionThread, self).__init__(target=self._run_transaction)
def _run_transaction(self):
apiritif.put_into_thread_store(driver=self.driver, func_mode=False, controller=self.controller)
apiritif.set_transaction_handlers({'enter': [self._enter_handler], 'exit': [self._exit_handler]})
tran = smart_transaction(self.thread_name)
with tran:
self.transaction_driver = tran.driver
self.transaction_controller = tran.controller
raise Exception(self.exception_message)
self.message_from_thread_store = apiritif.get_from_thread_store('message')
def _enter_handler(self):
pass
def _exit_handler(self):
pass
class TestMultiThreadTransaction(unittest.TestCase):
# Transaction data should be different for each thread.
# Here TransactionThread class puts all transaction data into thread store.
# Then we save all thread data from real transaction data to our mock.
# As the result written and saved data should be the same.
def test_Transaction_data_per_thread(self):
transactions = [TransactionThread(i) for i in range(5)]
for tran in transactions:
tran.start()
for tran in transactions:
tran.join()
for tran in transactions:
self.assertEqual(tran.transaction_controller, tran.controller)
self.assertEqual(tran.transaction_driver, tran.driver)
self.assertEqual(tran.message_from_thread_store, tran.exception_message)
| apache-2.0 | 2,616,180,987,506,200,000 | 30.708333 | 105 | 0.635786 | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.exodus/resources/lib/sources/primewire_mv_tv.py | 1 | 9244 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import proxy
class source:
def __init__(self):
self.domains = ['primewire.ag']
self.base_link = 'http://www.primewire.ag'
self.key_link = 'http://www.primewire.ag/index.php?search'
self.moviesearch_link = 'http://www.primewire.ag/index.php?search_keywords=%s&key=%s&search_section=1'
self.tvsearch_link = 'http://www.primewire.ag/index.php?search_keywords=%s&key=%s&search_section=2'
def request(self, url, check):
try:
result = client.source(url)
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
result = client.source(proxy.get() + urllib.quote_plus(url))
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
result = client.source(proxy.get() + urllib.quote_plus(url))
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
except:
return
def movie(self, imdb, title, year):
try:
result = self.request(self.key_link, 'searchform')
query = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'key'})[0]
query = self.moviesearch_link % (urllib.quote_plus(re.sub('\'', '', title)), query)
result = self.request(query, 'index_item')
result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
title = 'watch' + cleantitle.get(title)
years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
result = [i for i in result if any(x in i[1] for x in years)]
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
except: pass
match = [i[0] for i in result if title == cleantitle.get(i[1])]
match2 = [i[0] for i in result]
match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
if match2 == []: return
for i in match2[:5]:
try:
if len(match) > 0: url = match[0] ; break
result = self.request(urlparse.urljoin(self.base_link, i), 'choose_tabs')
if imdb in str(result): url = i ; break
except:
pass
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
result = self.request(self.key_link, 'searchform')
query = client.parseDOM(result, 'input', ret='value', attrs = {'name': 'key'})[0]
query = self.tvsearch_link % (urllib.quote_plus(re.sub('\'', '', tvshowtitle)), query)
result = self.request(query, 'index_item')
result = client.parseDOM(result, 'div', attrs = {'class': 'index_item.+?'})
tvshowtitle = 'watch' + cleantitle.get(tvshowtitle)
years = ['(%s)' % str(year), '(%s)' % str(int(year)+1), '(%s)' % str(int(year)-1)]
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', ret='title')) for i in result]
result = [(i[0][0], i[1][0]) for i in result if len(i[0]) > 0 and len(i[1]) > 0]
result = [i for i in result if any(x in i[1] for x in years)]
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['q'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.parse_qs(urlparse.urlparse(i[0]).query)['u'][0], i[1]) for i in result]
except: pass
try: result = [(urlparse.urlparse(i[0]).path, i[1]) for i in result]
except: pass
match = [i[0] for i in result if tvshowtitle == cleantitle.get(i[1])]
match2 = [i[0] for i in result]
match2 = [x for y,x in enumerate(match2) if x not in match2[:y]]
if match2 == []: return
for i in match2[:5]:
try:
if len(match) > 0: url = match[0] ; break
result = self.request(urlparse.urljoin(self.base_link, i), 'tv_episode_item')
if imdb in str(result): url = i ; break
except:
pass
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.urljoin(self.base_link, url)
result = self.request(url, 'tv_episode_item')
result = client.parseDOM(result, 'div', attrs = {'class': 'tv_episode_item'})
title = cleantitle.get(title)
result = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'span', attrs = {'class': 'tv_episode_name'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in result]
result = [(i[0], i[1][0], i[2]) for i in result if len(i[1]) > 0] + [(i[0], None, i[2]) for i in result if len(i[1]) == 0]
result = [(i[0], i[1], i[2][0]) for i in result if len(i[2]) > 0] + [(i[0], i[1], None) for i in result if len(i[2]) == 0]
result = [(i[0][0], i[1], i[2]) for i in result if len(i[0]) > 0]
url = [i for i in result if title == cleantitle.get(i[1]) and premiered == i[2]][:1]
if len(url) == 0: url = [i for i in result if premiered == i[2]]
if len(url) == 0 or len(url) > 1: url = [i for i in result if 'season-%01d-episode-%01d' % (int(season), int(episode)) in i[0]]
url = client.replaceHTMLCodes(url[0][0])
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
except: pass
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
except: pass
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
result = self.request(url, 'choose_tabs')
links = client.parseDOM(result, 'tbody')
for i in links:
try:
url = client.parseDOM(i, 'a', ret='href')[0]
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
except: pass
try: url = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
except: pass
url = urlparse.parse_qs(urlparse.urlparse(url).query)['url'][0]
url = base64.b64decode(url)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
quality = client.parseDOM(i, 'span', ret='class')[0]
if quality == 'quality_cam' or quality == 'quality_ts': quality = 'CAM'
elif quality == 'quality_dvd': quality = 'SD'
else: raise Exception()
sources.append({'source': host, 'quality': quality, 'provider': 'Primewire', 'url': url, 'direct': False, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | 5,713,432,665,606,945,000 | 40.828054 | 188 | 0.535482 | false |
aerialhedgehog/VyPy | tests/data/ordered_bunch.py | 1 | 5662 |
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from VyPy.data import OrderedBunch, Property
import pickle
from copy import deepcopy
from time import time, sleep
import numpy as np
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
# --------------------------------------------------------
# Initialize
# --------------------------------------------------------
o = OrderedBunch()
# --------------------------------------------------------
# Load up data
# --------------------------------------------------------
o['x'] = 'hello' # dictionary style
o.y = 1 # attribute style
o['z'] = [3,4,5]
o.t = OrderedBunch() # sub-bunch
o.t['h'] = 20
o.t.i = (1,2,3)
# --------------------------------------------------------
# Attach a callable object
# --------------------------------------------------------
o.f = Callable(test_function,o)
# --------------------------------------------------------
# Printing
# --------------------------------------------------------
print '>>> print o.keys()'
print o.keys()
print ''
print '>>> print o'
print o
print '>>> print o.f()'
print o.f()
print ''
# --------------------------------------------------------
# Pickling test
# --------------------------------------------------------
print '>>> pickle.dumps()'
d = pickle.dumps(o)
print '>>> pickle.loads()'
p = pickle.loads(d)
print ''
print '>>> print p'
print p
#print "should be true:" , p.f.d is p
#assert p.f.d is p
# --------------------------------------------------------
# The update function
# --------------------------------------------------------
o.t['h'] = 'changed'
p.update(o)
print "should be 'changed':" , p.t.h
assert p.t.h == 'changed'
#assert p.f.d.t.h == 'changed'
print ''
# --------------------------------------------------------
# Array Manipulation
# --------------------------------------------------------
# an ordered bunch of floats
a = OrderedBunch()
a.f = 1
a.g = 2
a.b = OrderedBunch()
a.b.h = np.array([1,2,3])
a.n = 'strings ignored'
print '>>> print a'
print a
print ''
# dump the numerical data to an array
print '>>> a.pack_array()'
c = a.pack_array()
print c
print ''
# modify array
print '>>> modify c[2]'
c[2] = 25
print c
print ''
# repack dictionary
a.unpack_array(c)
print '>>> a.unpack_array(c)'
print a
print ''
# make a copy
b = deepcopy(a)
    # a method to apply recursively on both a and b
def method(self,other):
try: return self-other
except: return None # ignore strings or failed operations
d = a.do_recursive(method,b)
print ">>> recursive a-b"
print d
print ''
# --------------------------------------------------------
# Access Speed test
# --------------------------------------------------------
print 'Access speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e6)):
v = o.t.i
t1 = time()-t0
# accessing a simpler bunch
class SimpleBunch:
pass
z = SimpleBunch()
z.t = SimpleBunch
z.t.i = 0
t0 = time()
for i in range(int(1e6)):
v = z.t.i
t2 = time()-t0
# results
print 'OrderedBunch: %.6f s' % (t1)
print 'SimpleBunch: %.6f s' % (t2)
assert (t1-t2)/t2 < 0.5
print ''
# --------------------------------------------------------
# Assignment Speed test
# --------------------------------------------------------
print 'Assignment speed test...'
# accessing bunch
t0 = time()
for i in range(int(1e6)):
o.t.i = 10
t1 = time()-t0
# accessing a simpler bunch
t0 = time()
for i in range(int(1e6)):
z.t.i = 10
t2 = time()-t0
# results
print 'OrderedBunch: %.6f s' % (t1)
print 'SimpleBunch: %.6f s' % (t2)
assert (t1-t2)/t2 < 5.0
print ''
# ----------------------------------------------------------------------
# Callable Object
# ----------------------------------------------------------------------
# has a hidden property
# works like a decorator
class Callable(OrderedBunch):
d = Property('d')
def __init__(self,f,d):
self.f = f
self.d = d
def __call__(self,*x):
return self.f(self.d,*x)
# ----------------------------------------------------------------------
# Test Function
# ----------------------------------------------------------------------
# to work in the callable object
def test_function(c):
return c.x
# ----------------------------------------------------------------------
# Call Main
# ----------------------------------------------------------------------
if __name__ == '__main__':
main() | bsd-3-clause | -5,173,433,652,382,378,000 | 23.399103 | 80 | 0.310138 | false |
RedHatQE/mgmtsystem | tests/test_hawkular.py | 1 | 13600 | # -*- coding: utf-8 -*-
"""Unit tests for Hawkular client."""
import json
import os
from random import sample
from urllib.parse import urlparse
import pytest
from mock import patch
from wrapanapi.systems import HawkularSystem
from wrapanapi.systems.hawkular import (CanonicalPath, Resource, ResourceData,
ResourceType)
def fake_urlopen(c_client, url, headers, params):
"""
    A stub urlopen() implementation that loads JSON responses from
the filesystem.
"""
# Map path from url to a file
parsed_url = urlparse("{}/{}".format(c_client.api_entry, url)).path
if parsed_url.startswith('/hawkular/inventory/traversal') \
or parsed_url.startswith('/hawkular/inventory/entity'):
# Change parsed url, when we use default one, 'd;configuration' replaced with 'd'
parsed_url = "{}/{}".format(urlparse("{}".format(c_client.api_entry)).path, url)
parsed_url = parsed_url.replace('traversal/', '')
parsed_url = parsed_url.replace('entity/', '')
parsed_url = parsed_url.replace('f;', 'feeds/')
parsed_url = parsed_url.replace('r;', 'resources/', 1)
parsed_url = parsed_url.replace('r;', '')
parsed_url = parsed_url.replace('rt;', 'resourceTypes/')
parsed_url = parsed_url.replace('rl;defines/', '')
parsed_url = parsed_url.replace('type=rt', 'resourceTypes')
parsed_url = parsed_url.replace('type=r', 'resources')
parsed_url = parsed_url.replace('type=f', 'feeds')
parsed_url = parsed_url.replace('d;configuration', 'data')
resource_file = os.path.normpath("tests/resources/{}.json".format(parsed_url))
# Must return a file-like object
return json.load(open(resource_file))
def fake_urldelete(c_client, url, headers):
"""
A stub delete_status() implementation that returns True
"""
return True
def fake_urlput(c_client, url, data, headers):
"""
A stub put_status() implementation that returns True
"""
return True
def fake_urlpost(c_client, url, data, headers):
"""
A stub post_status() implementation that returns True
"""
return True
@pytest.yield_fixture(scope="function")
def provider():
"""
    Fixture that yields a HawkularSystem instance, patching its REST calls to
    the stub implementations above unless HAWKULAR_HOSTNAME is set.
"""
if not os.getenv('HAWKULAR_HOSTNAME'):
patcher = patch('wrapanapi.clients.rest_client.ContainerClient.get_json', fake_urlopen)
patcher.start()
patcher = patch('wrapanapi.clients.rest_client.ContainerClient.delete_status',
fake_urldelete)
patcher.start()
patcher = patch('wrapanapi.clients.rest_client.ContainerClient.post_status', fake_urlpost)
patcher.start()
patcher = patch('wrapanapi.clients.rest_client.ContainerClient.put_status', fake_urlput)
patcher.start()
hwk = HawkularSystem(
hostname=os.getenv('HAWKULAR_HOSTNAME', 'localhost'),
protocol=os.getenv('HAWKULAR_PROTOCOL', 'http'),
port=os.getenv('HAWKULAR_PORT', 8080),
username=os.getenv('HAWKULAR_USERNAME', 'jdoe'),
password=os.getenv('HAWKULAR_PASSWORD', 'password'),
ws_connect=False
)
yield hwk
if not os.getenv('HAWKULAR_HOSTNAME'):
patcher.stop()
@pytest.yield_fixture(scope="function")
def datasource(provider):
"""
Fixture for preparing Datasource for tests.
It creates resource and resource data for Datasource.
    At the end of testing, the Datasource is deleted.
"""
datasources = provider.inventory.list_server_datasource()
    assert len(datasources) > 0, "No datasources are listed for any of the feeds"
new_datasource = None
for datasource in sample(datasources, 1):
r_data = _read_resource_data(provider, datasource)
assert r_data
name_ext = "MWTest"
new_datasource = Resource(name="{}{}".format(datasource.name, name_ext),
id="{}{}".format(datasource.id, name_ext),
path=CanonicalPath(
"{}{}".format(datasource.path.to_string, name_ext)))
new_datasource.path.resource_id = new_datasource.path.resource_id[1]
resource_type = ResourceType(id=None, name=None,
path=CanonicalPath("/rt;Datasource"))
new_datasource_data = ResourceData(name=None, path=None, value=r_data.value)
new_datasource_data.value.update(
{"JNDI Name": "{}{}".format(r_data.value["JNDI Name"], name_ext),
"Enabled": "true"
}
)
_delete_resource(provider, new_datasource)
result = _create_resource(provider, resource=new_datasource,
resource_data=new_datasource_data, resource_type=resource_type)
assert result, "Create should be successful"
r_data = _read_resource_data(provider, new_datasource)
assert r_data, "Resource data should exist"
assert r_data.value == new_datasource_data.value
yield new_datasource
if new_datasource:
_delete_resource(provider, new_datasource)
def test_list_feed(provider):
""" Checks whether any feed is listed """
feeds = provider.inventory.list_feed()
assert len(feeds) > 0, "No feeds are listed"
for feed in feeds:
assert feed.id
assert feed.path
def test_list_resource_type(provider):
""" Checks whether any resource type is listed and has attributes """
feeds = provider.inventory.list_feed()
for feed in feeds:
res_types = provider.inventory.list_resource_type(feed_id=feed.id)
for res_type in res_types:
assert res_type.id
assert res_type.name
assert res_type.path
assert len(res_types) > 0, "No resource type is listed for any of feeds"
def test_list_server(provider):
""" Checks whether any server is listed and has attributes"""
servers = provider.inventory.list_server()
for server in servers:
assert server.id
assert server.name
assert server.path
assert server.data
assert len(servers) > 0, "No server is listed for any of feeds"
def test_list_domain(provider):
""" Checks whether any domain is listed and has attributes"""
domains = provider.inventory.list_domain()
for domain in domains:
assert domain.id
assert domain.name
assert domain.path
assert domain.data
assert len(domains) > 0, "No domain is listed for any of feeds"
def test_list_server_group(provider):
""" Checks whether any group is listed and has attributes"""
domains = provider.inventory.list_domain()
for domain in domains:
server_groups = provider.inventory.list_server_group(domain.path.feed_id)
for server_group in server_groups:
assert server_group.id
assert server_group.name
assert server_group.path
assert server_group.data
assert len(server_groups) > 0, "No server group is listed for any of feeds"
def test_list_server_deployment(provider):
""" Checks whether any deployment is listed and has attributes """
deployments = provider.inventory.list_server_deployment()
for deployment in deployments:
assert deployment.id
assert deployment.name
assert deployment.path
assert len(deployments) > 0, "No deployment is listed for any of feeds"
def test_list_messaging(provider):
""" Checks whether any messaging is listed and has attributes """
messagings = provider.inventory.list_messaging()
for messaging in messagings:
assert messaging.id
assert messaging.name
assert messaging.path
assert len(messagings) > 0, "No messaging is listed for any of feeds"
def test_get_config_data(provider):
""" Checks whether resource data is provided and has attributes """
found = False
servers = provider.inventory.list_server()
for server in servers:
r_data = provider.inventory.get_config_data(feed_id=server.path.feed_id,
resource_id=server.id)
if r_data:
found = True
assert r_data.name
assert r_data.path
assert r_data.value
assert found, "No resource data is listed for any of servers"
def test_edit_resource_data(provider, datasource):
""" Checks whether resource data is edited """
r_data = _read_resource_data(provider, datasource)
assert r_data, "Resource data should exist"
r_data.value['Enabled'] = "false"
result = _update_resource_data(provider, r_data, datasource)
assert result, "Update should be successful"
r_data = _read_resource_data(provider, datasource)
# skip value verification for mocked provider
if os.getenv('HAWKULAR_HOSTNAME'):
assert r_data.value['Enabled'] == "false"
def test_delete_resource(provider, datasource):
""" Checks whether resource is deleted """
r_data = _read_resource_data(provider, datasource)
assert r_data, "Resource data should exist"
result = _delete_resource(provider, datasource)
assert result, "Delete should be successful"
r_data = _read_resource_data(provider, datasource)
# skip deleted verification for mocked provider
if os.getenv('HAWKULAR_HOSTNAME'):
assert not r_data
def _read_resource_data(provider, resource):
return provider.inventory.get_config_data(feed_id=resource.path.feed_id,
resource_id=resource.path.resource_id)
def _create_resource(provider, resource, resource_data, resource_type):
return provider.inventory.create_resource(resource=resource, resource_data=resource_data,
resource_type=resource_type,
feed_id=resource.path.feed_id)
def _update_resource_data(provider, resource_data, resource):
return provider.inventory.edit_config_data(resource_data=resource_data,
feed_id=resource.path.feed_id,
resource_id=resource.path.resource_id)
def _delete_resource(provider, resource):
return provider.inventory.delete_resource(feed_id=resource.path.feed_id,
resource_id=resource.path.resource_id)
def test_list_server_datasource(provider):
""" Checks whether any datasource is listed and has attributes """
found = False
datasources = provider.inventory.list_server_datasource()
if len(datasources) > 0:
found = True
for datasource in datasources:
assert datasource.id
assert datasource.name
assert datasource.path
    assert found or provider.inventory._stats_available['num_datasource'](provider.inventory) > 0,\
        "No datasource is listed for any of the feeds, but they exist"
def test_path(provider):
""" Checks whether path returned correctly """
feeds = provider.inventory.list_feed()
for feed in feeds:
assert feed.path
assert feed.path.feed_id
servers = provider.inventory.list_server()
for server in servers:
assert server.path
assert server.path.tenant_id
assert server.path.feed_id
assert server.path.resource_id
def test_num_server(provider):
""" Checks whether number of servers is returned correct """
servers_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
servers_count += len(provider.inventory.list_server(feed_id=feed.id))
num_server = provider.inventory._stats_available['num_server'](provider.inventory)
assert num_server == servers_count, "Number of servers is wrong"
def test_num_deployment(provider):
""" Checks whether number of deployments is returned correct """
deployments_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
deployments_count += len(provider.inventory.list_server_deployment(feed_id=feed.id))
num_deployment = provider.inventory._stats_available['num_deployment'](provider.inventory)
assert num_deployment == deployments_count, "Number of deployments is wrong"
def test_num_datasource(provider):
""" Checks whether number of datasources is returned correct """
datasources_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
datasources_count += len(provider.inventory.list_server_datasource(feed_id=feed.id))
num_datasource = provider.inventory._stats_available['num_datasource'](provider.inventory)
assert num_datasource == datasources_count, "Number of datasources is wrong"
def test_num_messaging(provider):
""" Checks whether number of messagings is returned correct """
messagings_count = 0
feeds = provider.inventory.list_feed()
for feed in feeds:
messagings_count += len(provider.inventory.list_messaging(feed_id=feed.id))
num_messaging = provider.inventory._stats_available['num_messaging'](provider.inventory)
assert num_messaging == messagings_count, "Number of messagings is wrong"
def test_list_event(provider):
""" Checks whether is any event listed """
events = provider.alert.list_event()
if len(events) > 0:
event = events[0]
assert event.id
assert event.eventType
assert event.ctime
assert event.dataSource
assert event.dataId
assert event.category
assert event.text
| mit | 5,295,584,554,794,557,000 | 37.636364 | 98 | 0.653015 | false |
aosprey/rose | lib/python/rose/suite_hook.py | 1 | 5734 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-8 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Hook functionalities for a suite."""
from email.mime.text import MIMEText
import os
import pwd
from rose.opt_parse import RoseOptionParser
from rose.popen import RosePopener
from rose.reporter import Reporter
from rose.resource import ResourceLocator
from rose.suite_engine_proc import SuiteEngineProcessor
from smtplib import SMTP, SMTPException
import socket
class RoseSuiteHook(object):
"""Hook functionalities for a suite."""
def __init__(self, event_handler=None, popen=None, suite_engine_proc=None):
self.event_handler = event_handler
if popen is None:
popen = RosePopener(event_handler)
self.popen = popen
if suite_engine_proc is None:
suite_engine_proc = SuiteEngineProcessor.get_processor(
event_handler=event_handler, popen=popen)
self.suite_engine_proc = suite_engine_proc
def handle_event(self, *args, **kwargs):
"""Call self.event_handler if it is callabale."""
if callable(self.event_handler):
return self.event_handler(*args, **kwargs)
def run(self, suite_name, task_id, hook_event, hook_message=None,
should_mail=False, mail_cc_list=None, should_shutdown=False,
should_retrieve_job_logs=False):
"""
Invoke the hook for a suite.
1. For a task hook, if the task runs remotely, retrieve its log from
the remote host.
2. If "should_mail", send an email notification to the current user,
and those in the "mail_cc_list".
3. If "should_shutdown", shut down the suite.
"""
# Retrieve log and populate job logs database
task_ids = []
if task_id and should_retrieve_job_logs:
task_ids = [task_id]
self.suite_engine_proc.job_logs_pull_remote(suite_name, task_ids)
# Send email notification if required
email_exc = None
if should_mail:
text = ""
if task_id:
text += "Task: %s\n" % task_id
if hook_message:
text += "Message: %s\n" % hook_message
url = self.suite_engine_proc.get_suite_log_url(None, suite_name)
text += "See: %s\n" % (url)
user = pwd.getpwuid(os.getuid()).pw_name
conf = ResourceLocator.default().get_conf()
host = conf.get_value(["rose-suite-hook", "email-host"],
default="localhost")
msg = MIMEText(text)
msg["From"] = user + "@" + host
msg["To"] = msg["From"]
if mail_cc_list:
mail_cc_addresses = []
for mail_cc_address in mail_cc_list:
if "@" not in mail_cc_address:
mail_cc_address += "@" + host
mail_cc_addresses.append(mail_cc_address)
msg["Cc"] = ", ".join(mail_cc_addresses)
mail_cc_list = mail_cc_addresses
else:
mail_cc_list = []
msg["Subject"] = "[%s] %s" % (hook_event, suite_name)
smtp_host = conf.get_value(["rose-suite-hook", "smtp-host"],
default="localhost")
try:
smtp = SMTP(smtp_host)
smtp.sendmail(
msg["From"], [msg["To"]] + mail_cc_list, msg.as_string())
smtp.quit()
except (socket.error, SMTPException) as email_exc:
pass
# Shut down if required
if should_shutdown:
self.suite_engine_proc.shutdown(suite_name, args=["--kill"])
if email_exc is not None:
raise
__call__ = run
def main():
"""Implement "rose suite-hook" command."""
opt_parser = RoseOptionParser()
opt_parser.add_my_options(
"mail_cc", "mail", "retrieve_job_logs", "shutdown")
opts, args = opt_parser.parse_args()
for key in ["mail_cc"]:
values = []
if getattr(opts, key):
for value in getattr(opts, key):
values.extend(value.split(","))
setattr(opts, key, values)
report = Reporter(opts.verbosity - opts.quietness - 1) # Reduced default
popen = RosePopener(event_handler=report)
suite_engine_proc = SuiteEngineProcessor.get_processor(
event_handler=report, popen=popen)
args = suite_engine_proc.process_suite_hook_args(*args, **vars(opts))
hook = RoseSuiteHook(event_handler=report,
popen=popen,
suite_engine_proc=suite_engine_proc)
hook(*args,
should_mail=opts.mail,
mail_cc_list=opts.mail_cc,
should_shutdown=opts.shutdown,
should_retrieve_job_logs=opts.retrieve_job_logs)
if __name__ == "__main__":
main()
| gpl-3.0 | -6,236,962,520,064,511,000 | 37.483221 | 79 | 0.571503 | false |
cloudify-cosmo/packman | setup.py | 1 | 2853 | ########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
# from setuptools import find_packages
from setuptools.command.test import test as testcommand
import sys
import re
import os
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
# intentionally *not* adding an encoding option to open
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
print('VERSION: ', version_match.group(1))
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
class Tox(testcommand):
def finalize_options(self):
testcommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
setup(
name='packman',
version=find_version('packman', '__init__.py'),
url='https://github.com/cloudify-cosmo/packman',
author='nir0s',
author_email='[email protected]',
license='LICENSE',
description='Package Generator',
long_description=read('README.rst'),
packages=['packman'],
entry_points={
'console_scripts': [
'pkm = packman.pkm:main',
]
},
install_requires=[
"jinja2==2.7.2",
"docopt==.0.6.1",
"pyyaml==3.10",
"sh==1.11",
"requests==2.5.1",
],
tests_require=['nose', 'tox'],
test_suite='packman.test.test_packman',
cmdclass={'test': Tox},
classifiers=[
'Programming Language :: Python',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving :: Packaging',
],
)
| apache-2.0 | -7,370,398,719,941,188,000 | 30.7 | 79 | 0.629863 | false |
05bit/bachata | docs/conf.py | 1 | 9441 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Bachata documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 7 22:16:05 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Bachata'
copyright = '2015, Alexey Kinev'
author = 'Alexey Kinev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
try:
import sphinx_rtd_theme
except ImportError:
sphinx_rtd_theme = None
if sphinx_rtd_theme:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bachatadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Bachata.tex', 'Bachata Documentation',
'Alexey Kinev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bachata', 'Bachata Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Bachata', 'Bachata Documentation',
author, 'Bachata', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | 263,612,688,371,594,240 | 30.787879 | 79 | 0.705646 | false |
mcdallas/nba_py | nba_py/constants.py | 1 | 17673 | CURRENT_SEASON = '2016-17'
TEAMS = {
'ATL': {
'abbr': 'ATL',
'city': 'Atlanta',
'code': 'hawks',
'conference': 'Eastern',
'displayAbbr': 'ATL',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612737',
'name': 'Hawks',
'color': 'E2373E',
'colors': ['E2373E', '002A5C', 'BAC4CA']
}, 'BOS': {
'abbr': 'BOS',
'city': 'Boston',
'code': 'celtics',
'conference': 'Eastern',
'displayAbbr': 'BOS',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612738',
'name': 'Celtics',
'color': '007239',
'colors': ['007239', 'AE8445', '982527', '000000']
}, 'BKN': {
'abbr': 'BKN',
'city': 'Brooklyn',
'code': 'nets',
'conference': 'Eastern',
'displayAbbr': 'BKN',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612751',
'name': 'Nets',
'color': '000000',
'colors': ['000000', 'FFFFFF']
}, 'CHA': {
'abbr': 'CHA',
'city': 'Charlotte',
'code': 'hornets',
'conference': 'Eastern',
'displayAbbr': 'CHA',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612766',
'name': 'Hornets',
'color': '00848E',
'colors': ['00848E', '260F54', 'CCCCCC']
}, 'CHI': {
'abbr': 'CHI',
'city': 'Chicago',
'code': 'bulls',
'conference': 'Eastern',
'displayAbbr': 'CHI',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612741',
'name': 'Bulls',
'color': 'C60033',
'colors': ['C60033', '000000']
}, 'CLE': {
'abbr': 'CLE',
'city': 'Cleveland',
'code': 'cavaliers',
'conference': 'Eastern',
'displayAbbr': 'CLE',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612739',
'name': 'Cavaliers',
'color': '860038',
'colors': ['860038', '002D62', 'FDBA31']
}, 'DAL': {
'abbr': 'DAL',
'city': 'Dallas',
'code': 'mavericks',
'conference': 'Western',
'displayAbbr': 'DAL',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612742',
'name': 'Mavericks',
'color': '0063AF',
'colors': ['0063AF', 'BAC4CA', '000000']
}, 'DEN': {
'abbr': 'DEN',
'city': 'Denver',
'code': 'nuggets',
'conference': 'Western',
'displayAbbr': 'DEN',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612743',
'name': 'Nuggets',
'color': '559FD6',
'colors': ['559FD6', '006BB7', 'FEA927']
}, 'DET': {
'abbr': 'DET',
'city': 'Detroit',
'code': 'pistons',
'conference': 'Eastern',
'displayAbbr': 'DET',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612765',
'name': 'Pistons',
'color': 'EC003D',
'colors': ['EC003D', '0058A6', '001D4A']
}, 'GSW': {
'abbr': 'GSW',
'city': 'Golden State',
'code': 'warriors',
'conference': 'Western',
'displayAbbr': 'GSW',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612744',
'name': 'Warriors',
'color': '0068B3',
'colors': ['0068B3', 'FFC423']
}, 'HOU': {
'abbr': 'HOU',
'city': 'Houston',
'code': 'rockets',
'conference': 'Western',
'displayAbbr': 'HOU',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612745',
'name': 'Rockets',
'color': 'C60033',
'colors': ['C60033', '000000']
}, 'IND': {
'abbr': 'IND',
'city': 'Indiana',
'code': 'pacers',
'conference': 'Eastern',
'displayAbbr': 'IND',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612754',
'name': 'Pacers',
'color': '001D4A',
'colors': ['001D4A', 'FEAC2D', 'B0B2B5']
}, 'LAC': {
'abbr': 'LAC',
'city': 'Los Angeles',
'code': 'clippers',
'conference': 'Western',
'displayAbbr': 'LAC',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612746',
'name': 'Clippers',
'color': '00559A',
'colors': ['00559A', 'EC003D']
}, 'LAL': {
'abbr': 'LAL',
'city': 'Los Angeles',
'code': 'lakers',
'conference': 'Western',
'displayAbbr': 'LAL',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612747',
'name': 'Lakers',
'color': 'FEA927',
'colors': ['FEA927', '42186E', '000000']
}, 'MEM': {
'abbr': 'MEM',
'city': 'Memphis',
'code': 'grizzlies',
'conference': 'Western',
'displayAbbr': 'MEM',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612763',
'name': 'Grizzlies',
'color': '182A48',
'colors': ['182A48', '4C78AD', 'FEA927', 'AAC8E5']
}, 'MIA': {
'abbr': 'MIA',
'city': 'Miami',
'code': 'heat',
'conference': 'Eastern',
'displayAbbr': 'MIA',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612748',
'name': 'Heat',
'color': '98002E',
'colors': ['98002E', 'F88D1D', '000000']
}, 'MIL': {
'abbr': 'MIL',
'city': 'Milwaukee',
'code': 'bucks',
'conference': 'Eastern',
'displayAbbr': 'MIL',
'displayConference': 'Eastern',
'division': 'Central',
'id': '1610612749',
'name': 'Bucks',
'color': 'C41230',
'colors': ['C41230', '003815', 'BAC4CA']
}, 'MIN': {
'abbr': 'MIN',
'city': 'Minnesota',
'code': 'timberwolves',
'conference': 'Western',
'displayAbbr': 'MIN',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612750',
'name': 'Timberwolves',
'color': '#003F70',
'colors': ['003F70', '006F42', 'BAC4CA', 'FFE211', 'DE2032', '000000']
}, 'NOP': {
'abbr': 'NOP',
'city': 'New Orleans',
'code': 'pelicans',
'conference': 'Western',
'displayAbbr': 'NOP',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612740',
'name': 'Pelicans',
'color': '#002B5C',
'colors': ['002B5C', 'B4975A', 'E13A3E']
}, 'NYK': {
'abbr': 'NYK',
'city': 'New York',
'code': 'knicks',
'conference': 'Eastern',
'displayAbbr': 'NYK',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612752',
'name': 'Knicks',
'color': 'F3571F',
'colors': ['F3571F', '0067B2', 'BAC4CA']
}, 'OKC': {
'abbr': 'OKC',
'city': 'Oklahoma City',
'code': 'thunder',
'conference': 'Western',
'displayAbbr': 'OKC',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612760',
'name': 'Thunder',
'color': 'FDBB30',
'colors': ['FDBB30', 'F05133', '007DC3', '002D62']
}, 'ORL': {
'abbr': 'ORL',
'city': 'Orlando',
'code': 'magic',
'conference': 'Eastern',
'displayAbbr': 'ORL',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612753',
'name': 'Magic',
'color': '006BB7',
'colors': ['006BB7', 'BAC4CA', '000000']
}, 'PHI': {
'abbr': 'PHI',
'city': 'Philadelphia',
'code': 'sixers',
'conference': 'Eastern',
'displayAbbr': 'PHI',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612755',
'name': 'Sixers',
'color': 'EC003D',
'colors': ['EC003D', '00559A', 'BAC4CA']
}, 'PHX': {
'abbr': 'PHX',
'city': 'Phoenix',
'code': 'suns',
'conference': 'Western',
'displayAbbr': 'PHX',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612756',
'name': 'Suns',
'color': 'E45F1F',
'colors': ['E45F1F', 'F89F1B', 'BAC4CA', '000000']
}, 'POR': {
'abbr': 'POR',
'city': 'Portland',
'code': 'blazers',
'conference': 'Western',
'displayAbbr': 'POR',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612757',
'name': 'Trail Blazers',
'color': 'DE2032',
'colors': ['DE2032', 'BAC4CA', '000000']
}, 'SAC': {
'abbr': 'SAC',
'city': 'Sacramento',
'code': 'kings',
'conference': 'Western',
'displayAbbr': 'SAC',
'displayConference': 'Western',
'division': 'Pacific',
'id': '1610612758',
'name': 'Kings',
'color': '542E91',
'colors': ['542E91', 'BAC4CA', '000000']
}, 'SAS': {
'abbr': 'SAS',
'city': 'San Antonio',
'code': 'spurs',
'conference': 'Western',
'displayAbbr': 'SAS',
'displayConference': 'Western',
'division': 'Southwest',
'id': '1610612759',
'name': 'Spurs',
'color': '#BA24CA',
'colors': ['BA24CA', '000000']
}, 'TOR': {
'abbr': 'TOR',
'city': 'Toronto',
'code': 'raptors',
'conference': 'Eastern',
'displayAbbr': 'TOR',
'displayConference': 'Eastern',
'division': 'Atlantic',
'id': '1610612761',
'name': 'Raptors',
'color': 'C60033',
'colors': ['C60033', 'BAC4CA']
}, 'UTA': {
'abbr': 'UTA',
'city': 'Utah',
'code': 'jazz',
'conference': 'Western',
'displayAbbr': 'UTA',
'displayConference': 'Western',
'division': 'Northwest',
'id': '1610612762',
'name': 'Jazz',
'color': '#002A5C',
        'colors': ['002A5C', '004812', 'FCB034', 'BAC4CA']
}, 'WAS': {
'abbr': 'WAS',
'city': 'Washington',
'code': 'wizards',
'conference': 'Eastern',
'displayAbbr': 'WAS',
'displayConference': 'Eastern',
'division': 'Southeast',
'id': '1610612764',
'name': 'Wizards',
'color': '002A5B',
'colors': ['002A5B', 'E21836', 'BAC4CA']
}
}
class _DefaultN:
Default = 'N'
class _DefaultBlank:
Default = ''
class _DefaultZero:
Default = '0'
class League:
NBA = '00'
Default = NBA
class PerMode:
Totals = 'Totals'
PerGame = 'PerGame'
MinutesPer = 'MinutesPer'
Per48 = 'Per48'
Per40 = 'Per40'
Per36 = 'Per36'
PerMinute = 'PerMinute'
PerPossession = 'PerPossession'
PerPlay = 'PerPlay'
Per100Possessions = 'Per100Possessions'
Per100Plays = 'Per100Plays'
Default = PerGame
class SeasonType:
Regular = 'Regular Season'
Playoffs = 'Playoffs'
Default = Regular
class MeasureType:
Base = 'Base'
Advanced = 'Advanced'
Misc = 'Misc'
FourFactors = 'Four Factors'
Scoring = 'Scoring'
Opponent = 'Opponent'
Usage = 'Usage'
Default = Base
class PtMeasureType:
SpeedDistance = 'SpeedDistance'
class GroupQuantity:
Default = 5
class Outcome(_DefaultBlank):
Win = 'W'
Loss = 'L'
class Location(_DefaultBlank):
Home = 'Home'
Away = 'Away'
class SeasonSegment(_DefaultBlank):
EntireSeason = ''
PreAllStar = 'Pre All-Star'
PostAllStar = 'Post All-Star'
class DateFrom(_DefaultBlank):
pass
class DateTo(_DefaultBlank):
pass
class VsConference(_DefaultBlank):
All = ''
East = 'East'
West = 'West'
class VsDivision(_DefaultBlank):
All = ''
Atlantic = 'Atlantic'
Central = 'Central'
Northwest = 'Northwest'
Pacific = 'Pacific'
Southeast = 'Southeast'
Southwest = 'Southwest'
class GameSegment(_DefaultBlank):
EntireGame = ''
FirstHalf = 'First Half'
SecondHalf = 'Second Half'
Overtime = 'Overtime'
class ClutchTime(_DefaultBlank):
Last5Min = 'Last 5 Minutes'
Last4Min = 'Last 4 Minutes'
Last3Min = 'Last 3 Minutes'
Last2Min = 'Last 2 Minutes'
Last1Min = 'Last 1 Minutes'
Last30Sec = 'Last 30 Seconds'
Last10Sec = 'Last 10 Seconds'
class ShotClockRange(_DefaultBlank):
AllRanges = ''
# I honestly don't know anytime the shot clock would be off
ShotClockOff = 'ShotClock Off'
def get(self, n):
if n > 24 or n < 0:
return ''
elif 22 <= n <= 24:
return '24-22'
elif 18 <= n < 22:
return '22-18 Very Early'
elif 15 <= n < 18:
return '18-15 Early'
elif 7 <= n < 15:
return '15-7 Average'
elif 4 <= n < 7:
return '7-4 Late'
elif 0 <= n < 4:
return '4-0 Very Late'
class AheadBehind(_DefaultBlank):
AheadOrBehind = 'Ahead or Behind'
AheadOrTied = 'Ahead or Tied'
BehindOrTied = 'Behind or Tied'
class PlusMinus(_DefaultN):
pass
class PaceAdjust(_DefaultN):
pass
class Rank(_DefaultN):
pass
class OpponentTeamID(_DefaultZero):
pass
class Period(_DefaultZero):
AllQuarters = '0'
FirstQuarter = '1'
SecondQuarter = '2'
ThirdQuarter = '3'
FourthQuarter = '4'
def Overtime(self, n):
return str(4 + n)
class LastNGames(_DefaultZero):
pass
class PlayoffRound(_DefaultZero):
All = '0'
QuarterFinals = '1'
SemiFinals = '2'
ConferenceFinals = '3'
Finals = '4'
class Month(_DefaultZero):
All = '0'
October = '1'
November = '2'
December = '3'
January = '4'
February = '5'
March = '6'
April = '7'
May = '8'
June = '9'
July = '10'
August = '11'
September = '12'
class RangeType(_DefaultZero):
pass
class StartRange(_DefaultZero):
pass
class EndRange(_DefaultZero):
pass
class StartPeriod(Period):
pass
class EndPeriod(Period):
pass
class StatCategory:
PTS = 'PTS'
FGM = 'FGM'
FGA = 'FGA'
FG_PCT = 'FG%'
FG3M = '3PM'
FG3A = '3PA'
FG3_PCT = '3P%'
FTM = 'FTM'
FTA = 'FTA'
FT_PCT = 'FT%'
OREB = 'OREB'
DREB = 'DREB'
REB = 'REB'
AST = 'AST'
STL = 'STL'
BLK = 'BLK'
TOV = 'TOV'
EFF = 'EFF'
AST_TOV = 'AST/TO'
STL_TOV = 'STL/TOV'
PF = 'PF'
Default = PTS
class ContextMeasure:
# Not sure if this is mapped correctly. Source: https://github.com/bradleyfay/NBAStats
FGM = 'FGM'
FGA = 'FGA'
FG_PCT = 'FG_PCT'
FG3M = 'FG3m'
FG3A = 'FG3A'
FG3_PCT = 'FG3_PCT'
PF = 'PF'
EFG_PCT = 'EFG_PCT'
TS_PCT = 'TS_PCT'
PTS_FB = 'PTS_FB'
PTS_OFF_TOV = 'PTS_OFF_TOV'
PTS_2ND_CHANCE = 'PTS_2ND_CHANCE'
Default = FGM
class Scope:
AllPlayers = 'S'
Rookies = 'Rookies'
Default = AllPlayers
class PlayerScope:
# ugh this is so similar to Scope, why does it have its own
AllPlayers = 'All Players'
Rookies = 'Rookie'
Default = AllPlayers
class PlayerOrTeam:
Player = 'Player'
Team = 'Team'
Default = Player
class GameScope:
Season = 'Season'
Last10 = 'Last 10'
Yesterday = 'Yesterday'
Finals = 'Finals'
Default = Season
class Game_Scope(_DefaultBlank):
Last10 = 'Last 10'
Yesterday = 'Yesterday'
class Player_or_Team:
Player = 'P'
Team = 'T'
Default = Player
class Conference(VsConference):
pass
class Division(VsDivision):
pass
class TeamID(_DefaultZero):
pass
class GameID(_DefaultBlank):
pass
class RookieYear(_DefaultBlank):
pass
class PlayerExperience(_DefaultBlank):
Rookie = 'Rookie'
Sophomore = 'Sophomore'
Veteran = 'Veteran'
class PlayerPosition(_DefaultBlank):
Forward = 'F'
Center = 'C'
Guard = 'G'
class StarterBench(_DefaultBlank):
Starters = 'Starters'
Bench = 'Bench'
class DraftYear(_DefaultBlank):
pass
class DraftPick(_DefaultBlank):
FirstRound = '1st+Round'
SecondRound = '2nd+Round'
FirstPick = '1st+Pick'
Lottery = 'Lottery+Pick'
Top5 = 'Top+5+Pick'
Top10 = 'Top+10+Pick'
Top15 = 'Top+15+Pick'
Top20 = 'Top+20+Pick'
Top25 = 'Top+25+Pick'
Picks11Thru20 = 'Picks+11+Thru+20'
Picks21Thru30 = 'Picks+21+Thru+30'
Undrafted = 'Undrafted'
class College(_DefaultBlank):
pass
class Country(_DefaultBlank):
pass
class Height(_DefaultBlank):
'''
Example:
for greater than 6ft8 api call should be GT+6-8
for lower than 7ft3 api call should be LT+7-3
'''
class Weight(_DefaultBlank):
'''
Example:
for greater than 225lbs api call should be GT+225lbs
'''
class Counter:
Default = '1000'
class Sorter:
PTS = 'PTS'
FGM = 'FGM'
FGA = 'FGA'
FG_PCT = 'FG_PCT'
FG3M = 'FG3M'
FG3A = 'FG3A'
FG3_PCT = 'FG3_PCT'
FTM = 'FTM'
FTA = 'FTA'
FT_PCT = 'FT_PCT'
OREB = 'OREB'
DREB = 'DREB'
AST = 'AST'
STL = 'STL'
BLK = 'BLK'
TOV = 'TOV'
REB = 'REB'
Default = PTS
class Direction:
DESC = 'DESC'
ASC = 'ASC'
Default = DESC
| bsd-3-clause | -3,240,081,273,265,094,000 | 21.951948 | 90 | 0.502009 | false |
swiftcoder/ashima-iv | src/game.py | 1 | 2465 |
import pyglet
from pyglet.gl import *
import math
from app import AppState, enter_state
from outcome import OutcomeState
from window import Window
from entity import World
import factories
from euclid import Vector3
from resources import Resources
from camera import Camera
from controller import Controller
from tether import Tether
from graphics import Graphics
from teams import Teams
class GameState(AppState):
def start(self):
music = pyglet.resource.media('data/music/the_moonlight_strikers_act1.mp3')
self.player = music.play()
self.sunlight = Resources.load_shader('data/shaders/sunlight.shader')
ship = factories.create_hammerfall(Vector3(0, -250, 2400), 'red')
World.add(ship)
for i in range(4, 0, -1):
ship = factories.create_anaconda(Vector3(i*5, i*10, i*10 + 1000), 'red')
World.add(ship)
for i in range(2, 0, -1):
ship = factories.create_viper(Vector3(i*40, i*-10, i*10 + 25), 'blue', i != 1)
World.add(ship)
self.ship = ship
World.set_player(self.ship)
@ship.event
def on_remove(ship):
print 'defeat'
enter_state( OutcomeState(False) )
self.fps_display = pyglet.clock.ClockDisplay()
glEnable(GL_CULL_FACE)
glFrontFace(GL_CCW)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
aspect = float(Window.width)/float(Window.height)
camera = Camera(math.pi/4, aspect, 0.1, 100000.0)
Graphics.camera = camera
cam = factories.create_camera(camera)
World.add(cam)
tether = Tether(cam, ship, Vector3(-5, 8, -16), Vector3(0, 0, 65))
aim = factories.aim_assist(cam)
crosshairs = factories.cross_hairs(ship)
factories.create_sky(cam)
def resume(self):
control = Controller(self.ship)
self.player.play()
def pause(self):
if self.player:
self.player.pause()
def update(self, dt):
World.perform_update(dt)
if Teams.in_team('red') == []:
print 'victory'
enter_state( OutcomeState(True) )
def draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor4f(1, 1, 1, 1)
self.sunlight.bind()
self.sunlight.uniform('sunDir', Vector3(-1, 1, 0).normalize())
self.sunlight.unbind()
World.perform_frame()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, Window.width, 0, Window.height, -100, 100)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.fps_display.draw()
| bsd-3-clause | 3,728,101,832,243,301,400 | 22.037383 | 81 | 0.691278 | false |
Reilithion/xmms2-reilithion | wafadmin/Tools/gas.py | 1 | 1258 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2008 (ita)
"as and gas"
import os, sys
import Task
from TaskGen import extension, taskgen, after, before
EXT_ASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']
as_str = '${AS} ${ASFLAGS} ${_ASINCFLAGS} ${SRC} -o ${TGT}'
Task.simple_task_type('asm', as_str, 'PINK', ext_out='.o')
@extension(EXT_ASM)
def asm_hook(self, node):
# create the compilation task: cpp or cc
task = self.create_task('asm')
try: obj_ext = self.obj_ext
except AttributeError: obj_ext = '_%d.o' % self.idx
task.inputs = [node]
task.outputs = [node.change_ext(obj_ext)]
self.compiled_tasks.append(task)
self.meths.append('asm_incflags')
@taskgen
@after('apply_obj_vars_cc')
@after('apply_obj_vars_cxx')
@before('apply_link')
def asm_incflags(self):
if self.env['ASINCFLAGS']: self.env['_ASINCFLAGS'] = self.env['ASINCFLAGS']
if 'cxx' in self.features: self.env['_ASINCFLAGS'] = self.env['_CXXINCFLAGS']
else: self.env['_ASINCFLAGS'] = self.env['_CCINCFLAGS']
def detect(conf):
comp = os.environ.get('AS', '')
if not comp: comp = conf.find_program('as', var='AS')
if not comp: comp = conf.find_program('gas', var='AS')
if not comp: comp = conf.env['CC']
if not comp: return
v = conf.env
v['ASFLAGS'] = ''
| lgpl-2.1 | 4,753,860,226,152,624,000 | 26.347826 | 78 | 0.651828 | false |
YannickB/odoo-hosting | clouder_template_gitlab/oneclick.py | 1 | 3584 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from odoo import models, api
except ImportError:
from openerp import models, api
class ClouderNode(models.Model):
"""
    Add methods to manage the oneclick deploy specificities.
"""
_inherit = 'clouder.node'
@api.multi
def oneclick_deploy_exec(self):
super(ClouderNode, self).oneclick_deploy_exec()
self.oneclick_deploy_element('service', 'backup-bup')
bind = self.oneclick_deploy_element('service', 'bind', ports=[53])
if not self.domain_id.dns_id:
self.domain_id.write({'dns_id': bind.id})
self.deploy_dns_exec()
self.oneclick_deploy_element('service', 'postfix-all', ports=[25])
self.oneclick_deploy_element('service', 'proxy', ports=[80, 443])
# service = self.oneclick_deploy_element('service', 'shinken')
# self.oneclick_deploy_element('base', 'shinken', service=service)
#
# service = self.oneclick_deploy_element('service', 'registry')
# self.oneclick_deploy_element('base', 'registry', service=service)
#
# self.oneclick_deploy_element('service', 'gitlab-all')
# self.oneclick_deploy_element(
# 'base', 'gitlab', code_service='gitlab-all-gitlab')
#
# self.oneclick_deploy_element('service', 'gitlabci')
@api.multi
def oneclick_purge_exec(self):
service_obj = self.env['clouder.service']
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'gitlabci')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'gitlab-all')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'registry')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'shinken')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'proxy')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'bind')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'postfix-all')]).unlink()
service_obj.search([('environment_id', '=', self.environment_id.id),
('suffix', '=', 'backup-bup')]).unlink()
super(ClouderNode, self).oneclick_purge_exec()
| agpl-3.0 | 4,088,218,775,987,584,500 | 37.12766 | 79 | 0.57394 | false |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/nnvm/tests/python/compiler/test_fold_axis.py | 1 | 5980 | """Unittest cases for fold_axis"""
import nnvm
import nnvm.testing.resnet
import numpy as np
from nnvm import symbol as sym
from nnvm.compiler import graph_util, graph_attr
def test_fold_axis_conv():
# Before simplify
def before(x, conv_weight, conv_bias, in_scale, out_scale, channels):
x = x * sym.expand_dims(in_scale, axis=1, num_newaxis=2)
y = sym.conv2d(x, conv_weight, conv_bias,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
name="conv")
y = sym.relu(y)
y = y * sym.expand_dims(out_scale, axis=1, num_newaxis=2)
return y
def expected(x, conv_weight, conv_bias, in_scale, out_scale, channels):
conv_weight = conv_weight * sym.expand_dims(out_scale, axis=1, num_newaxis=3)
conv_weight = conv_weight * sym.expand_dims(in_scale, axis=1, num_newaxis=2)
conv_bias = conv_bias * out_scale
y = sym.conv2d(x,
conv_weight,
conv_bias,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
name="conv")
y = sym.relu(y)
return y
def check(shape, channels):
x = sym.Variable("x") + 1
weight = sym.Variable("weight")
bias = sym.Variable("bias")
in_scale = sym.Variable("in_scale")
out_scale = sym.Variable("out_scale")
y1 = before(x, weight, bias, in_scale, out_scale, channels)
y2 = expected(x, weight, bias, in_scale, out_scale, channels)
ishape = {"x": shape, "out_scale": (channels,), "in_scale": (shape[1],)}
g1 = nnvm.graph.create(y1)
g2 = nnvm.graph.create(y2)
graph_attr.set_shape_inputs(g1, ishape)
g1 = g1.apply("InferShape").apply("FoldScaleAxis")
# assert graph equals as expected
graph_util.check_graph_equal(g1, g2)
check((2, 4, 10, 10), 2)
def test_fold_axis_depthwise_conv():
# Before simplify
def before(x, conv_weight, conv_bias, in_scale, out_scale, channels):
x = x * sym.expand_dims(in_scale, axis=1, num_newaxis=2)
y = sym.conv2d(x, conv_weight, conv_bias,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
groups=54,
name="depthiwise_conv")
y = sym.relu(y)
y = y * sym.expand_dims(out_scale, axis=1, num_newaxis=2)
return y
def expected(x, conv_weight, conv_bias, in_scale, out_scale, channels):
conv_weight = conv_weight * sym.expand_dims(out_scale, axis=1, num_newaxis=3)
conv_weight = conv_weight * sym.expand_dims(in_scale, axis=1, num_newaxis=3)
conv_bias = conv_bias * out_scale
y = sym.conv2d(x,
conv_weight,
conv_bias,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
groups=54,
name="depthiwise_conv")
y = sym.relu(y)
return y
def check(shape, channels):
x = sym.Variable("x") + 1
weight = sym.Variable("weight")
bias = sym.Variable("bias")
in_scale = sym.Variable("in_scale")
out_scale = sym.Variable("out_scale")
y1 = before(x, weight, bias, in_scale, out_scale, channels)
y2 = expected(x, weight, bias, in_scale, out_scale, channels)
ishape = {"x": shape, "out_scale": (channels,), "in_scale": (shape[1],)}
g1 = nnvm.graph.create(y1)
g2 = nnvm.graph.create(y2)
graph_attr.set_shape_inputs(g1, ishape)
g1 = g1.apply("InferShape").apply("FoldScaleAxis")
# assert graph equals as expected
graph_util.check_graph_equal(g1, g2)
check((1, 54, 63, 127), 54)
def test_fold_fail():
# Before simplify
def before(x, scale, channels):
y = sym.conv2d(x,
channels=channels,
kernel_size=(3, 3),
padding=(1, 1),
name="conv")
y = y * sym.expand_dims(scale, axis=1, num_newaxis=1)
return y
def check(shape, channels):
x = sym.Variable("x")
bias = sym.Variable("bias")
scale = sym.Variable("scale")
y1 = before(x, scale, channels)
ishape = {"x": shape, "scale": (channels,), "bias": (channels,)}
g1 = nnvm.graph.create(y1)
graph_attr.set_shape_inputs(g1, ishape)
g2 = g1.apply("InferShape").apply("FoldScaleAxis")
# assert graph equals as expected
graph_util.check_graph_equal(g1, g2)
check((2, 10, 10, 10), 10)
def test_fold_resnet():
batch_size = 1
num_classes = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) +image_shape
net, params = nnvm.testing.resnet.get_workload(
batch_size=1, image_shape=image_shape)
ishape = {"data" : data_shape}
graph = nnvm.graph.create(net)
data = np.random.uniform(size=data_shape).astype("float32")
# Initial pass do shape type inference
shape, _ = graph_util.infer_shape(graph, **ishape)
ishape.update(zip(graph.index.input_names, shape))
def run_prune(graph, params, opt_level):
# Apply optimization
with nnvm.compiler.build_config(opt_level=0):
graph = nnvm.compiler.optimize(graph, ishape)
graph, params = nnvm.compiler.build_module.precompute_prune(graph, params)
params["data"] = data
return nnvm.compiler.build_module._run_graph(graph, params)
x = run_prune(graph, params, 0)
y = run_prune(graph, params, 3)
np.testing.assert_allclose(y[0].asnumpy(), x[0].asnumpy())
if __name__ == "__main__":
test_fold_resnet()
test_fold_axis_conv()
test_fold_fail()
test_fold_axis_depthwise_conv()
| apache-2.0 | -4,966,185,714,852,103,000 | 37.089172 | 85 | 0.550502 | false |
FairyDevicesRD/FairyMaCorpus | scripts/validate.py | 1 | 1535 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import codecs
import sys
def operation(inf, outf):
'''
Check
'''
err = False
annotations = set([])
for line in inf:
if line.startswith(";"):
continue
items = line[:-1].split("\t")
if len(items) == 0:
continue
        # Among ASCII characters, only '|' and '?' are allowed.
annotation = items[0]
for char in annotation:
if ord(char) <= 128:
if char not in ['|', '?']:
outf.write("Illigal ASCII character: %s (%s)\n" % (char, ord(char)))
err = True
if annotation in annotations:
outf.write("Duplication: %s\n" % (annotation))
err = True
annotations.add(annotation)
return err
def main():
'''
Parse arguments
'''
oparser = argparse.ArgumentParser()
oparser.add_argument("-i", "--input", dest="input", default="-")
oparser.add_argument("-o", "--output", dest="output", default="-")
oparser.add_argument(
"--verbose", dest="verbose", action="store_true", default=False)
opts = oparser.parse_args()
if opts.input == "-":
inf = sys.stdin
else:
inf = codecs.open(opts.input, "r", "utf8")
if opts.output == "-":
outf = sys.stdout
else:
outf = codecs.open(opts.output, "w", "utf8")
err = operation(inf, outf)
if err:
sys.exit(-1)
if __name__ == '__main__':
main()
| apache-2.0 | -762,907,266,110,665,200 | 23.365079 | 88 | 0.517915 | false |
codebox/star-charts | diagram.py | 1 | 2486 |
from svg import Svg
import codecs
MARGIN_X=20
MARGIN_Y=60
MAGNIFICATION = 500
MIN_D = 1
MAX_D = 4
DIMMEST_MAG = 6
BRIGHTEST_MAG = -1.5
LABEL_OFFSET_X = 4
LABEL_OFFSET_Y = 3
FONT_SIZE=16
FONT_COLOUR='#167ac6'
TITLE_SIZE=16
TITLE_COLOUR='#000'
COORDS_SIZE=12
COORDS_COLOUR='#000'
STAR_COLOUR='#000'
CURVE_WIDTH = 0.1
CURVE_COLOUR = '#000'
class Diagram:
def __init__(self, title, area, star_data_list):
self.title = title
self.area = area
self.star_data_list = star_data_list
self.curves = []
self.border_min_x = self.border_min_y = self.border_max_x = self.border_max_y = None
def add_curve(self, curve_points):
self.curves.append(curve_points)
def _mag_to_d(self, m):
mag_range = DIMMEST_MAG - BRIGHTEST_MAG
m_score = (DIMMEST_MAG - m) / mag_range
r_range = MAX_D - MIN_D
return MIN_D + m_score * r_range
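    # Illustrative worked example (not part of the original source): with the
    # module constants above (DIMMEST_MAG=6, BRIGHTEST_MAG=-1.5, MIN_D=1,
    # MAX_D=4), a star of magnitude 0.0 gives m_score = (6 - 0) / 7.5 = 0.8,
    # so _mag_to_d returns 1 + 0.8 * 3 = 3.4, the diameter used for its dot.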
def _invert_and_offset(self, x, y):
return x + MARGIN_X, (self.star_data_list.max_y - y) + MARGIN_Y
def render_svg(self, outfile):
svg = Svg()
# add stars first
for star_data in self.star_data_list.data:
x, y = self._invert_and_offset(star_data.x, star_data.y)
svg.circle(x, y, self._mag_to_d(star_data.mag), STAR_COLOUR)
# next add labels
for star_data in self.star_data_list.data:
if star_data.label:
x, y = self._invert_and_offset(star_data.x, star_data.y)
d = self._mag_to_d(star_data.mag)
svg.text(x + LABEL_OFFSET_X + d/2, y + LABEL_OFFSET_Y, star_data.label, FONT_COLOUR, FONT_SIZE)
# next add curves
for curve_points in self.curves:
svg.curve([self._invert_and_offset(cp[0], cp[1]) for cp in curve_points], CURVE_WIDTH, CURVE_COLOUR)
# title
center_x = self.star_data_list.max_x/2 + MARGIN_X
svg.text(center_x, MARGIN_Y/2, self.title, TITLE_COLOUR, TITLE_SIZE, 'middle', 'underline')
# coords
chart_bottom_y = self.star_data_list.max_y + MARGIN_Y
svg.text(center_x, chart_bottom_y + MARGIN_Y/2, "Right Ascension: {}-{}".format(self.area.ra_min, self.area.ra_max), COORDS_COLOUR, COORDS_SIZE, 'middle')
svg.text(center_x, chart_bottom_y + MARGIN_Y/2 + COORDS_SIZE, "Declination: {}-{}".format(self.area.dec_min, self.area.dec_max), COORDS_COLOUR, COORDS_SIZE, 'middle')
codecs.open(outfile, 'w', 'utf-8').writelines(svg.to_list())
| mit | -7,223,114,317,514,982,000 | 30.871795 | 174 | 0.604988 | false |
avinassh/prawoauth2 | prawoauth2/PrawOAuth2Server.py | 1 | 3785 | #!/usr/bin/env python
import webbrowser
import tornado.ioloop
import tornado.web
__all__ = ['PrawOAuth2Server']
application = None
REDIRECT_URL = 'http://127.0.0.1:65010/authorize_callback'
SCOPES = ['identity', 'read']
REFRESHABLE = True
CODE = None
class AuthorizationHandler(tornado.web.RequestHandler):
def get(self):
global CODE
CODE = self.get_argument('code')
self.write('successful (:')
tornado.ioloop.IOLoop.current().stop()
class PrawOAuth2Server:
"""Creates an instance of `PrawOAuth2Server` which is responsible for
getting `access_token` and `refresh_token` given valid `app_key` and
`app_secret`. This is meant to be run once only.
:param reddit_client: An Instance of praw
:param app_key: App Secret (or also known as Client Id) of your
app. Find them here: https://www.reddit.com/prefs/apps/
:param app_secret: App Key (or also known as Client Secret) of your
app. Find them here: https://www.reddit.com/prefs/apps/
:param state: Some unique string which represents your client. You
could use `user_agent` which you used when creating the praw
instance.
:param scopes: List of scopes for OAuth. Default is `['identity']`.
https://praw.readthedocs.org/en/latest/pages/oauth.html#oauth-scopes
:param redirect_url: Redirect URL used in authorization process using
        `PrawOAuth2Server`. Default is `http://127.0.0.1:65010/authorize_callback`
(which is recommended by praw).
:param refreshable: Boolean. Specifies whether you want `access_token`
to be refreshable or not. If it is set to `False` then you have to
use `PrawOAuth2Server` again to generate new `access_token`.
Default is `True`.
"""
def __init__(self, reddit_client, app_key, app_secret,
state, redirect_url=REDIRECT_URL, scopes=SCOPES,
refreshable=REFRESHABLE):
self.reddit_client = reddit_client
self.app_key = app_key
self.app_secret = app_secret
self.state = state
self.redirect_url = redirect_url
self.scopes = set(scopes)
self.refreshable = refreshable
self.code = None
self._set_app_info()
self._set_up_tornado()
def _set_app_info(self):
self.reddit_client.set_oauth_app_info(client_id=self.app_key,
client_secret=self.app_secret,
redirect_uri=self.redirect_url)
def _set_up_tornado(self):
global application
application = tornado.web.Application([
(r'/authorize_callback', AuthorizationHandler),
])
application.listen(65010)
def _get_auth_url(self):
return self.reddit_client.get_authorize_url(
state=self.state, scope=self.scopes,
refreshable=self.refreshable)
def start(self):
"""Starts the `PrawOAuth2Server` server. It will open the default
web browser and it will take you to Reddit's authorization page,
        asking you to authorize your Reddit account (or the bot's account)
        with your app (or bot script). Once authorized successfully, it will
        show a `successful` message in the web browser.
"""
global CODE
url = self._get_auth_url()
webbrowser.open(url)
tornado.ioloop.IOLoop.current().start()
self.code = CODE
def get_access_codes(self):
"""Returns the `access_token` and `refresh_token`. Obviously, this
method should be called after `start`.
:returns: A dictionary containing `access_token` and `refresh_token`.
"""
return self.reddit_client.get_access_information(code=self.code)
| mit | -6,661,762,104,879,166,000 | 35.747573 | 81 | 0.639102 | false |
d1m0/bap_play | lib/z3_embed/embedder.py | 1 | 8204 | from bap import disasm
from bap.adt import Visitor, visit
from ..util import flatten
from z3 import If, eq, Const, And, BitVecRef, ArrayRef, BitVecNumRef, \
BitVecVal, BitVecSort, Context
from re import compile
def boolToBV(boolExp, ctx):
return If(boolExp, BitVecVal(1, 1, ctx=ctx), BitVecVal(0, 1, ctx=ctx),
ctx=ctx)
def bvToBool(bvExp, ctx):
assert eq(bvExp.sort(), BitVecSort(1, ctx=ctx))
return bvExp == BitVecVal(1, 1, ctx)
def bitsToBil(bits, target='x86-64'):
return flatten([x.bil for x in disasm(bits.toBinStr(), arch=target)])
class Stack(list):
def push(self, arg):
return self.append(arg)
def z3Ids(z3Term):
if len(z3Term.children()) == 0:
if (isinstance(z3Term, BitVecRef) or
isinstance(z3Term, ArrayRef)) and \
not isinstance(z3Term, BitVecNumRef):
return set([(z3Term.decl().name(), z3Term.sort())])
else:
return set()
else:
return reduce(lambda acc, el: acc.union(z3Ids(el)),
z3Term.children(),
set())
ssaRE = compile("(.*)\.([0-9]*)")
initialRE = compile("(.*)\.initial*")
unknownRE = compile("unknown_[0-9]*")
def unssa(name):
m = ssaRE.match(name)
assert m
return (m.groups()[0], int(m.groups()[1]))
def isInitial(name):
return initialRE.match(name) is not None
def isUnknown(name):
return unknownRE.match(name) is not None
class StmtNode:
sId = 0
def __init__(self, parents):
self.mDef = {}
self.mSort = {}
self.mPrefix = ""
self.mCond = []
# Assert simpler tree structures - only 2-way branch/join from ifs
assert (len(parents) <= 2)
self.mParents = parents
self.mSplitSrc = None
self.mId = StmtNode.sId
StmtNode.sId += 1
def lookupDef(self, name, cache=False):
if name in self.mDef:
return self
elif len(self.mParents) == 1:
return self.mParents[0].lookupDef(name)
elif len(self.mParents) > 1:
defs = set([x.lookupDef(name) for x in self.mParents])
if (len(defs) == 1):
# If all agree it hasn't been modified in some branch
return list(defs)[0]
else:
# name has been defined independently in different branches.
# Need a phi def here
# Make sure all definitions have the same sort
s = list(defs)[0].mSort[name]
for d in defs:
assert eq(s, d.mSort[name])
self.mDef[name] = defs
self.mSort[name] = s
return self
else:
return None
def cond(self, other):
if (self == other):
return []
elif (len(self.mParents) == 1):
c = self.mParents[0].cond(other)
elif (len(self.mParents) > 1):
c = self.mSplitSrc.cond(other)
else:
assert False, str(other) + " doesn't dominate " + str(self)
return c + self.mCond
def prefix(self):
if len(self.mParents) == 1:
return self.mParents[0].prefix() + self.mPrefix
elif len(self.mParents) > 1:
return self.mSplitSrc.prefix() + self.mPrefix
else:
return self.mPrefix
def ssa(self, name):
return name + self.prefix() + "." + str(self.mId)
class StmtDef(StmtNode):
def __init__(self, parents, **kwArgs):
StmtNode.__init__(self, parents)
self.mDef = kwArgs
self.mSort = {k: v.sort() for (k, v) in kwArgs.iteritems()}
class StmtBranch(StmtNode):
def __init__(self, parent, cond, prefix):
StmtNode.__init__(self, [parent])
self.mCond = [cond]
self.mPrefix = prefix
class StmtJoin(StmtNode):
def __init__(self, parents, splitSrc):
StmtNode.__init__(self, parents)
self.mSplitSrc = splitSrc
class Z3Embedder(Visitor):
""" Z3 BIL Visitor. Entry points correpsond to
the ADTs defined in the bap.bil module
"""
def __init__(self, ctx):
Visitor.__init__(self)
self.mStack = Stack()
self.mNodeMap = {}
self.mCtx = ctx
initialState = {name: Const(name + ".initial", sort)
for name, sort in self.arch_state()}
self.mRoot = StmtDef([], **initialState)
self.mScope = self.mRoot
self.mNodeMap = {self.mScope.mId: self.mScope}
self.mNumUnknowns = 0
def getFreshUnknown(self, typ):
newUnknown = "unknown_" + str(self.mNumUnknowns)
z3Unknown = Const(newUnknown, typ)
self.mScope.mDef[newUnknown] = z3Unknown
self.mScope.mSort[newUnknown] = typ
self.mNumUnknowns += 1
return z3Unknown
def pushScope(self, **kwArgs):
if (len(kwArgs) == 0):
raise TypeError("Can't push a scope unless we modify some vars")
self.mScope = StmtDef([self.mScope], **kwArgs)
self.mNodeMap[self.mScope.mId] = self.mScope
def pushBranchScope(self, prefix, cond, fromScope):
self.mScope = StmtBranch(fromScope, cond, prefix)
self.mNodeMap[self.mScope.mId] = self.mScope
def pushJoinScope(self, left, right, split):
self.mScope = StmtJoin([left, right], split)
self.mNodeMap[self.mScope.mId] = self.mScope
def popScope(self):
# Can only pop Def scopes (related to Let exprs)
assert len(self.mScope.mParents) == 1 and\
isinstance(self.mScope, StmtDef)
res = self.mScope
self.mScope = self.mScope.mParents[0]
return res
def lookupNode(self, id):
try:
return self.mNodeMap[id]
except KeyError, e:
print self.mNodeMap
raise e
def lookup(self, name):
defNode = self.mScope.lookupDef(name)
if (defNode):
return (defNode.ssa(name), defNode.mSort[name])
else:
return (name, None)
def scopeMarker(self):
return self.mScope
def extract_one(self, node, name, sort, emitted):
if (node, name) in emitted:
return []
ssaName = node.ssa(name)
defn = node.mDef[name]
ctx = self.mCtx
asserts = []
if (isinstance(defn, set)):
asserts.extend(reduce(
lambda acc, nd: acc + self.extract_one(nd, name, sort,
emitted),
defn, []))
baseDef = [x for x in defn if len(x.cond(self.mRoot)) == 0]
assert len(baseDef) == 1
baseDef = baseDef[0]
otherDefs = filter(lambda x: x != baseDef, defn)
z3Val = reduce(
lambda exp, d: If(And(*(d.cond(self.mRoot) + [ctx])),
Const(d.ssa(name), sort),
exp),
otherDefs,
Const(baseDef.ssa(name), sort))
else:
for (id, idSort) in z3Ids(defn):
if isInitial(id) or isUnknown(id):
# Initial values and unknowns are not defined in
# any scope
continue
unssaName, ssaId = unssa(id)
defnNode = self.lookupNode(ssaId)
asserts.extend(self.extract_one(defnNode,
unssaName, idSort, emitted))
z3Val = defn
asserts.append(Const(ssaName, sort) == z3Val)
emitted.add((node, name))
return asserts
def extract(self):
asserts = []
emitted = set()
for (name, sort) in self.arch_state():
asserts.extend(self.extract_one(self.mScope.lookupDef(name),
name, sort, emitted))
return asserts
def arch_state(self):
raise Exception("Abstract")
def embed(bil, visitor_class):
visitor = visitor_class(Context())
visit(visitor, bil)
assert len(visitor.mStack) == 0
return visitor.extract()
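# --- Hypothetical usage sketch (not part of the original module). ---
# The concrete embedder class, register names and `some_bil_program` below are
# illustrative assumptions; only Z3Embedder, arch_state and embed are defined above.
#
# class X86Embedder(Z3Embedder):
#     def arch_state(self):
#         # (register name, z3 sort) pairs for the modelled architecture
#         return [("EAX", BitVecSort(32)), ("ZF", BoolSort())]
#
# asserts = embed(some_bil_program, X86Embedder)
# solver = Solver()
# solver.add(*asserts)
# print solver.check()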
| gpl-3.0 | 1,772,084,412,615,283,500 | 29.498141 | 76 | 0.53998 | false |
teamfruit/defend_against_fruit | defend_against_fruit/daf_fruit_dist/daf_fruit_dist/tests/checksum_dependency_helper_tests.py | 1 | 3031 | from collections import namedtuple
from nose.tools import eq_, raises
from pip.exceptions import DistributionNotFound
from requests import RequestException
from daf_fruit_dist.checksum_dependency_helper import ChecksumDependencyHelper
def found_files_and_checksums_test():
"""
Verify that finding a package and its associated checksums results
in those checksums being returned.
"""
TestContext(
determine_file_path_succeeds=True,
determine_checksums_succeeds=True,
expected_checksums=checksums_found).run()
def failed_to_find_file_test():
"""
Verify that failing to find a package results in None being returned
for each checksum.
"""
TestContext(
determine_file_path_succeeds=False,
determine_checksums_succeeds=False,
expected_checksums=checksums_not_found).run()
@raises(RequestException)
def found_file_but_not_checksums_test():
"""
Verify that successfully finding a package but not its associated
checksums results in an exception.
"""
TestContext(
determine_file_path_succeeds=True,
determine_checksums_succeeds=False,
checksum_lookup_exception=RequestException).run()
###############################################################################
######################################################### Test Data and Helpers
Checksums = namedtuple('Hashes', ('md5', 'sha1'))
checksums_found = Checksums(md5='MD5', sha1='SHA1')
checksums_not_found = Checksums(md5=None, sha1=None)
class TestContext(object):
def __init__(
self,
determine_file_path_succeeds,
determine_checksums_succeeds,
expected_checksums=None,
checksum_lookup_exception=Exception):
self.__checksums = expected_checksums
self.__checksum_lookup_exception = checksum_lookup_exception
if determine_file_path_succeeds:
self.__determine_file_path_fn = lambda pkg_name, pkg_version: None
else:
def fn(pkg_name, pkg_version):
raise DistributionNotFound()
self.__determine_file_path_fn = fn
if determine_checksums_succeeds:
self.__determine_checksums_fn = (
lambda dependency_path: self.__checksums)
else:
def fn(dependency_path):
raise self.__checksum_lookup_exception()
self.__determine_checksums_fn = fn
def __verify_checksums(self, actual_md5, actual_sha1):
eq_(actual_md5, self.__checksums.md5)
eq_(actual_sha1, self.__checksums.sha1)
def run(self):
checksum_dependency_helper = ChecksumDependencyHelper(
determine_file_path_fn=self.__determine_file_path_fn,
determine_checksums_from_file_path_fn=
self.__determine_checksums_fn)
actual_md5, actual_sha1 = checksum_dependency_helper(
artifact_id=None,
version=None)
self.__verify_checksums(actual_md5, actual_sha1)
| apache-2.0 | 7,144,934,150,456,840,000 | 32.307692 | 79 | 0.627516 | false |
eguil/ENSO_metrics | scripts/driverCCollection_testMC3.py | 1 | 27073 | from cdms2 import open as CDMS2open
from os.path import join as join_path
from os import environ
from sys import exit
#from EnsoMetrics.EnsoCollectionsLib import CmipVariables, defCollection, ReferenceObservations
#from EnsoMetrics.EnsoComputeMetricsLib import ComputeCollection
from EnsoCollectionsLib import CmipVariables, defCollection, ReferenceObservations
from EnsoComputeMetricsLib import ComputeCollection
xmldir = environ['XMLDIR']
def find_xml(name, frequency, variable, project='', experiment='', ensemble='', realm=''):
list_obs = ReferenceObservations().keys()
if name in list_obs:
file_name, file_area, file_land = find_xml_obs(name, frequency, variable)
else:
file_name, file_area, file_land = find_xml_cmip(name, project, experiment, ensemble, frequency, realm, variable)
return file_name, file_area, file_land
def find_xml_cmip(model, project, experiment, ensemble, frequency, realm, variable):
file_name = join_path(xmldir, str(model) + '_' + str(project) + '_' + str(experiment) + '_' + str(ensemble) +
'_glob_' + str(frequency) + '_' + str(realm) + '.xml')
xml = CDMS2open(file_name)
listvar1 = sorted(xml.listvariables())
if variable not in listvar1:
if realm == 'O':
new_realm = 'A'
elif realm == 'A':
new_realm = 'O'
# if var is not in realm 'O' (for ocean), look for it in realm 'A' (for atmosphere)
file_name = join_path(xmldir, str(model) + '_' + str(project) + '_' + str(experiment) + '_' + str(ensemble) +
'_glob_' + str(frequency) + '_' + str(new_realm) + '.xml')
xml = CDMS2open(file_name)
listvar2 = sorted(xml.listvariables())
if variable not in listvar2:
print '\033[95m' + str().ljust(5) + "CMIP var " + str(variable) + " cannot be found (realm A and O)"\
+ '\033[0m'
print '\033[95m' + str().ljust(10) + "file_name = " + str(file_name) + '\033[0m'
print '\033[95m' + str().ljust(10) + "variables = " + str(listvar1) + '\033[0m'
print '\033[95m' + str().ljust(10) + "AND" + '\033[0m'
print '\033[95m' + str().ljust(10) + "variables = " + str(listvar2) + '\033[0m'
exit(1)
file_area, file_land = find_xml_fx(model, project=project, experiment=experiment, realm=new_realm)
else:
file_area, file_land = find_xml_fx(model, project=project, experiment=experiment, realm=realm)
return file_name, file_area, file_land
def find_xml_fx(name, project='', experiment='', realm=''):
list_obs = ReferenceObservations().keys()
if name in list_obs:
file_area = join_path(xmldir, 'obs_' + str(name) + '_glob_fx_O_areacell.xml')
file_land = join_path(xmldir, 'obs_' + str(name) + '_glob_fx_O_landmask.xml')
else:
file_area = join_path(xmldir, str(name) + '_' + str(project) + '_' + str(experiment) + '_r0i0p0_glob_fx_'
+ str(realm) + '_areacell.xml')
file_land = join_path(xmldir, str(name) + '_' + str(project) + '_' + str(experiment) + '_r0i0p0_glob_fx_'
+ str(realm) + '_landmask.xml')
try: xml = CDMS2open(file_area)
except: file_area = None
try: xml = CDMS2open(file_land)
except: file_land = None
return file_area, file_land
def find_xml_obs(obs, frequency, variable):
file_name = join_path(xmldir, 'obs_' + str(obs) + '_glob_' + str(frequency) + '_O.xml')
xml = CDMS2open(file_name)
listvar1 = sorted(xml.listvariables())
if variable not in listvar1:
print '\033[95m' + str().ljust(5) + "obs var " + str(variable) + " cannot be found" + '\033[0m'
print '\033[95m' + str().ljust(10) + "file_name = " + str(file_name) + '\033[0m'
print '\033[95m' + str().ljust(10) + "variables = " + str(listvar1) + '\033[0m'
exit(1)
file_area, file_land = find_xml_fx(obs)
return file_name, file_area, file_land
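# Example call (hypothetical arguments; 'Tropflux' is one of the observation
# datasets and 'sst' one of the variables handled further below):
#   file_name, file_area, file_land = find_xml('Tropflux', 'mon', 'sst')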
# metric collection
mc_name = 'ENSO_proc'#'EVAL_IPSL'#'ENSO_perf'#'ENSO_tel'#'MC1'#
dict_mc = defCollection(mc_name)
list_metric = sorted(dict_mc['metrics_list'].keys())
# parameters
project = 'CMIP5'
experiment = 'historical'
ensemble = 'r1i1p1'
frequency = 'mon'
realm = 'A'
# list of variables
list_variables = list()
for metric in list_metric:
listvar = dict_mc['metrics_list'][metric]['variables']
for var in listvar:
if var not in list_variables:
list_variables.append(var)
list_variables = sorted(list_variables)
print '\033[95m' + str(list_variables) + '\033[0m'
# list of observations
list_obs = list()
for metric in list_metric:
dict_var_obs = dict_mc['metrics_list'][metric]['obs_name']
for var in dict_var_obs.keys():
for obs in dict_var_obs[var]:
if obs not in list_obs:
list_obs.append(obs)
list_obs = sorted(list_obs)
if mc_name == 'MC1':
list_obs = ['Tropflux']
elif mc_name == 'ENSO_perf':
list_obs = ['ERA-Interim']#['Tropflux','GPCPv2.3']#['HadISST']#['HadISST','Tropflux','GPCPv2.3']#
elif mc_name == 'ENSO_tel':
list_obs = ['ERA-Interim']#['HadISST','GPCPv2.3']
elif mc_name == 'ENSO_proc':
list_obs = ['AVISO', 'ERA-Interim', 'Tropflux']#['Tropflux', 'ERA-Interim', 'SODA3.4.2']#['ERA-Interim', 'SODA3.4.2']#['HadISST','GPCPv2.3']
elif mc_name == 'EVAL_IPSL':
list_obs = ['ERA-Interim']#
print '\033[95m' + str(list_obs) + '\033[0m'
#
# finding file and variable name in file for each observations dataset
#
dict_obs = dict()
for obs in list_obs:
# @jiwoo: be sure to add your datasets to EnsoCollectionsLib.ReferenceObservations if needed
dict_var = ReferenceObservations(obs)['variable_name_in_file']
dict_obs[obs] = dict()
for var in list_variables:
#
# finding variable name in file
#
# @jiwoo: correct / adapt the 'varname' in
# EnsoCollectionsLib.ReferenceObservations(obs)['variable_name_in_file'][var] if it is not correct or if you
# changed a name in the xml
# I usually alias the variable names from observations and models in the xml in order to have the same name
        # for sst (or any other variable) in every xml. This way I do not need to go through this function to know the
# variable name in file
try:
var_in_file = dict_var[var]['var_name']
except:
print '\033[95m' + str(var) + " is not available for " + str(obs) + " or unscripted" + '\033[0m'
else:
try:
areacell_in_file = dict_var['areacell']['var_name']
except:
areacell_in_file = None
try:
landmask_in_file = dict_var['landmask']['var_name']
except:
landmask_in_file = None
if isinstance(var_in_file, list):
list_areacell, list_files, list_landmask, list_name_area, list_name_land = \
list(), list(), list(), list(), list()
for var1 in var_in_file:
file_name, file_areacell, file_landmask = find_xml(obs, frequency, var1)
list_files.append(file_name)
list_areacell.append(file_areacell)
list_name_area.append(areacell_in_file)
list_landmask.append(file_landmask)
list_name_land.append(landmask_in_file)
else:
file_name, file_areacell, file_landmask = find_xml(obs, frequency, var_in_file)
list_files = file_name
list_areacell = file_areacell
list_name_area = areacell_in_file
list_landmask = file_landmask
list_name_land = landmask_in_file
dict_obs[obs][var] = {'path + filename': list_files, 'varname': var_in_file,
'path + filename_area': list_areacell, 'areaname': list_name_area,
'path + filename_landmask': list_landmask, 'landmaskname': list_name_land}
# models
list_models = ['IPSL-CM5B-LR']#['CNRM-CM5']#['IPSL-CM5B-LR']#['CNRM-CM5','IPSL-CM5B-LR']#
ens = 'r1i1p1'
#
# finding file and variable name in file for each observations dataset
#
dict_metric, dict_dive = dict(), dict()
dict_var = CmipVariables()['variable_name_in_file']
for mod in list_models:
dict_mod = {mod: {}}
# ------------------------------------------------
    # @jiwoo: between these dashes the program is a bit ad hoc...
# it works well for me because I am looking for sst and taux on the ocean grid, and fluxes [lhf, lwr, swr, shf, thf]
# on the atmosphere grid
# if you want to use atmosphere only, do not use this or create your own way to find the equivalent between the
# variable name in the program and the variable name in the file
for var in list_variables:
#
# finding variable name in file
#
var_in_file = dict_var[var]['var_name']
try:
areacell_in_file = dict_var['areacell']['var_name']
except:
areacell_in_file = None
try:
landmask_in_file = dict_var['landmask']['var_name']
except:
landmask_in_file = None
if isinstance(var_in_file, list):
list_areacell, list_files, list_landmask, list_name_area, list_name_land = \
list(), list(), list(), list(), list()
for var1 in var_in_file:
file_name, file_areacell, file_landmask = \
find_xml(mod, frequency, var1, project=project, experiment=experiment, ensemble=ens,
realm=realm)
list_files.append(file_name)
list_areacell.append(file_areacell)
list_name_area.append(areacell_in_file)
list_landmask.append(file_landmask)
list_name_land.append(landmask_in_file)
else:
file_name, file_areacell, file_landmask = \
find_xml(mod, frequency, var_in_file, project=project, experiment=experiment, ensemble=ens,
realm=realm)
list_files = file_name
list_areacell = file_areacell
list_name_area = areacell_in_file
list_landmask = file_landmask
list_name_land = landmask_in_file
dict_mod[mod][var] = {'path + filename': list_files, 'varname': var_in_file,
'path + filename_area': list_areacell, 'areaname': list_name_area,
'path + filename_landmask': list_landmask, 'landmaskname': list_name_land}
    # dictionary needed by EnsoComputeMetricsLib.ComputeCollection
# @jiwoo the ComputeCollection function it still on development and it does not read the observations requirement
# defined in the metric collection, i.e., defCollection(mc_name)['metrics_list']['<metric name>']['obs_name']
# so the function does not take a specific obs to compute the metric so for every obs in 'dict_obs' we must include
# every variables needed by the metric collection [lhf, lwr, swr, shf, sst, taux, thf] even if its coming from
# another dataset
dictDatasets = {'model': dict_mod, 'observations': dict_obs}
# regridding dictionary (only if you want to specify the regridding)
dict_regrid = {}
# dict_regrid = {
# 'regridding': {
# 'model_orand_obs': 2, 'regridder': 'cdms', 'regridTool': 'esmf', 'regridMethod': 'linear',
# 'newgrid_name': 'generic 1x1deg'},
# }
# Computes the metric collection
#dict_metric[mod] = ComputeCollection(mc_name, dictDatasets, user_regridding=dict_regrid, debug=False)
#dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, user_regridding=dict_regrid,
# debug=False, dive_down=True)
netcdf_path = '/Users/yannplanton/Documents/Yann/Fac/2016_2018_postdoc_LOCEAN/data/Test'
netcdf_name = 'YANN_PLANTON_' + mc_name + '_' + mod
netcdf = join_path(netcdf_path, netcdf_name)
# dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, netcdf=True, netcdf_name=netcdf, debug=True, dive_down=True)
# dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, netcdf=True, netcdf_name=netcdf, debug=True)
dict_metric[mod], dict_dive[mod] = ComputeCollection(mc_name, dictDatasets, mod, netcdf=True, netcdf_name=netcdf, debug=True)
tmp = sorted(dict_metric[mod]['value'].keys(), key=lambda v: v.upper())
for kk in tmp:
print kk.ljust(13) + ': ' + str(dict_metric[mod]['value'][kk]['metric'])
    stop  # intentional halt: 'stop' is undefined, so this raises NameError and ends the script here
# Prints the metrics values
for ii in range(3): print ''
print '\033[95m' + str().ljust(5) + str(mod) + '\033[0m'
list_metric = dict_metric[mod]['value'].keys()
for metric in list_metric:
print '\033[95m' + str().ljust(10) + str(metric) + '\033[0m'
metric_dict = dict_metric[mod]['value'][metric]['metric']
for ref in metric_dict.keys():
print '\033[95m' + str().ljust(15) + 'metric: ' + str(ref) + ' value = ' + str(metric_dict[ref]['value'])\
+ ', error = ' + str(metric_dict[ref]['value_error']) + '\033[0m'
if 'value2' in metric_dict[ref].keys():
print '\033[95m' + str().ljust(15) + 'metric: ' + str(ref) + ' value = ' +\
str(metric_dict[ref]['value2']) + ', error = ' + str(metric_dict[ref]['value_error2']) + '\033[0m'
if 'value3' in metric_dict[ref].keys():
print '\033[95m' + str().ljust(15) + 'metric: ' + str(ref) + ' value = ' + \
str(metric_dict[ref]['value3']) + ', error = ' + str(metric_dict[ref]['value_error3']) + '\033[0m'
# Plot
#stop
#if ' ':
for mod in list_models:
from numpy import arange as NUMPYarange
from cdms2 import createAxis as CDMS2createAxis
from MV2 import array as MV2array
from MV2 import masked_where as MV2masked_where
from MV2 import maximum as MV2maximum
from MV2 import minimum as MV2minimum
import plot_frame as PFRAME
import plot_functions as PF
path_plot = '/Users/yannplanton/Documents/Yann/Fac/2016_2018_postdoc_LOCEAN/data/Plots'
#if ' ':
if ' ':
for metric in list_metric:
print '\033[95m' + str().ljust(10) + str(metric) + '\033[0m'
metric_dict = dict_metric[mod]['value'][metric]['metric']
# metric
dict_m1, dict_m2, dict_m3 = dict(), dict(), dict()
for ref in metric_dict.keys():
dict_m1[ref] = metric_dict[ref]['value']
if 'value2' in metric_dict[ref].keys():
dict_m2[ref] = metric_dict[ref]['value2']
if 'value3' in metric_dict[ref].keys():
dict_m3[ref] = metric_dict[ref]['value3']
# dive down
dive_model = dict_dive[mod]['value'][metric]['model']
if metric in ['EnsoPrMap', 'EnsoSstMap']:
tmp_dive, tmp_axis = dict(), dict()
for ref in dict_dive[mod]['value'][metric].keys():
if ref != 'model':
tmp_dive['ref_' + ref] = dict_dive[mod]['value'][metric][ref]
tmp1 = dict_dive[mod]['metadata']['metrics'][metric][ref]['axisLat']
axis1 = CDMS2createAxis(MV2array(tmp1), id='latitude')
tmp2 = dict_dive[mod]['metadata']['metrics'][metric][ref]['axisLon']
axis2 = CDMS2createAxis(MV2array(tmp2), id='longitude')
tmp_axis['ref_' + ref] = [axis1, axis2]
del axis1, axis2, tmp1, tmp2
# plot
x_axis, inc = [0, 360], 60
x_dict = dict((ii, str(ii) + 'E') if ii <= 180 else (ii, str(abs(ii-360)) + 'W') for ii in
range(x_axis[0], x_axis[1] + inc, inc))
y_axis, inc = [-60, 60], 20
y_dict = dict((ii, str(abs(ii)) + 'S') if ii < 0 else ((ii, str(ii) + 'N') if ii>0 else (ii, 'Eq')) for
ii in range(y_axis[0], y_axis[1] + inc, inc))
dom = (y_axis[0], y_axis[1], x_axis[0], x_axis[1])
if metric in ['EnsoPrMap']:
label_col = MV2array(range(-3, 3 + 1, 1))
elif metric in ['EnsoSstMap']:
label_col = MV2array([round(ii, 1) for ii in NUMPYarange(-1.2, 1.2 + 0.4, 0.4)])
for ref in dict_m1.keys():
tab1 = MV2array(dive_model)
tab1.setAxisList(tmp_axis[ref])
m1 = 'Metric 1: ' + str("%.2f" % round(dict_m1[ref], 2))
m2 = 'Metric 2: ' + str("%.2f" % round(dict_m2[ref], 2))
m3 = 'Metric 3: ' + str("%.2f" % round(dict_m3[ref], 2))
tab2 = MV2array(tmp_dive[ref])
tab2.setAxisList(tmp_axis[ref])
print str().ljust(10) + 'range = ' + str("%.2f" % round(min(MV2minimum(tab1),MV2minimum(tab2)), 2))\
+ ' ' + str("%.2f" % round(max(MV2maximum(tab1),MV2maximum(tab2)), 2))
name = metric + ' in Historical (' + mod + ')'
name_png = path_plot + '/' + metric + '_' + mod
PFRAME.plot_my_map(tab1, label_col, dom, white_zero=0, x_dico=x_dict, y_dico=y_dict, name=name,
path_plus_name_png=name_png, bg=1)
name = metric + ' in Historical (' + ref + ')'
name_png = path_plot + '/' + metric + '_' + ref
PFRAME.plot_my_map(tab2, label_col, dom, white_zero=0, x_dico=x_dict, y_dico=y_dict, name=name,
path_plus_name_png=name_png, bg=1)
del m1, m2, m3, name, name_png, tab1, tab2
elif metric in ['EnsoPrJjaTel', 'EnsoPrNdjTel']:
tmp_dive, tmp_axis = dict(), dict()
for ref in dict_dive[mod]['value'][metric].keys():
if ref != 'model':
tmp_dive['ref_' + ref] = dict_dive[mod]['value'][metric][ref]
tmp_axis['ref_' + ref] = dict_dive[mod]['metadata']['metrics'][metric][ref]['axis']
# plot
y_axis, inc = [-2.0, 6.2], 0.2
y_dict = dict((round(elt, 1), "{0:.1f}".format(round(elt, 1)))
if (round(elt, 1) * 10) % round(5 * round(inc, 1) * 10, 1) == 0
else (round(elt, 1), '') for elt in NUMPYarange(y_axis[0], y_axis[1] + inc, inc))
for ref in dict_m1.keys():
axis = CDMS2createAxis(MV2array(range(len(tmp_axis[ref])), dtype='int32'), id='regions')
x_dict = dict((elt, tmp_axis[ref][elt]) for elt in range(len(tmp_axis[ref])))
x_axis = [-1.0, len(tmp_axis[ref])]
tab1 = MV2array(dive_model)
tab1.setAxisList([axis])
m1 = 'Metric 1: ' + str("%.2f" % round(dict_m1[ref], 2))
m2 = 'Metric 2: ' + str("%.1f" % round(dict_m2[ref]*100, 1))
tab2 = MV2array(tmp_dive[ref])
tab2.setAxisList([axis])
print str().ljust(10) + 'range = ' + str("%.2f" % round(min(MV2minimum(tab1),MV2minimum(tab2)), 2))\
+ ' ' + str("%.2f" % round(max(MV2maximum(tab1),MV2maximum(tab2)), 2))
list_curve = [tab1, tab2]
list_col = ['black', 'red']
# strings to write
l_w = [m1, m2]
l_w_xy = [[97, 100 - (ii + 1) * 6] for ii in range(len(l_w))]
l_w_si = [30 for ii in range(len(l_w))]
l_w_ha = ['right' for ii in range(len(l_w))]
# lines to plot
lines_y1y2 = [[round(ii, 1), round(ii, 1)] for ii in y_dict.keys() if y_dict[ii] != '' and
round(ii, 1) != 0 and round(ii, 1) not in y_axis]
lines_x1x2 = [x_axis for ii in range(len(lines_y1y2))]
lines_colo = ['grey' for ii in range(len(lines_y1y2))]
name = metric + ' metric in Historical (' + mod + ')'
yname = 'El Nino (PR) minus La Nina (PR)'
name_png = path_plot + '/' + metric + '_' + mod + '_ ' + ref
PFRAME.curves_plot(list_curve, list_col=list_col, x_axis=x_axis, x_dico=x_dict, y_axis=y_axis,
y_dico=y_dict, name_in_xlabel=True, name=name, xname='', yname=yname,
list_writings=l_w, list_writings_pos_xy=l_w_xy, list_writings_size=l_w_si,
list_writings_halign=l_w_ha, plot_lines=True, lines_x1x2=lines_x1x2,
lines_y1y2=lines_y1y2, lines_color=lines_colo, path_plus_name_png=name_png,
draw_white_background=True, save_ps=False, bg=1)
del l_w, l_w_ha, l_w_si, l_w_xy, lines_colo, lines_x1x2, lines_y1y2, list_curve, m1,\
m2, name, name_png, yname
elif metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse', 'SeasonalSstLonRmse', 'NinaSstTsRmse',
'NinoSstTsRmse']:
tmp_dive, tmp_axis = dict(), dict()
for ref in dict_dive[mod]['value'][metric].keys():
if ref != 'model':
tmp_dive['ref_' + ref] = dict_dive[mod]['value'][metric][ref]
tmp_axis['ref_' + ref] = dict_dive[mod]['metadata']['metrics'][metric][ref]['axis']
# plot
for ref in dict_m1.keys():
axis = CDMS2createAxis(MV2array(tmp_axis[ref], dtype='float32'), id='axis')
tab1 = MV2array(dive_model)
tab1.setAxisList([axis])
tab1 = MV2masked_where(tab1>=1e20, tab1)
if metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse', 'NinoSstLonRmse', 'SeasonalSstLonRmse']:
inc = 30
if min(axis[:])<0:
x_axis = [-250, -70]
tmp = [x_axis[0]+10, x_axis[1]-10]
x_dict = dict((ii, str(ii + 360) + 'E') if ii < -180 else (ii, str(abs(ii)) + 'W') for ii in
range(tmp[0], tmp[1] + inc, inc))
else:
x_axis = [110, 290]
tmp = [x_axis[0] + 10, x_axis[1] - 10]
x_dict = dict((ii, str(ii) + 'E') if ii < 180 else (ii, str(abs(ii - 360)) + 'W') for ii in
range(tmp[0], tmp[1] + inc, inc))
elif metric in ['NinoSstTsRmse']:
x_axis, inc = [-1, len(axis)], 1
tmp = ['M', 'J', 'S', 'D']
x_dict = dict((ii, tmp[(((ii + 1) / 3) % 4) - 1]) if (ii + 1) % 3 == 0 else (ii, '') for ii in
range(x_axis[0], x_axis[1] + inc, inc))
m1 = 'Metric: ' + str("%.2f" % round(dict_m1[ref], 2))
tab2 = MV2array(tmp_dive[ref])
tab2.setAxisList([axis])
tab2 = MV2masked_where(tab2 >= 1e20, tab2)
print str().ljust(10) + 'range = ' + str("%.2f" % round(min(MV2minimum(tab1),MV2minimum(tab2)), 2))\
+ ' ' + str("%.2f" % round(max(MV2maximum(tab1),MV2maximum(tab2)), 2))
y_axis, y_dict = PF.create_dico([min(MV2minimum(tab1),MV2minimum(tab2)),
max(MV2maximum(tab1),MV2maximum(tab2))])
list_curve = [tab1, tab2]
list_col = ['black', 'red']
# strings to write
l_w = [m1]
l_w_xy = [[97, 100 - (ii + 1) * 6] for ii in range(len(l_w))]
l_w_si = [30 for ii in range(len(l_w))]
l_w_ha = ['right' for ii in range(len(l_w))]
# lines to plot
lines_y1y2 = [[round(ii, 1), round(ii, 1)] for ii in y_dict.keys() if y_dict[ii] != '' and
round(ii, 1) != 0 and round(ii, 1) not in y_axis]
lines_x1x2 = [x_axis for ii in range(len(lines_y1y2))]
if metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse', 'NinoSstLonRmse', 'SeasonalSstLonRmse']:
xname = 'longitude'
lines_x1x2 = lines_x1x2 + [[ii, ii] for ii in x_dict.keys() if x_dict[ii] != '' and ii != 0
and ii not in x_axis]
lines_y1y2 = lines_y1y2 + [y_axis for ii in x_dict.keys() if x_dict[ii] != '' and ii != 0
and ii not in x_axis]
elif metric in ['NinoSstTsRmse']:
xname = 'time'
lines_x1x2 = lines_x1x2 + [[ii, ii] for ii in x_dict.keys() if (ii + 1) % 12 == 0 and ii != 0
and ii not in x_axis]
lines_y1y2 = lines_y1y2 + [y_axis for ii in x_dict.keys() if (ii + 1) % 12 and ii != 0
and ii not in x_axis]
lines_colo = ['grey' for ii in range(len(lines_y1y2))]
name = metric + ' metric (' + mod + ')'
print metric, mod, ref
name_png = path_plot + '/' + metric + '_' + mod + '_ ' + ref
if metric in ['NinoSstLonRmse', 'NinoSstTsRmse', 'SeasonalSstLonRmse']:
yname = 'SSTA (degC)'
elif metric in ['BiasSstLonRmse', 'BiasSstSkLonRmse']:
yname = 'SST (degC)'
PFRAME.curves_plot(list_curve, list_col=list_col, x_axis=x_axis, x_dico=x_dict, y_axis=y_axis,
y_dico=y_dict, name_in_xlabel=False, name=name, xname=xname, yname=yname,
list_writings=l_w, list_writings_pos_xy=l_w_xy, list_writings_size=l_w_si,
list_writings_halign=l_w_ha, plot_lines=True, lines_x1x2=lines_x1x2,
lines_y1y2=lines_y1y2, lines_color=lines_colo, path_plus_name_png=name_png,
draw_white_background=True, save_ps=False, bg=1)
del l_w, l_w_ha, l_w_si, l_w_xy, lines_colo, lines_x1x2, lines_y1y2, list_curve, m1,\
name, name_png, xname, yname
| bsd-3-clause | -6,984,128,249,758,549,000 | 57.221505 | 144 | 0.518635 | false |
h3llrais3r/Auto-Subliminal | tests/core/test_item.py | 1 | 4529 | # coding=utf-8
import datetime
from autosubliminal.core.item import WantedItem
wanted_item = WantedItem()
wanted_item.timestamp = '2018-01-01 12:30:01'
def test_compare_wanted_items():
wanted_item_1 = WantedItem(type='episode', title='testequal', season=1, episode=1)
wanted_item_2 = WantedItem(type='episode', title='testequal', season=1, episode=1)
wanted_item_3 = WantedItem(type='episode', title='testdifferent', season=1, episode=1)
assert wanted_item_1 == wanted_item_2
assert wanted_item_1 != wanted_item_3
assert wanted_item_2 != wanted_item_3
def test_wanted_item_with_multi_episode():
# Example file: Marvels.Agents.of.S.H.I.E.L.D.S05E01-E02.720p.HDTV.x264-AVS.mkv
wanted_item_1 = WantedItem(type='episode', title='test', season=1, episode=[1, 2])
assert wanted_item_1.episode == [1, 2]
def test_wanted_item_with_multi_sources():
# Example file: Inferno.2016.1080p.WEB.BluRay.x264-[GROUP1.AG].mp4
wanted_item_1 = WantedItem(type='movie', title='test', source=['Web', 'Blu-ray'])
assert wanted_item_1.source == ['Web', 'Blu-ray']
def test_wanted_item_with_multi_codec():
# Example file: Code.37.S03E02.NL.VLAAMS.720p.HDTV.x264-SHOWGEMiST_xvid.avi
wanted_item_1 = WantedItem(type='episode', title='test', season=1, episode=1, codec=['H.264', 'Xvid'])
assert wanted_item_1.codec == ['H.264', 'Xvid']
def test_wanted_item_trim_release_group():
wanted_item_1 = WantedItem(type='episode', title='test', season=1, episode=1, releasegrp='KILLERS[rarbg]')
assert wanted_item_1.releasegrp == 'KILLERS'
def test_wanted_item_set_attr():
wanted_item.set_attr('languages', 'nl,en')
wanted_item.set_attr('season', '1')
wanted_item.set_attr('episode', '1,2')
wanted_item.set_attr('year', '2018')
wanted_item.set_attr('source', 'Web')
wanted_item.set_attr('quality', '720p')
wanted_item.set_attr('codec', 'H.264')
wanted_item.set_attr('tvdbid', '263365')
wanted_item.set_attr('unknown', 'unknown')
assert wanted_item.languages == ['nl', 'en']
assert wanted_item.season == 1
assert wanted_item.episode == [1, 2]
assert wanted_item.year == 2018
assert wanted_item.source == 'Web'
assert wanted_item.quality == '720p'
assert wanted_item.codec == 'H.264'
assert wanted_item.tvdbid == 263365
assert not hasattr(wanted_item, 'unknown')
def test_wanted_item_copy_to():
    wanted_item_1 = WantedItem(type='episode', title='title1', season=1, episode=1)
wanted_item_2 = WantedItem(type='episode', title='title2', season=2, episode=2, codec=2)
wanted_item_1.copy_to(wanted_item_2)
assert wanted_item_1 == wanted_item_2
def test_is_search_active_for_wanted_item_before_on_creation(monkeypatch, mocker):
monkeypatch.setattr('autosubliminal.CHECKSUBDEADLINE', 4)
monkeypatch.setattr('autosubliminal.CHECKSUBDELTA', 7)
today = datetime.datetime(2018, 1, 1, 0, 0, 0)
mocker.patch('autosubliminal.core.item.get_today', return_value=today)
assert wanted_item.is_search_active
def test_is_search_active_for_wanted_item_before_deadline(monkeypatch, mocker):
monkeypatch.setattr('autosubliminal.CHECKSUBDEADLINE', 4)
monkeypatch.setattr('autosubliminal.CHECKSUBDELTA', 7)
today = datetime.datetime(2018, 1, 2, 0, 0, 0)
mocker.patch('autosubliminal.core.item.get_today', return_value=today)
assert wanted_item.is_search_active
def test_is_search_active_for_wanted_item_on_deadline(monkeypatch, mocker):
monkeypatch.setattr('autosubliminal.CHECKSUBDEADLINE', 4)
monkeypatch.setattr('autosubliminal.CHECKSUBDELTA', 7)
today = datetime.datetime(2018, 1, 29, 0, 0, 0)
mocker.patch('autosubliminal.core.item.get_today', return_value=today)
assert wanted_item.is_search_active
def test_is_search_active_for_wanted_item_after_deadline(monkeypatch, mocker):
monkeypatch.setattr('autosubliminal.CHECKSUBDEADLINE', 4)
monkeypatch.setattr('autosubliminal.CHECKSUBDELTA', 7)
today = datetime.datetime(2018, 1, 30, 0, 0, 0)
mocker.patch('autosubliminal.core.item.get_today', return_value=today)
assert not wanted_item.is_search_active
def test_is_search_active_for_wanted_item_after_deadline_on_delta(monkeypatch, mocker):
monkeypatch.setattr('autosubliminal.CHECKSUBDEADLINE', 4)
monkeypatch.setattr('autosubliminal.CHECKSUBDELTA', 7)
today = datetime.datetime(2018, 2, 26, 0, 0, 0)
mocker.patch('autosubliminal.core.item.get_today', return_value=today)
assert wanted_item.is_search_active
| gpl-3.0 | 1,611,014,061,796,929,800 | 40.935185 | 110 | 0.705895 | false |
nextgis-extra/tests | lib_gdal/gdrivers/ngsgeoid.py | 1 | 2609 | #!/usr/bin/env python
###############################################################################
# $Id: ngsgeoid.py 32002 2015-12-05 06:03:16Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read functionality for NGSGEOID driver.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2011, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Test opening a little endian file
def ngsgeoid_1():
tst = gdaltest.GDALTest( 'NGSGEOID', 'g2009u01_le_truncated.bin', 1, 65534 )
return tst.testOpen( check_gt = ( 229.99166666666667, 0.016666666666670001, 0.0, 40.00833333333334, 0.0, -0.016666666666670001 ), check_prj = 'WGS84' )
###############################################################################
# Test opening a big endian file
def ngsgeoid_2():
tst = gdaltest.GDALTest( 'NGSGEOID', 'g2009u01_be_truncated.bin', 1, 65534 )
return tst.testOpen( check_gt = ( 229.99166666666667, 0.016666666666670001, 0.0, 40.00833333333334, 0.0, -0.016666666666670001 ), check_prj = 'WGS84' )
gdaltest_list = [
ngsgeoid_1,
ngsgeoid_2
]
if __name__ == '__main__':
gdaltest.setup_run( 'ngsgeoid' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| gpl-2.0 | 1,945,217,051,635,007,200 | 38.530303 | 155 | 0.620544 | false |
pculture/mirocommunity | localtv/inline_edit/urls.py | 1 | 1638 | from django.conf.urls.defaults import patterns
from localtv import models
urlpatterns = patterns(
'localtv.inline_edit',
(r'^video/(?P<id>[0-9]+)/name/$', 'simple.edit_field',
{'model': models.Video, 'field': 'name'},
'localtv_admin_video_edit_name'),
(r'^video/(?P<id>[0-9]+)/when_published/$', 'simple.edit_field',
{'model': models.Video, 'field': 'when_published'},
'localtv_admin_video_edit_when_published'),
(r'^video/(?P<id>[0-9]+)/authors/$', 'simple.edit_field',
{'model': models.Video, 'field': 'authors'},
'localtv_admin_video_edit_authors'),
(r'^video/(?P<id>[0-9]+)/categories/$', 'simple.edit_field',
{'model': models.Video, 'field': 'categories'},
'localtv_admin_video_edit_categories'),
(r'^video/(?P<id>[0-9]+)/tags/$', 'simple.edit_field',
{'model': models.Video, 'field': 'tags'},
'localtv_admin_video_edit_tags'),
(r'^video/(?P<id>[0-9]+)/description/$', 'simple.edit_field',
{'model': models.Video, 'field': 'description'},
'localtv_admin_video_edit_description'),
(r'^video/(?P<id>[0-9]+)/website_url/$', 'simple.edit_field',
{'model': models.Video, 'field': 'website_url'},
'localtv_admin_video_edit_website_url'),
(r'^video/(?P<id>[0-9]+)/editors_comment/$', 'video_views.editors_comment',
{}, 'localtv_admin_video_edit_editors_comment'),
(r'^video/(?P<id>[0-9]+)/thumbnail/$', 'simple.edit_field',
{'model': models.Video, 'field': 'thumbnail'},
'localtv_admin_video_edit_thumbnail'),
(r'^playlist/([0-9]+)/info/$', 'playlist.info',
{}, 'localtv_admin_playlist_edit_info'),
)
| agpl-3.0 | -4,088,861,361,132,173,000 | 44.5 | 79 | 0.601954 | false |
sglumac/pyislands | pyislands/evolution.py | 1 | 1937 | from itertools import islice
def evolution(island):
'''
Infinite generator for evolution of some population.
This generator yields population:
    population - a tuple containing tuples (the individuals)
population_0 = create()
population_1 = evolve(population_0, info_0)
.
.
population_k = evolve(population_k-1, info_k-1)
.
.
Since population is a tuple/an immutable type, a population cannot be
influenced by outside functions. Population can be used only to gather
statistics
If no immigration and emmigration is used this island evolution
becomes a classical genetic algorithm.
'''
population = island.create_population()
while True:
for _ in range(island.migration_interval if island.migrate else 1):
yield population
# Immigration - Outside individuals are inhabiting an island
if island.assimilate:
population = island.assimilate(population)
# Evolution - Each island population is evolved into the next generation
population = island.evolve(population)
# Emmigration - Sends individuals (clones) from one population onto voyage
if island.migrate:
island.migrate(population)
def finite_evolution(num_iterations, island):
''' Same as evolution, except stopped after num_iterations '''
return islice(evolution(island), num_iterations)
def stagnation_evolution(max_stagnation, island):
    ''' Same as evolution, except stopped after max_stagnation iterations without improvement '''
infinite_evolution = evolution(island)
population = next(infinite_evolution)
best = min(population)
stagnation = 0
while stagnation < max_stagnation:
stagnation += 1
yield population
population = next(infinite_evolution)
current_best = min(population)
if current_best < best:
stagnation = 0
best = current_best
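# --- Hypothetical usage sketch (not part of the original module). ---
# `island` is assumed to expose the attributes used above (create_population,
# evolve, migrate, assimilate, migration_interval); the fitness comparison on
# individuals is an assumption as well.
#
# for population in finite_evolution(100, island):
#     print(min(population))   # track the best individual per generation
#
# for population in stagnation_evolution(20, island):
#     pass                     # stops after 20 generations without improvement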
| mit | 3,788,803,665,080,810,000 | 27.910448 | 75 | 0.681466 | false |
pekin0609/- | agent/cognitive/interpreter.py | 1 | 27170 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
interpreter.py
=====
This module contains the class `NetworkBuilder` and `AgentkBuilder` which interprets
the contents of BriCA language files.
"""
# BriCA Language Interpreter for V1 (Interpreter version 1)
# Originally licenced for WBAI (wbai.jp) under the Apache License (?)
# Created: 2016-01-31
# TODO: import, subports
import json
import os
import sys
import brica1
import logging
from config.log import APP_KEY
app_logger = logging.getLogger(APP_KEY)
debug = False # True
class NetworkBuilder:
"""
The BriCA language interpreter.
- reads BriCA language files.
"""
unit_dic = {} # Map: BriCA unit name ⇒ unit object
super_modules = {} # Super modules
# base_name_space="" # Base Name Space
module_dictionary = {}
sub_modules = {}
__ports = {}
__connections = {}
__comments = {}
__network = {}
__super_sub_modules = {} # Super & Sub modules
__load_files = []
def __init__(self):
"""
        Create a new `NetworkBuilder` instance.
Args:
None.
Returns:
NetworkBuilder: a new `NetworkBuilder` instance.
"""
        # Bind fresh containers to the instance (the original assigned to
        # local names, which left all instances sharing the class attributes).
        self.unit_dic = {}
        self.module_dictionary = {}
        self.super_modules = {}
        self.sub_modules = {}
        self.__ports = {}
        self.__connections = {}
        self.__comments = {}
        self.__network = {}
        self.__super_sub_modules = {}
        self.__load_files = []
def load_file(self, file_object):
"""
Load a BriCA language json file.
Args:
A file object
Returns:
success:True, failure:False
"""
self.__load_files.append(os.path.abspath(file_object.name))
dir_name = os.path.dirname(file_object.name)
try:
jsn = json.load(file_object)
except:
app_logger.error("File could not be read!")
return False
if "Header" not in jsn:
app_logger.error("Header must be specified!")
return False
header = jsn["Header"]
if "Import" in header:
import_files = header["Import"]
for import_file in import_files:
if "/" != import_file[0]: # not full path
import_file = dir_name + "/" + import_file
if not os.path.isfile(import_file):
app_logger.error("JSON file {} not found!".format(import_file))
return False
if os.path.abspath(import_file) in self.__load_files:
app_logger.error("Import file {} has been read!".format(import_file))
continue
f = open(import_file)
if self.load_file(f) is False:
return False
if "Name" not in header:
app_logger.error("Header name must be specified!")
return False
if "Base" not in header:
app_logger.error("Base name space must be specified!")
return False
self.base_name_space = header["Base"].strip()
if "Type" not in header:
app_logger.error("Type must be specified!")
return False
self.__type = header["Type"]
if "Comment" in header:
self.__comments["Header." + header["Name"]] = header["Comment"]
if self.__set_modules(jsn) is False:
return False
if self.__set_ports(jsn) is False:
return False
if self.__set_connections(jsn) is False:
return False
return True
def get_network(self):
"""
Args:
None
return:
the network created by load_file(self, file_object)
"""
return {"ModuleDictionary": self.module_dictionary, "SuperModules": self.super_modules,
"SubModules": self.sub_modules, "Ports": self.__ports, "Connections": self.__connections,
"Comments": self.__comments}
def check_consistency(self):
"""
Args:
None
return:
true iff no fatal inconsistency in the network
function:
see the consistency check section below.
"""
for module_name in self.module_dictionary:
if module_name not in self.unit_dic:
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating {}.".format(module_name))
self.unit_dic[module_name] = brica1.Module() # New Module instance
# SuperModules of consistency check
for module, superModule in self.super_modules.items():
if superModule not in self.module_dictionary:
app_logger.error("Super Module {} is not defined!".format(superModule))
return False
# Loop check
if self.__loop_check(superModule, module):
app_logger.error("Loop detected while trying to add {} as a subunit to {}!".format(module, superModule))
return False
# SubModules of consistency check
for superModule, subModules in self.sub_modules.items():
for subModule in subModules:
if subModule not in self.module_dictionary:
app_logger.error("Sub Module {} is not defined!".format(subModule))
return False
# Loop check
if self.__loop_check(superModule, subModule):
app_logger.error("Loop detected while trying to add {} as a subunit to {}!".format(
superModule, subModule))
return False
# Port of consistency check
for module_name in self.module_dictionary:
ports = self.module_dictionary[module_name]["Ports"]
if len(ports) == 0:
app_logger.error("The specified module {} does not have the port!".format(module_name))
return False
for port in ports:
if not module_name + "." + port in self.__ports:
app_logger.error("The specified module {} does not have the port!".format(module_name))
return False
for port_name, v in self.__ports.items():
# Fatal if the specified modules have not been defined.
if "Module" not in v:
app_logger.error("Module is not defined in the port {}!".format(port_name))
return False
module_name = v["Module"]
if module_name not in self.module_dictionary:
app_logger.error("Specified module {} is not defined in the port {}!".format(module_name, port_name))
return False
# Fatal if the shape has not been defined.
if "Shape" not in v:
app_logger.error("Shape is not defined in the port {}!".format(port_name))
return False
length = v["Shape"]
if length < 1:
app_logger.error("Incorrect length of Shape for the port {}!".format(port_name))
return False
# Fatal if the specified modules do not have the port, abort with a message.
module = self.module_dictionary[module_name]
pv = port_name.split(".")
last_port_name = pv[len(pv) - 1]
if last_port_name not in module["Ports"]:
app_logger.error("Port {} is not defined in the module {}!".format(last_port_name, module_name))
return False
module = self.unit_dic[module_name]
if v["IO"] == "Input":
module.make_in_port(last_port_name, length)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating an input port {} (length {}) to {}.".format(
last_port_name, length, module_name))
elif v["IO"] == "Output":
module.make_out_port(last_port_name, length)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating an output port {} (length {}) to {}.".format(
last_port_name, length, module_name))
# Connection of consistency check
for k, v in self.__connections.items():
# Fatal if the specified ports have not been defined.
if not v[0] in self.__ports:
app_logger.error("The specified port {} is not defined in connection {}.".format(v[0], k))
return False
if not v[1] in self.__ports:
app_logger.error("The specified port {} is not defined in connection {}.".format(v[1], k))
return False
tp = v[0].split(".")
to_port = tp[len(tp) - 1]
fp = v[1].split(".")
from_port = fp[len(fp) - 1]
to_unit = self.__ports[v[0]]["Module"]
from_unit = self.__ports[v[1]]["Module"]
# if from_unit & to_unit belong to the same level
if ((from_unit not in self.__super_sub_modules) and (to_unit not in self.__super_sub_modules)) or \
(from_unit in self.__super_sub_modules and to_unit in self.__super_sub_modules and (
self.__super_sub_modules[from_unit] == self.__super_sub_modules[to_unit])):
try:
fr_port_obj = self.unit_dic[from_unit].get_out_port(from_port)
to_port_obj = self.unit_dic[to_unit].get_in_port(to_port)
if fr_port_obj.buffer.shape != to_port_obj.buffer.shape:
app_logger.error("Port dimension unmatch!")
return False
# Creating a connection
brica1.connect((self.unit_dic[from_unit], from_port), (self.unit_dic[to_unit], to_port))
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating a connection from {} of {} to {} of {}".format(
from_port, from_unit, to_port, to_unit))
except:
app_logger.error("adding a connection from {} to {} on the same level"
" but not from an output port to an input port!".format(from_unit, to_unit))
return False
# else if from_unit is the direct super module of the to_unit
elif to_unit in self.__super_sub_modules and self.__super_sub_modules[to_unit] == from_unit:
try:
fr_port_obj = self.unit_dic[from_unit].get_in_port(from_port)
to_port_obj = self.unit_dic[to_unit].get_in_port(to_port)
if fr_port_obj.buffer.shape != to_port_obj.buffer.shape:
app_logger.error("Port dimension unmatch!")
return False
# Creating a connection (alias)
self.unit_dic[to_unit].alias_in_port(self.unit_dic[from_unit], from_port, to_port)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating a connection (alias) from {} of {} to {} of {}.".format(
from_port, from_unit, to_port, to_unit
))
except:
app_logger.error("Error adding a connection from the super module {} to {} "
"but not from an input port to an input port!".format(from_unit, to_unit))
return False
# else if to_unit is the direct super module of the from_unit
elif from_unit in self.__super_sub_modules and self.__super_sub_modules[from_unit] == to_unit:
try:
fr_port_obj = self.unit_dic[from_unit].get_out_port(from_port)
to_port_obj = self.unit_dic[to_unit].get_out_port(to_port)
if fr_port_obj.buffer.shape != to_port_obj.buffer.shape:
app_logger.error("Port dimension unmatch!")
return False
# Creating a connection (alias)
self.unit_dic[from_unit].alias_out_port(self.unit_dic[to_unit], to_port, from_port)
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Creating a connection (alias) from {} of {} to {} of {}.".format(
from_port, from_unit, to_port, to_unit
))
except:
app_logger.error("Error adding a connection from {} to its super module {} "
"but not from an output port to an output port!".format(from_unit, to_unit))
return False
# else connection level error!
else:
app_logger.error("Trying to add a connection between units {} and {} in a remote level!".format(
from_unit, to_unit
))
return False
return True
def check_grounding(self):
"""
Args:
None
return:
true iff the network is grounded, i.e., every module at the bottom of the hierarchy
has a component specification.
"""
for module_name, v in self.module_dictionary.items():
implclass = v["ImplClass"]
if implclass != "":
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Use the existing ImplClass {} for {}.".format(implclass, module_name))
try:
component_instance = eval(implclass + '()') # New ImplClass instance
except:
v = implclass.rsplit(".", 1)
mod_name = v[0]
class_name = v[1]
try:
mod = __import__(mod_name, globals(), locals(), [class_name], -1)
Klass = getattr(mod, class_name)
component_instance = Klass()
except:
app_logger.error("Module {} at the bottom not grounded as a Component!".format(module_name))
return False
try:
module = self.unit_dic[module_name]
module.add_component(module_name, component_instance)
for port in module.in_ports:
length = module.get_in_port(port).buffer.shape[0]
component_instance.make_in_port(port, length)
component_instance.alias_in_port(module, port, port)
for port in module.out_ports:
length = module.get_out_port(port).buffer.shape[0]
component_instance.make_out_port(port, length)
component_instance.alias_out_port(module, port, port)
except:
app_logger.error("Module {} at the bottom not grounded as a Component!".format(module_name))
return False
return True
def __set_modules(self, jsn):
""" Add modules from the JSON description
Args:
None
Returns:
None
"""
if "Modules" in jsn:
modules = jsn["Modules"]
for module in modules:
if self.__set_a_module(module) is False:
return False
else:
app_logger.warning("No Modules in the language file.")
return True
def __set_a_module(self, module):
if "Name" not in module:
app_logger.error("Module name must be specified!")
return False
module_name = module["Name"].strip()
if module_name == "":
app_logger.error("Module name must be specified!")
return False
module_name = self.__prefix_base_name_space(module_name) # Prefixing the base name space
defined_module = None
if module_name in self.module_dictionary:
defined_module = self.module_dictionary[module_name]
ports = []
if "Ports" in module:
ports = module["Ports"]
# Multiple registration
if defined_module:
for p in defined_module["Ports"]:
if p not in ports:
ports.append(p)
implclass = ""
if "ImplClass" in module:
# if an implementation class is specified
implclass = module["ImplClass"].strip()
elif self.__type == "C":
app_logger.error("ImplClass is necessary if the type C in the module {}!".format(module_name))
return False
# Multiple registration
if defined_module:
if implclass == "":
implclass = defined_module["ImplClass"]
else:
if defined_module["ImplClass"] != "":
app_logger.warning("ImplClass {} of {} is replaced with {}.".format(
defined_module["ImplClass"], module_name, implclass))
self.module_dictionary[module_name] = {"Ports": ports, "ImplClass": implclass}
supermodule = ""
if "SuperModule" in module:
supermodule = module["SuperModule"].strip()
supermodule = self.__prefix_base_name_space(supermodule)
if supermodule != "":
# Multiple registration
if module_name in self.super_modules:
app_logger.warning("Super module {} of {} is replaced with {}.".format(
self.super_modules[module_name], module_name, supermodule))
self.super_modules[module_name] = supermodule
self.__super_sub_modules[module_name] = supermodule
if "SubModules" in module:
for submodule in module["SubModules"]:
if submodule != "":
submodule = self.__prefix_base_name_space(submodule)
if module_name not in self.sub_modules:
self.sub_modules[module_name] = []
self.sub_modules[module_name].append(submodule)
self.__super_sub_modules[submodule] = module_name
if "Comment" in module:
self.__comments["Modules." + module_name] = module["Comment"]
return True
def __prefix_base_name_space(self, name):
if name.find(".") < 0:
return self.base_name_space + "." + name
else:
return name
def __loop_check(self, superunit, subunit):
if superunit == subunit:
return True
val = superunit
while val in self.__super_sub_modules:
val = self.__super_sub_modules[val]
if val == subunit:
return True
return False
def __set_ports(self, jsn):
""" Add ports from the JSON description
Args:
None
Returns:
None
"""
if "Ports" in jsn:
ports = jsn["Ports"]
for port in ports:
if self.__set_a_port(port) is False:
return False
else:
app_logger.warning("No Ports in the language file.")
return True
def __set_a_port(self, port):
if "Name" in port:
port_name = port["Name"].strip()
else:
app_logger.error("Name not specified while adding a port!")
return False
if "Module" in port:
port_module = port["Module"].strip()
port_module = self.__prefix_base_name_space(port_module)
else:
app_logger.error("Module not specified while adding a port!")
return False
port_name = port_module + "." + port_name
defined_port = None
if port_name in self.__ports:
defined_port = self.__ports[port_name]
# Multiple registration
if defined_port:
if port_module != defined_port["Module"]:
app_logger.error("Module {} defined in the port {} is already defined as a module {}.".format(
port_module, port_name, self.__ports[port_name]["Module"]))
return False
if "Type" in port:
port_type = port["Type"].strip()
if port_type != "Input" and port_type != "Output":
app_logger.error("Invalid port type {}!".format(port_type))
return False
elif defined_port and port_type != defined_port["IO"]:
app_logger.error("The port type of port {} differs from previously defined port type!".format(
port_name))
return False
else:
app_logger.error("Type not specified while adding a port!")
return False
if "Shape" in port:
shape = port["Shape"]
if len(shape) != 1:
app_logger.error("Shape supports only one-dimensional vector!")
return False
if not isinstance(shape[0], int):
app_logger.error("The value of the port is not a number!")
return False
if int(shape[0]) < 1:
app_logger.error("Port dimension < 1!")
return False
self.__ports[port_name] = {"IO": port_type, "Module": port_module, "Shape": shape[0]}
else:
self.__ports[port_name] = {"IO": port_type, "Module": port_module}
if "Comment" in port:
self.__comments["Ports." + port_name] = port["Comment"]
return True
def __set_connections(self, jsn):
""" Add connections from the JSON description
Args:
None
Returns:
None
"""
if "Connections" in jsn:
connections = jsn["Connections"]
for connection in connections:
if self.__set_a_connection(connection) is False:
return False
else:
if self.__type != "C":
app_logger.warning("No Connections in the language file.")
return True
def __set_a_connection(self, connection):
if "Name" in connection:
connection_name = connection["Name"]
else:
app_logger.error("Name not specified while adding a connection!")
return False
defined_connection = None
if connection_name in self.__connections:
defined_connection = self.__connections[connection_name]
if "FromModule" in connection:
from_unit = connection["FromModule"]
from_unit = self.__prefix_base_name_space(from_unit)
else:
app_logger.error("FromModule not specified while adding a connection!")
return False
if "FromPort" in connection:
from_port = connection["FromPort"]
else:
app_logger.error("FromPort not specified while adding a connection!")
return False
if "ToModule" in connection:
to_unit = connection["ToModule"]
to_unit = self.__prefix_base_name_space(to_unit)
else:
app_logger.error("ToModule not specified while adding a connection!")
return False
if "ToPort" in connection:
to_port = connection["ToPort"]
else:
app_logger.error("ToPort not specified while adding a connection!")
return False
# Multiple registration
if defined_connection and defined_connection[0] != to_unit + "." + to_port:
app_logger.error("Defined port {}.{} is different from the previous ones in connection {}!".format(
to_unit, to_port, connection_name))
return False
if defined_connection and defined_connection[1] != from_unit + "." + from_port:
app_logger.error("Defined port {}.{} is different from the previous ones in connection {}!".format(
from_unit, from_port, connection_name))
return False
if "Comment" in connection:
self.__comments["Connections." + connection_name] = connection["Comment"]
self.__connections[connection_name] = (to_unit + "." + to_port, from_unit + "." + from_port)
return True
class AgentBuilder:
"""
The BriCA language interpreter.
- creates a BriCA agent based on the file contents.
"""
def __init__(self):
self.INCONSISTENT = 1
self.NOT_GROUNDED = 2
self.COMPONENT_NOT_FOUND = 3
self.unit_dic = None
'''
def create_agent(self, scheduler, network):
if network.check_consistency() == False:
return self.INCONSISTENT
if network.check_grounding() == False:
return self.NOT_GROUNDED
for module, super_module in network.super_modules.items():
if super_module in network.module_dictionary:
network.unit_dic[super_module].add_submodule(module, network.unit_dic[module])
if debug:
print "Adding a module " + module + " to " + super_module + "."
# Main logic
top_module = brica1.Module()
for unit_key in network.unit_dic.keys():
if not unit_key in network.super_modules:
if isinstance(network.unit_dic[unit_key], brica1.Module):
top_module.add_submodule(unit_key, network.unit_dic[unit_key])
if debug:
print "Adding a module " + unit_key + " to a BriCA agent."
agent = brica1.Agent(scheduler)
agent.add_submodule("__Runtime_Top_Module", top_module)
self.unit_dic = network.unit_dic
return agent
'''
def create_agent(self, network):
if network.check_consistency() is False:
return self.INCONSISTENT
if network.check_grounding() is False:
return self.NOT_GROUNDED
for module, super_module in network.super_modules.items():
if super_module in network.module_dictionary:
network.unit_dic[super_module].add_submodule(module, network.unit_dic[module])
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Adding a module {} to {}.".format(module, super_module))
# Main logic
top_module = brica1.Module()
for unit_key in network.unit_dic.keys():
if unit_key not in network.super_modules:
if isinstance(network.unit_dic[unit_key], brica1.Module):
top_module.add_submodule(unit_key, network.unit_dic[unit_key])
if app_logger.isEnabledFor(logging.DEBUG):
app_logger.debug("Adding a module {} to a BriCA agent.".format(unit_key))
# agent = brica1.Agent(scheduler)
agent = brica1.Agent()
agent.add_submodule("__Runtime_Top_Module", top_module)
self.unit_dic = network.unit_dic
return agent
def get_modules(self):
return self.unit_dic
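# --- Hypothetical usage sketch (not part of the original module). ---
# The file name and the scheduler class are assumptions about the caller and
# the brica1 API; the NetworkBuilder/AgentBuilder calls are the ones defined above.
#
# network_builder = NetworkBuilder()
# with open("agent.brica.json") as f:
#     if not network_builder.load_file(f):
#         sys.exit(1)
# agent_builder = AgentBuilder()
# agent = agent_builder.create_agent(network_builder)
# if not isinstance(agent, brica1.Agent):   # an int error code was returned
#     sys.exit(agent)
# scheduler = brica1.VirtualTimeSyncScheduler(agent)  # assumed scheduler class
# scheduler.step()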
| apache-2.0 | 1,350,167,149,328,328,400 | 39.549254 | 120 | 0.536109 | false |
rossgoodwin/bizarromoma | bot.py | 1 | 5642 | import os
import time
import json
import string
from collections import defaultdict, Counter
from random import random
import tweepy
class TwitterAPI:
"""
Class for accessing the Twitter API.
    Note: in this version the API credentials are hardcoded in __init__
    below rather than read from environment variables, as the original
    heroku-twitterbot-starter setup assumed.
"""
def __init__(self):
consumer_key = "ZyyYUZVcGfbMBa644Ey77Tu5b"
consumer_secret = "FgL9UAXDin6YQwR1ILqMdE8aCLG9wPkhKDm8wJibyNnWLem2kc"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
access_token = "3299819928-mYYqdXnQmZTURU9iXaalXDq7BGnCESNfe7MGUJE"
access_token_secret = "1pkxjxkpIPQCnAM0zEttaCHKezdlW5Co3x5B2KY1j40qI"
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
def tweet(self, message):
"""Send a tweet"""
self.api.update_status(status=message)
def train_char_lm(fname, order=4):
    # data = file(fname).read()  # original version read the corpus from a file path
    data = fname  # here `fname` is already the corpus text itself
lm = defaultdict(Counter)
pad = "~" * order
data = pad + data
for i in xrange(len(data)-order):
history, char = data[i:i+order], data[i+order]
lm[history][char]+=1
def normalize(counter):
s = float(sum(counter.values()))
return [(c,cnt/s) for c,cnt in counter.iteritems()]
outlm = {hist:normalize(chars) for hist, chars in lm.iteritems()}
return outlm
def generate_letter(lm, history, order):
history = history[-order:]
dist = lm[history]
x = random()
for c,v in dist:
x = x - v
if x <= 0: return c
def generate_text(lm, order, nletters=5000):
history = "~" * order
out = []
for i in xrange(nletters):
c = generate_letter(lm, history, order)
history = history[-order:] + c
out.append(c)
return "".join(out)
# In[148]:
def fix_unmatched(l):
unmatched_locs = []
unmatched_locs_rev = []
def error(c, column_number):
# print 'Error: unmatched', c, 'column', column_number
if c in [')', ']', '}']:
unmatched_locs.append(column_number)
else:
unmatched_locs_rev.append(column_number)
def check(stack, wanted, c, column_number):
if stack:
if stack[-1] != wanted:
error(c, column_number)
else:
stack.pop()
else:
error(c, column_number)
def check_parentheses(line):
stack = list()
column_number = 0
for c in line:
if c == '(' or c == '[' or c == '{':
stack.append(c)
elif c == ')':
check(stack, '(', ')', column_number)
elif c == ']':
check(stack, '[', ']', column_number)
elif c == '}':
check(stack, '{', '}', column_number)
column_number += 1
def check_parentheses_rev(line):
stack = list()
column_number = 0
for c in line:
column_number += 1
if c == ')' or c == ']' or c == '}':
stack.append(c)
elif c == '(':
check(stack, ')', '(', column_number)
elif c == '[':
check(stack, ']', '[', column_number)
elif c == '{':
check(stack, '}', '{', column_number)
check_parentheses(l)
lchars = list(l)
newTitle = ''.join([i for j, i in enumerate(lchars) if j not in unmatched_locs])
check_parentheses_rev(newTitle[::-1])
real_unmatched_rev = map(lambda i: len(newTitle)-i, unmatched_locs_rev)
titChars = list(newTitle)
newTitle = ''.join([i for j, i in enumerate(titChars) if j not in real_unmatched_rev])
numDoubleQuotes = newTitle.count('\"')
if numDoubleQuotes % 2:
newTitle = string.replace(newTitle, '\"', '', 1)
numSingleQuotes = newTitle.count("\'")
if numSingleQuotes % 2:
newTitle = string.replace(newTitle, "\'", "", 1)
return newTitle
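# Behaviour sketch for fix_unmatched (derived from the logic above): unmatched
# closing brackets are dropped first, then unmatched opening brackets, and a
# lone double or single quote is removed, e.g.
#   fix_unmatched('Untitled (study] "no. 3')  ->  'Untitled study no. 3'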
def main():
generatedTexts = map(lambda lm: generate_text(lm, 7), lms)
entry_candidates = map(lambda x: x.split('\n'), generatedTexts)
def remove_plagiarized(i):
plagiarized = set(entry_candidates[i]) & set(data[i])
keepers = map(fix_unmatched, list(set(entry_candidates[i]) - plagiarized))
return keepers
entries = map(remove_plagiarized, range(len(data)))
invented_art = zip(*entries)
def unpack(tup):
t, a, m = tup
outstr = "%s\n%s\n%s" % (t, a, m)
return outstr
output = filter(lambda x: len(x) <= 140, map(unpack, invented_art))
return output
fileObj = open('artworks.json', 'r')
art = json.load(fileObj)[:75000]
fileObj.close()
print "Artwork list loaded..."
titles = map(lambda d: d['title'], art)
artists = map(lambda d: d['artist'], art)
media = map(lambda d: d['medium'], art)
print "Got titles, artists, media..."
# dimensions = map(lambda d: d['dimensions'], art)
data = [titles, artists, media]
lms = map(lambda l: train_char_lm('\n'.join(l), order=7), data)
print "Got language models..."
if __name__ == "__main__":
twitter = TwitterAPI()
while True:
toTweet = main()
print "Got toTweet list..."
while toTweet:
curTweet = toTweet.pop()
print "Posting tweet..."
twitter.tweet(curTweet)
print "...tweet posted!"
time.sleep(120)
| gpl-3.0 | 573,573,843,229,703,900 | 28.694737 | 90 | 0.55654 | false |
elbuo8/sendgrid-django | tests/test_mail.py | 1 | 10276 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import EmailMessage
from django.core.mail import EmailMultiAlternatives
from django.test import SimpleTestCase as TestCase
from sgbackend import SendGridBackend
settings.configure()
class SendGridBackendTests(TestCase):
def test_raises_if_sendgrid_api_key_doesnt_exists(self):
with self.assertRaises(ImproperlyConfigured):
SendGridBackend()
def test_build_empty_sg_mail(self):
msg = EmailMessage()
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'from': {'email': 'webmaster@localhost'},
'subject': '',
'content': [{'type': 'text/plain', 'value': ''}],
'personalizations': [{'subject': ''}]}
)
def test_build_w_to_sg_email(self):
msg = EmailMessage(to=('[email protected]',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [
{'to': [{'email': '[email protected]'}],
'subject': ''}],
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
# Test using "name <email>" format.
msg = EmailMessage(to=('Andrii Soldatenko <[email protected]>',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [
{'to': [
{'name': 'Andrii Soldatenko',
'email': '[email protected]'}],
'subject': ''}],
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
def test_build_w_cc_sg_email(self):
msg = EmailMessage(cc=('[email protected]',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [
{'cc': [{'email': '[email protected]'}],
'subject': ''}],
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
# Test using "name <email>" format.
msg = EmailMessage(cc=('Andrii Soldatenko <[email protected]>',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [
{'cc': [
{'name': 'Andrii Soldatenko',
'email': '[email protected]'}],
'subject': ''}],
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
def test_build_w_bcc_sg_email(self):
msg = EmailMessage(bcc=('[email protected]',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [
{'bcc': [{'email': '[email protected]'}],
'subject': ''}],
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
# Test using "name <email>" format.
msg = EmailMessage(bcc=('Andrii Soldatenko <[email protected]>',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [
{'bcc': [
{'name': 'Andrii Soldatenko',
'email': '[email protected]'}],
'subject': ''}],
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
def test_build_w_reply_to_sg_email(self):
# Test setting a Reply-To header.
msg = EmailMessage()
msg.extra_headers = {'Reply-To': '[email protected]'}
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [{'subject': ''}],
'reply_to': {'email': '[email protected]'},
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
# Test using the reply_to attribute.
msg = EmailMessage(reply_to=('[email protected]',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [{'subject': ''}],
'reply_to': {'email': '[email protected]'},
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
# Test using "name <email>" format.
msg = EmailMessage(
reply_to=('Andrii Soldatenko <[email protected]>',))
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'value': '', 'type': 'text/plain'}],
'personalizations': [{'subject': ''}],
'reply_to': {
'name': 'Andrii Soldatenko',
'email': '[email protected]'},
'from': {'email': 'webmaster@localhost'}, 'subject': ''}
)
def test_build_empty_multi_alternatives_sg_email(self):
html_content = '<p>This is an <strong>important</strong> message.</p>'
msg = EmailMultiAlternatives()
msg.attach_alternative(html_content, "text/html")
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'type': 'text/plain', 'value': ''},
{'type': 'text/html',
'value': '<p>This is an '
'<strong>important</strong> '
'message.</p>'}],
'from': {'email': 'webmaster@localhost'},
'personalizations': [{'subject': ''}],
'subject': ''}
)
def test_build_sg_email_w_categories(self):
msg = EmailMessage()
msg.categories = ['name']
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'categories': ['name'],
'content': [{'type': 'text/plain', 'value': ''}],
'from': {'email': 'webmaster@localhost'},
'personalizations': [{'subject': ''}],
'subject': ''
}
)
def test_build_sg_email_w_template_id(self):
msg = EmailMessage()
msg.template_id = 'template_id_123456'
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'template_id': 'template_id_123456',
'content': [{'type': 'text/plain', 'value': ''}],
'from': {'email': 'webmaster@localhost'},
'personalizations': [{'subject': ''}],
'subject': ''
}
)
def test_build_sg_email_w_substitutions(self):
msg = EmailMessage()
msg.substitutions = {}
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'type': 'text/plain', 'value': ''}],
'from': {'email': 'webmaster@localhost'},
'personalizations': [{'subject': ''}],
'subject': ''}
)
def test_build_sg_email_w_extra_headers(self):
msg = EmailMessage()
msg.extra_headers = {'EXTRA_HEADER': 'VALUE'}
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'type': 'text/plain', 'value': ''}],
'from': {'email': 'webmaster@localhost'},
'headers': {'EXTRA_HEADER': 'VALUE'},
'personalizations': [{'subject': ''}],
'subject': ''}
)
def test_build_sg_email_w_custom_args(self):
msg = EmailMessage()
msg.custom_args = {'custom_arg1': '12345-abcdef'}
with self.settings(SENDGRID_API_KEY='test_key'):
mail = SendGridBackend()._build_sg_mail(msg)
self.assertEqual(
mail,
{'content': [{'type': 'text/plain', 'value': ''}],
'custom_args': {'custom_arg1': '12345-abcdef'},
'from': {'email': 'webmaster@localhost'},
'personalizations': [{'subject': ''}],
'subject': ''}
)
| mit | 3,232,107,922,976,133,600 | 41.639004 | 83 | 0.479953 | false |
eugeneks/zmeyka | zmeyka_auth/user_auth.py | 1 | 3511 |
import datetime
from datetime import datetime
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, DateTime, ForeignKey, UniqueConstraint
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
try:
from db_conf import dbtype, user, password, host, port, db
except:
from zmeyka_auth.db_conf import dbtype, user, password, host, port, db
try:
from alexey_logging import write_to_log
except:
from zmeyka_auth.alexey_logging import write_to_log
url = '{}://{}:{}@{}:{}/{}'.format(dbtype, user, password, host, port, db)
engine = create_engine(url)
db_session = scoped_session(sessionmaker(bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
#write_to_log(2)
##################################################
class Zmeyka_User_DB(Base):
__tablename__ = 'zmeyka_users'
id = Column(String(300), primary_key=True)
password = Column(String(300), nullable=False)
info = Column(String(300))
created_time = Column(DateTime)
last_login_time = Column(DateTime)
def __init__(self, id, password, info = None):
self.id = id
self.password = password
self.info = info
self.created_time = datetime.now()
def user_ping(self):
self.last_login_time = datetime.now()
def __repr__(self):
        return '<Zmeyka_User_DB {} {} {} {} {}>'.format(self.id, '******', self.info, self.created_time, self.last_login_time)
#####################################################
class Zmeyka_User(object):
# proxy for a database of users
#user_database = {"admin": ("admin", "admin"),
# "Alexey": ("Alexey", "Alexey_password")}
def __init__(self, username, password=''):
self.id = username
self.password = password
def get(self):
try:
#if Zmeyka_User.user_database[self.id][1] == self.password:
            if Zmeyka_User_DB.query.filter(Zmeyka_User_DB.id == self.id).first().password == self.password:
return self
else:
return None
except:
return None
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
def __repr__(self):
return '<User %r>' % (self.id)
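# Illustrative credential check (hedged; assumes the schema was created and a
# matching row already exists in zmeyka_users):
#   user = Zmeyka_User('alice', 'secret').get()   # the Zmeyka_User on success, else None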
##################### create_schema ##################
def create_schema():
try:
Base.metadata.create_all(bind=engine)
return 'OK'
except Exception as exception_desc:
db_session.rollback()
#print (exception_desc)
write_to_log(exception_desc)
return exception_desc
##################### drop_schema ##################
def drop_schema():
try:
Base.metadata.drop_all(bind=engine)
return 'OK'
except Exception as exception_desc:
db_session.rollback()
#print (exception_desc)
write_to_log(exception_desc)
return exception_desc
##################### clear_all ##################
def clear_all():
drop_result = drop_schema()
if drop_result == 'OK':
create_result = create_schema()
return create_result
else:
return drop_result
##################### MAIN ####################
if __name__ == "__main__":
pass
#Base.metadata.create_all(bind=engine)
#create_schema() | mit | -7,564,987,325,090,481,000 | 24.635036 | 124 | 0.569638 | false |
Diiaablo95/friendsNet | test/services_api_test_rate.py | 1 | 7003 | import unittest
import json
import flask
import friendsNet.resources as resources
import friendsNet.database as database
DB_PATH = 'db/friendsNet_test.db'
ENGINE = database.Engine(DB_PATH)
COLLECTION_JSON = "application/vnd.collection+json"
HAL_JSON = "application/hal+json"
RATE_PROFILE = "/profiles/rate-profile"
#Tell Flask that I am running it in testing mode.
resources.app.config['TESTING'] = True
#Necessary for correct translation in url_for
resources.app.config['SERVER_NAME'] = 'localhost:5000'
#Database Engine utilized in our testing
resources.app.config.update({'Engine': ENGINE})
class ResourcesAPITestCase(unittest.TestCase):
#INITIATION AND TEARDOWN METHODS
@classmethod
def setUpClass(cls):
        '''Creates the database structure, removing any preexisting database file first.'''
print "Testing ", cls.__name__
ENGINE.remove_database()
ENGINE.create_tables()
@classmethod
def tearDownClass(cls):
'''Remove the testing database.'''
print "Testing ENDED for ", cls.__name__
ENGINE.remove_database()
def setUp(self):
'''Populates the database.'''
#This method loads the initial values from friendsNet_data_db.sql
ENGINE.populate_tables()
#Activate app_context for using url_for
self.app_context = resources.app.app_context()
self.app_context.push()
#Create a test client
self.client = resources.app.test_client()
def tearDown(self):
'''
Remove all records from database.
'''
ENGINE.clear()
self.app_context.pop()
class RateTestCase(ResourcesAPITestCase):
resp_get = {
"id" : 1,
"user_id" : 2,
"status_id" : 1,
"rate" : 4,
"_links" : {
"self" : {"href" : "/friendsNet/api/rates/1/", "profile" : "/profiles/rate-profile"},
"status rated" : {"href" : "/friendsNet/api/statuses/1/"},
"author" : {"href" : "/friendsNet/api/users/2/profile/"},
},
"template" : {
"data" : [
{"name" : "value", "value" : "", "prompt" : "Rate value", "required" : "false"}
]
}
}
rate_patch_correct = {
"template" : {
"data" : [
{"name" : "rate", "value" : 3}
]
}
}
rate_patch_wrong = {
"template" : {
"data" : [
{"name" : "rate", "value" : 6}
]
}
}
rate_patch_empty = {
"template" : {
"data" : []
}
}
def setUp(self):
super(RateTestCase, self).setUp()
self.url = resources.api.url_for(resources.Rate, rate_id = 1, _external = False)
self.url_wrong = resources.api.url_for(resources.Rate, rate_id = 999, _external = False)
#TEST URL
def test_url(self):
_url = '/friendsNet/api/rates/1/'
print '('+self.test_url.__name__+')', self.test_url.__doc__
with resources.app.test_request_context(_url):
rule = flask.request.url_rule
view_point = resources.app.view_functions[rule.endpoint].view_class
self.assertEquals(view_point, resources.Rate)
def test_wrong_url(self):
resp = self.client.get(self.url_wrong, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 404)
data = json.loads(resp.data)
href = data["resource_url"] #test HREF
self.assertEquals(href, self.url_wrong)
error = data["code"]
self.assertEquals(error, 404)
#TEST GET
#200 + MIMETYPE & PROFILE
def test_get_rate(self):
print '('+self.test_get_rate.__name__+')', self.test_get_rate.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get, data)
self.assertEqual(resp.headers.get("Content-Type", None), HAL_JSON)
#404
def test_get_not_existing_rate(self):
print '('+self.test_get_not_existing_rate.__name__+')', self.test_get_not_existing_rate.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_wrong, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp.status_code, 404)
#TEST PATCH
#204
def test_patch_rate(self):
print '('+self.test_patch_rate.__name__+')', self.test_patch_rate.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.rate_patch_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
resp2 = self.client.get(self.url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp2.status_code, 200)
data = json.loads(resp2.data)
new_value = data["rate"]
self.assertEquals(new_value, self.rate_patch_correct["template"]["data"][0]["value"])
#PATCH EMPTY
def test_patch_empty_rate(self):
print '('+self.test_patch_empty_rate.__name__+')', self.test_patch_empty_rate.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.rate_patch_empty), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
#400
def test_patch_wrong_rate(self):
print '('+self.test_patch_wrong_rate.__name__+')', self.test_patch_wrong_rate.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.rate_patch_wrong), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 400)
#404
def test_patch_not_existing_rate(self):
print '('+self.test_patch_not_existing_rate.__name__+')', self.test_patch_not_existing_rate.__doc__
resp = self.client.patch(self.url_wrong, data = json.dumps(self.rate_patch_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
#415
def test_patch_wrong_header_rate(self):
print '('+self.test_patch_wrong_header_rate.__name__+')', self.test_patch_wrong_header_rate.__doc__
resp = self.client.patch(self.url, data = json.dumps(self.rate_patch_correct))
self.assertEquals(resp.status_code, 415)
#TEST DELETE
#204
def test_delete_existing_rate(self):
print '('+self.test_delete_existing_rate.__name__+')', self.test_delete_existing_rate.__doc__
resp = self.client.delete(self.url, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 204)
#404
def test_delete_not_existing_rate(self):
print '('+self.test_delete_not_existing_rate.__name__+')', self.test_delete_not_existing_rate.__doc__
resp = self.client.delete(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
if __name__ == '__main__':
unittest.main()
print 'Start running tests' | gpl-3.0 | 8,076,253,096,272,787,000 | 35.479167 | 138 | 0.60417 | false |
steveb/heat | heat/common/exception.py | 1 | 15643 | #
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat exception subclasses"""
import sys
from oslo_log import log as logging
import six
from six import reraise as raise_
from heat.common.i18n import _
from heat.common.i18n import _LE
_FATAL_EXCEPTION_FORMAT_ERRORS = False
LOG = logging.getLogger(__name__)
# TODO(kanagaraj-manickam): Expose this to user via REST API
ERROR_CODE_MAP = {
'99001': _("Service %(service_name)s is not available for resource "
"type %(resource_type)s, reason: %(reason)s")
}
@six.python_2_unicode_compatible
class HeatException(Exception):
"""Base Heat Exception.
To correctly use this class, inherit from it and define a 'msg_fmt'
property. That msg_fmt will get formatted with the keyword arguments
provided to the constructor.
"""
message = _("An unknown exception occurred.")
    # error_code provides a unique number for a given exception and is
    # encoded in XXYYY format.
    # Here, XX - a unique number assigned per entity type (stack, resource,
    # etc.); all exceptions for one entity share the same XX code.
    # YYY - the specific error code for a given exception.
error_code = None
def __init__(self, **kwargs):
self.kwargs = kwargs
try:
if self.error_code in ERROR_CODE_MAP:
self.msg_fmt = ERROR_CODE_MAP[self.error_code]
self.message = self.msg_fmt % kwargs
if self.error_code:
self.message = 'HEAT-E%s %s' % (self.error_code, self.message)
except KeyError:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value}) # noqa
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise_(exc_info[0], exc_info[1], exc_info[2])
def __str__(self):
return self.message
def __deepcopy__(self, memo):
return self.__class__(**self.kwargs)
class MissingCredentialError(HeatException):
msg_fmt = _("Missing required credential: %(required)s")
class AuthorizationFailure(HeatException):
msg_fmt = _("Authorization failed.")
class NotAuthenticated(HeatException):
msg_fmt = _("You are not authenticated.")
class Forbidden(HeatException):
msg_fmt = _("You are not authorized to use %(action)s.")
def __init__(self, action='this action'):
super(Forbidden, self).__init__(action=action)
# NOTE(bcwaldon): here for backwards-compatibility, need to deprecate.
class NotAuthorized(Forbidden):
msg_fmt = _("You are not authorized to complete this action.")
class Invalid(HeatException):
msg_fmt = _("Data supplied was not valid: %(reason)s")
class UserParameterMissing(HeatException):
msg_fmt = _("The Parameter (%(key)s) was not provided.")
class UnknownUserParameter(HeatException):
msg_fmt = _("The Parameter (%(key)s) was not defined in template.")
class InvalidTemplateVersion(HeatException):
msg_fmt = _("The template version is invalid: %(explanation)s")
class InvalidTemplateSection(HeatException):
msg_fmt = _("The template section is invalid: %(section)s")
class InvalidTemplateParameter(HeatException):
msg_fmt = _("The Parameter (%(key)s) has no attributes.")
class ImmutableParameterModified(HeatException):
msg_fmt = _("The following parameters are immutable and may not be "
"updated: %(keys)s")
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'keys': ", ".join(args)})
super(ImmutableParameterModified, self).__init__(**kwargs)
class InvalidTemplateAttribute(HeatException):
msg_fmt = _("The Referenced Attribute (%(resource)s %(key)s)"
" is incorrect.")
class InvalidTemplateReference(HeatException):
msg_fmt = _('The specified reference "%(resource)s" (in %(key)s)'
' is incorrect.')
class EntityNotFound(HeatException):
msg_fmt = _("The %(entity)s (%(name)s) could not be found.")
def __init__(self, entity=None, name=None, **kwargs):
self.entity = entity
self.name = name
super(EntityNotFound, self).__init__(entity=entity, name=name,
**kwargs)
class PhysicalResourceNameAmbiguity(HeatException):
msg_fmt = _(
"Multiple physical resources were found with name (%(name)s).")
class InvalidTenant(HeatException):
msg_fmt = _("Searching Tenant %(target)s "
"from Tenant %(actual)s forbidden.")
class StackExists(HeatException):
msg_fmt = _("The Stack (%(stack_name)s) already exists.")
class HeatExceptionWithPath(HeatException):
msg_fmt = _("%(error)s%(path)s%(message)s")
def __init__(self, error=None, path=None, message=None):
self.error = error or ''
self.path = []
if path is not None:
if isinstance(path, list):
self.path = path
elif isinstance(path, six.string_types):
self.path = [path]
result_path = ''
for path_item in self.path:
if isinstance(path_item, int) or path_item.isdigit():
result_path += '[%s]' % path_item
elif len(result_path) > 0:
result_path += '.%s' % path_item
else:
result_path = path_item
self.error_message = message or ''
super(HeatExceptionWithPath, self).__init__(
error=('%s: ' % self.error if self.error != '' else ''),
path=('%s: ' % result_path if len(result_path) > 0 else ''),
message=self.error_message
)
def error(self):
return self.error
def path(self):
return self.path
def error_message(self):
return self.error_message
class StackValidationFailed(HeatExceptionWithPath):
pass
class InvalidSchemaError(HeatException):
msg_fmt = _("%(message)s")
class ResourceNotFound(EntityNotFound):
msg_fmt = _("The Resource (%(resource_name)s) could not be found "
"in Stack %(stack_name)s.")
class SnapshotNotFound(EntityNotFound):
msg_fmt = _("The Snapshot (%(snapshot)s) for Stack (%(stack)s) "
"could not be found.")
class InvalidGlobalResource(HeatException):
msg_fmt = _("There was an error loading the definition of the global "
"resource type %(type_name)s.")
class ResourceTypeUnavailable(HeatException):
error_code = '99001'
class InvalidBreakPointHook(HeatException):
msg_fmt = _("%(message)s")
class InvalidRestrictedAction(HeatException):
msg_fmt = _("%(message)s")
class ResourceNotAvailable(HeatException):
msg_fmt = _("The Resource (%(resource_name)s) is not available.")
class ClientNotAvailable(HeatException):
msg_fmt = _("The client (%(client_name)s) is not available.")
class WatchRuleNotFound(EntityNotFound):
"""Keep this for AWS compatiblility."""
msg_fmt = _("The Watch Rule (%(watch_name)s) could not be found.")
class ResourceFailure(HeatExceptionWithPath):
def __init__(self, exception_or_error, resource, action=None):
self.resource = resource
self.action = action
if action is None and resource is not None:
self.action = resource.action
path = []
res_path = []
if resource is not None:
res_path = [resource.stack.t.get_section_name('resources'),
resource.name]
if isinstance(exception_or_error, Exception):
if isinstance(exception_or_error, ResourceFailure):
self.exc = exception_or_error.exc
error = exception_or_error.error
message = exception_or_error.error_message
path = exception_or_error.path
else:
self.exc = exception_or_error
error = six.text_type(type(self.exc).__name__)
message = six.text_type(self.exc)
path = res_path
else:
self.exc = None
res_failed = 'Resource %s failed: ' % action.upper()
if res_failed in exception_or_error:
(error, message, new_path) = self._from_status_reason(
exception_or_error)
path = res_path + new_path
else:
path = res_path
error = None
message = exception_or_error
super(ResourceFailure, self).__init__(error=error, path=path,
message=message)
def _from_status_reason(self, status_reason):
"""Split the status_reason up into parts.
Given the following status_reason:
"Resource DELETE failed: Exception : resources.AResource: foo"
we are going to return:
("Exception", "resources.AResource", "foo")
"""
parsed = [sp.strip() for sp in status_reason.split(':')]
if len(parsed) >= 4:
error = parsed[1]
message = ': '.join(parsed[3:])
path = parsed[2].split('.')
else:
error = ''
message = status_reason
path = []
return (error, message, path)
class NotSupported(HeatException):
msg_fmt = _("%(feature)s is not supported.")
class ResourceActionNotSupported(HeatException):
msg_fmt = _("%(action)s is not supported for resource.")
class ResourceActionRestricted(HeatException):
msg_fmt = _("%(action)s is restricted for resource.")
class ResourcePropertyConflict(HeatException):
msg_fmt = _('Cannot define the following properties '
'at the same time: %(props)s.')
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'props': ", ".join(args)})
super(ResourcePropertyConflict, self).__init__(**kwargs)
class ResourcePropertyDependency(HeatException):
msg_fmt = _('%(prop1)s cannot be specified without %(prop2)s.')
class ResourcePropertyValueDependency(HeatException):
msg_fmt = _('%(prop1)s property should only be specified '
'for %(prop2)s with value %(value)s.')
class PropertyUnspecifiedError(HeatException):
msg_fmt = _('At least one of the following properties '
'must be specified: %(props)s.')
def __init__(self, *args, **kwargs):
if args:
kwargs.update({'props': ", ".join(args)})
super(PropertyUnspecifiedError, self).__init__(**kwargs)
class UpdateReplace(Exception):
"""Raised when resource update requires replacement."""
def __init__(self, resource_name='Unknown'):
msg = _("The Resource %s requires replacement.") % resource_name
super(Exception, self).__init__(six.text_type(msg))
class ResourceUnknownStatus(HeatException):
msg_fmt = _('%(result)s - Unknown status %(resource_status)s due to '
'"%(status_reason)s"')
def __init__(self, result=_('Resource failed'),
status_reason=_('Unknown'), **kwargs):
super(ResourceUnknownStatus, self).__init__(
result=result, status_reason=status_reason, **kwargs)
class ResourceInError(HeatException):
msg_fmt = _('Went to status %(resource_status)s '
'due to "%(status_reason)s"')
def __init__(self, status_reason=_('Unknown'), **kwargs):
super(ResourceInError, self).__init__(status_reason=status_reason,
**kwargs)
class UpdateInProgress(Exception):
def __init__(self, resource_name='Unknown'):
msg = _("The resource %s is already being updated.") % resource_name
super(Exception, self).__init__(six.text_type(msg))
class HTTPExceptionDisguise(Exception):
"""Disguises HTTP exceptions.
They can be handled by the webob fault application in the wsgi pipeline.
"""
def __init__(self, exception):
self.exc = exception
self.tb = sys.exc_info()[2]
class EgressRuleNotAllowed(HeatException):
msg_fmt = _("Egress rules are only allowed when "
"Neutron is used and the 'VpcId' property is set.")
class Error(HeatException):
msg_fmt = "%(message)s"
def __init__(self, msg):
super(Error, self).__init__(message=msg)
class NotFound(HeatException):
def __init__(self, msg_fmt=_('Not found')):
self.msg_fmt = msg_fmt
super(NotFound, self).__init__()
class InvalidContentType(HeatException):
msg_fmt = _("Invalid content type %(content_type)s")
class RequestLimitExceeded(HeatException):
msg_fmt = _('Request limit exceeded: %(message)s')
class StackResourceLimitExceeded(HeatException):
msg_fmt = _('Maximum resources per stack exceeded.')
class ActionInProgress(HeatException):
msg_fmt = _("Stack %(stack_name)s already has an action (%(action)s) "
"in progress.")
class StopActionFailed(HeatException):
msg_fmt = _("Failed to stop stack (%(stack_name)s) on other engine "
"(%(engine_id)s)")
class EventSendFailed(HeatException):
msg_fmt = _("Failed to send message to stack (%(stack_name)s) "
"on other engine (%(engine_id)s)")
class InterfaceAttachFailed(HeatException):
msg_fmt = _("Failed to attach interface (%(port)s) "
"to server (%(server)s)")
class InterfaceDetachFailed(HeatException):
msg_fmt = _("Failed to detach interface (%(port)s) "
"from server (%(server)s)")
class UnsupportedObjectError(HeatException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(HeatException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(HeatException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ObjectActionError(HeatException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ReadOnlyFieldError(HeatException):
msg_fmt = _('Cannot modify readonly field %(field)s')
class ConcurrentTransaction(HeatException):
msg_fmt = _('Concurrent transaction for %(action)s')
class ObjectFieldInvalid(HeatException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class KeystoneServiceNameConflict(HeatException):
msg_fmt = _("Keystone has more than one service with same name "
"%(service)s. Please use service id instead of name")
class SIGHUPInterrupt(HeatException):
msg_fmt = _("System SIGHUP signal received.")
class NoActionRequired(Exception):
pass
class InvalidServiceVersion(HeatException):
msg_fmt = _("Invalid service %(service)s version %(version)s")
| apache-2.0 | -5,168,321,806,644,968,000 | 29.91502 | 78 | 0.623729 | false |
pmarcis/nlp-example | train-truecaser.py | 1 | 1640 | """
This script trains the TrueCase System
"""
import nltk
import os
import sys
import argparse
import cPickle
script_path=os.path.dirname(os.path.realpath(__file__))
truecaser_script_dir = os.path.join(script_path,"dependencies","truecaser")
sys.path.insert(1,truecaser_script_dir)
from TrainFunctions import *
def main(input_file, output_file):
uniDist = nltk.FreqDist()
backwardBiDist = nltk.FreqDist()
forwardBiDist = nltk.FreqDist()
trigramDist = nltk.FreqDist()
wordCasingLookup = {}
sentences = []
for line in input_file:
sentences.append(line.strip().decode('utf-8'))
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
updateDistributionsFromSentences(tokens, wordCasingLookup, uniDist, backwardBiDist, forwardBiDist, trigramDist)
cPickle.dump(uniDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(backwardBiDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(forwardBiDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(trigramDist, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(wordCasingLookup, output_file, protocol=cPickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input', '-i',
type=argparse.FileType('r'),
default=sys.stdin, metavar='PATH',
help="Input file (default: standard input)")
parser.add_argument('--output', '-o', type=argparse.FileType('wb'), metavar='PATH',
help="Output file (binary)")
args = parser.parse_args()
main(args.input, args.output)
| mit | -6,295,123,703,930,949,000 | 37.139535 | 115 | 0.710976 | false |
jrydberg/edgy | src/edgy/xml/utils.py | 1 | 1436 | from edgy.xml.element import _namespace_map
def lookupPrefix(uri):
return _namespace_map.get(uri, None)
def findtext(n, qname, default=None):
for c in n.getchildren():
#print repr(c), qname
if c.tag == str(qname):
return c.text
return default
def find(n, qname, default=None):
for c in n.getchildren():
if c.tag == str(qname):
return c
return default
def findall(n, path):
"""Find all.
"""
new = n.getchildren()[:]
for comp in path:
n = [c for c in new if c.tag == comp]
#print repr(comp), repr(n)
if n:
new = []
for c in n:
new.extend(c.getchildren())
if not n:
break
return n
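# Sketch of findall's traversal (hedged; tag names are placeholders): for a tree
# like <root><a><b/><b/></a></root>, findall(root, ['a', 'b']) descends one level
# per path component and returns both <b> elements; it returns [] as soon as a
# component has no match at the current level.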
def findAndRemove(n, *path):
"""Find instance issued by path and remove it.
"""
for component in path:
if n is None:
break
parent, n = n, find(n, component)
if n is None:
raise Exception("Bad path")
parent.remove(n)
return n
def geturi(prefix, namespaces):
for p, uri in reversed(namespaces):
if p == prefix:
return uri
return None # not found
def splitTag(tag):
if tag[0] == '{':
return tag[1:].split('}', 1)
return None, tag
_split_tag = splitTag
def stripTag(tag):
tag = str(tag)
if tag[0] == '{':
return tag[1:].split('}', 1)[1]
return tag
| mit | -5,665,341,722,381,874,000 | 18.944444 | 50 | 0.533426 | false |
colab-chat/colab-server | web/colab_server/streaming/avroserialiser.py | 1 | 1426 | from fastavro import writer
import io
import uuid
from .avroschema import schema, event_schema
class AvroSerialiser:
def __init__(self):
pass
@staticmethod
def serialise_message(message):
buffer = io.BytesIO()
writer(buffer, schema,
[{'id': uuid.uuid4().int,
'author': message.get_author(),
'type': message.get_message_type().value,
'raw_text': message.get_raw_message(),
'timestamp': message.get_time_created().timestamp(),
'topic': message.get_topic(),
'html': message.get_html()}])
return buffer.getvalue()
@staticmethod
def serialize_binary_message(message):
buffer = io.BytesIO()
writer(buffer, schema,
[{'id': uuid.uuid4().int,
'author': message.get_author(),
'type': message.get_message_type().value,
'binary': message.get_raw_message(),
'timestamp': message.get_time_created().timestamp(),
'topic': message.get_topic(),
'html': message.get_html()}])
return buffer.getvalue()
@staticmethod
def serialise_event_message(event_type, name):
buffer = io.BytesIO()
writer(buffer, event_schema,
[{'event_type': event_type, 'name': name}])
return buffer.getvalue()
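# Usage sketch (hedged): `message` is assumed to be any object exposing the
# accessors used above (get_author, get_raw_message, get_topic, ...), and the
# event values are placeholders.
#   payload = AvroSerialiser.serialise_message(message)
#   event_payload = AvroSerialiser.serialise_event_message('user_joined', 'room-1')
# Both calls return Avro-encoded bytes suitable for publishing on the stream.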
| gpl-3.0 | -6,876,747,384,578,060,000 | 32.952381 | 69 | 0.542076 | false |
EndPointCorp/lg_ros_nodes | lg_mirror/scripts/touch_router_node.py | 1 | 1598 | #!/usr/bin/env python3
from functools import partial
import rospy
import sys
from lg_mirror.touch_router import TouchRouter
from lg_common.helpers import on_new_scene, load_director_message
from lg_msg_defs.msg import StringArray
from lg_common.helpers import handle_initial_state
from lg_mirror.touch_router import SubscribeListener
from lg_msg_defs.srv import TouchRoutes
from lg_common.helpers import run_with_influx_exception_handler
NODE_NAME = 'lg_mirror_router'
def main():
rospy.init_node(NODE_NAME)
default_viewport = rospy.get_param('~default_viewport', None)
device_id = rospy.get_param('~device_id', 'default')
router = TouchRouter(default_viewport)
route_topic = '/lg_mirror/{}/active_routes'.format(device_id)
def publish_active_routes(routes):
routes_pub.publish(StringArray(routes))
new_listener_cb = partial(router.handle_new_listener, publish_active_routes)
routes_pub = rospy.Publisher(
route_topic,
StringArray,
queue_size=10,
subscriber_listener=SubscribeListener(new_listener_cb)
)
# Hacky callback to parse the initial scene.
def handle_initial_scene_msg(msg):
d = load_director_message(msg)
router.handle_scene(publish_active_routes, d)
handle_initial_state(handle_initial_scene_msg)
rospy.Service(route_topic, TouchRoutes, router.handle_service_request)
scene_cb = partial(router.handle_scene, publish_active_routes)
on_new_scene(scene_cb)
rospy.spin()
if __name__ == '__main__':
run_with_influx_exception_handler(main, NODE_NAME)
| apache-2.0 | 7,490,353,579,126,744,000 | 27.535714 | 80 | 0.717772 | false |
felipenaselva/repo.felipe | plugin.video.salts/scrapers/moviewatcher_scraper.py | 1 | 5058 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import urllib
import kodi
import log_utils
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://moviewatcher.to'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'MovieWatcher'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, allow_redirect=False, cache_limit=0)
if html.startswith('http'):
return html
else:
return link
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
page_url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=1)
for item in dom_parser.parse_dom(html, 'a', {'class': 'full-torrent1'}):
stream_url = dom_parser.parse_dom(item, 'span', ret='onclick')
host = dom_parser.parse_dom(item, 'div', {'class': 'small_server'})
match = re.search('Views:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
views = match.group(1) if match else None
match = re.search('Size:\s*(?:</[^>]*>)?\s*(\d+)', item, re.I)
size = int(match.group(1)) * 1024 * 1024 if match else None
if stream_url and host:
stream_url = stream_url[0]
host = host[0].lower()
host = host.replace('stream server: ', '')
match = re.search("'(/redirect/[^']+)", stream_url)
if match:
stream_url = match.group(1)
quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': views, 'rating': None, 'url': stream_url, 'direct': False}
if size is not None: hoster['size'] = scraper_utils.format_size(size, 'B')
hosters.append(hoster)
return hosters
def _get_episode_url(self, show_url, video):
episode_pattern = 'href="([^"]*/s0*%se0*%s(?!\d)[^"]*)' % (video.season, video.episode)
return self._default_get_episode_url(show_url, video, episode_pattern)
def search(self, video_type, title, year, season=''):
results = []
if video_type == VIDEO_TYPES.MOVIE:
vid_type = 'movies'
else:
vid_type = 'series'
search_url = urlparse.urljoin(self.base_url, '/search?query=%s&type=%s')
search_url = search_url % (urllib.quote_plus(title), vid_type)
html = self._http_get(search_url, allow_redirect=False, cache_limit=8)
if html.startswith('http'):
results = [{'url': scraper_utils.pathify_url(html), 'title': scraper_utils.cleanse_title(title), 'year': ''}]
else:
for item in dom_parser.parse_dom(html, 'div', {'class': 'one_movie-item'}):
match_url = dom_parser.parse_dom(item, 'a', ret='href')
match_title = dom_parser.parse_dom(item, 'img', ret='alt')
match_year = ''
if match_url and match_title:
match_url = match_url[0]
match_title = match_title[0]
if match_year:
match_year = match_year[0]
else:
match_year = ''
if not year or not match_year or year == match_year:
result = {'url': scraper_utils.pathify_url(match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
results.append(result)
return results
| gpl-2.0 | 9,100,844,278,614,173,000 | 41.864407 | 167 | 0.566825 | false |
xArm-Developer/xArm-Python-SDK | xarm/x3/utils.py | 1 | 4241 | # !/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <[email protected]> <[email protected]>
import time
import functools
from ..core.utils.log import logger
from .code import APIState
def check_modbus_baud(baud=2000000, _type='set', default=None):
def _check_modbus_baud(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
code = args[0].checkset_modbus_baud(baud)
if code != 0:
logger.error('check modbus baud is failed, code={}'.format(code))
return code if _type == 'set' else (code, default if default != -99 else [])
else:
return func(*args, **kwargs)
return decorator
return _check_modbus_baud
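# How the factory above is typically applied (a hedged sketch -- the method name
# is illustrative, not necessarily one that uses this decorator in the SDK):
#
#   @check_modbus_baud(baud=2000000, _type='set')
#   def set_tool_modbus_config(self, value):
#       ...
#
# The wrapper calls self.checkset_modbus_baud(baud) first and, if that fails,
# returns the error code (or `(code, default)` when _type is 'get') without
# running the wrapped method.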
def xarm_is_connected(_type='set'):
def _xarm_is_connected(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if args[0].connected:
return func(*args, **kwargs)
else:
logger.error('xArm is not connect')
return APIState.NOT_CONNECTED if _type == 'set' else (APIState.NOT_CONNECTED, 'xArm is not connect')
return decorator
return _xarm_is_connected
def xarm_is_ready(_type='set'):
def _xarm_is_ready(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if args[0].connected and kwargs.get('auto_enable', False):
if not args[0].ready:
args[0].motion_enable(enable=True)
args[0].set_mode(0)
args[0].set_state(0)
if args[0].connected:
if args[0].state_is_ready:
return func(*args, **kwargs)
else:
logger.error('xArm is not ready')
logger.info('Please check the arm for errors. If so, please clear the error first. '
'Then enable the motor, set the mode and set the state')
return APIState.NOT_READY if _type == 'set' else (APIState.NOT_READY, 'xArm is not ready')
else:
logger.error('xArm is not connect')
return APIState.NOT_CONNECTED if _type == 'set' else (APIState.NOT_CONNECTED, 'xArm is not connect')
return decorator
return _xarm_is_ready
def xarm_is_pause(_type='set'):
def _xarm_is_pause(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
args[0].check_is_pause()
return func(*args, **kwargs)
return decorator
return _xarm_is_pause
def xarm_wait_until_cmdnum_lt_max(only_wait=False):
def _xarm_wait_until_cmdnum_lt_max(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
ret = args[0].wait_until_cmdnum_lt_max()
if not only_wait and ret is not None:
args[0].log_api_info('API -> {} -> code={}'.format(func.__name__, ret), code=ret)
return ret
return func(*args, **kwargs)
return decorator
return _xarm_wait_until_cmdnum_lt_max
def xarm_is_not_simulation_mode(ret=0):
def _xarm_is_not_simulation_mode(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if not args[0].check_is_simulation_robot():
return func(*args, **kwargs)
else:
return ret
return decorator
return _xarm_is_not_simulation_mode
def api_log(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
ret = func(*args, **kwargs)
logger.info('{}, ret={}, args={}, kwargs={}'.format(func.__name__, ret, args[1:], kwargs))
return ret
return decorator
def compare_time(time1, time2):
try:
s_time = time.mktime(time.strptime(time1, '%Y-%m-%d'))
e_time = time.mktime(time.strptime(time2, '%Y-%m-%d'))
return int(s_time) - int(e_time) > 0
except:
return False
def compare_version(v1, v2):
for i in range(3):
if v1[i] > v2[i]:
return True
elif v1[i] < v2[i]:
return False
return False
| bsd-3-clause | -534,950,684,337,418,000 | 32.928 | 116 | 0.560481 | false |
yassersouri/omgh | src/utils.py | 1 | 1183 | import numpy
import sklearn.metrics
import os
import cv2
import numpy as np
def mean_accuracy(groundtruth, predictions):
groundtruth_cm = sklearn.metrics.confusion_matrix(groundtruth, groundtruth).astype(numpy.float32)
predictions_cm = sklearn.metrics.confusion_matrix(predictions, groundtruth).astype(numpy.float32)
return numpy.mean(numpy.diag(predictions_cm) / numpy.diag(groundtruth_cm))
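# mean_accuracy above is the mean of per-class recall: the diagonal of the
# prediction confusion matrix divided by the per-class ground-truth counts.
# Quick worked check (hypothetical labels):
#   mean_accuracy([0, 0, 1, 1], [0, 0, 1, 0]) == 0.75   # class 0 -> 1.0, class 1 -> 0.5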
def ensure_dir(address):
if not os.path.exists(address):
os.makedirs(address)
def draw_bbox(img, bbox, color=100, width=2):
try:
bx, by, bw, bh = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
except:
bx, by, bw, bh = bbox
new_img = img.copy()
cv2.rectangle(new_img, (bx, by), (bx+bw, by+bh), color, width)
return new_img
def get_rect(img, rect_info):
xmin, xmax, ymin, ymax = rect_info
return img[xmin:xmax, ymin:ymax]
def get_rect_from_bbox(img, bbox):
by, bx, bw, bh = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
return img[bx:bx+bh, by:by+bw]
def l2_feat_norm(feat):
row_norms = np.linalg.norm(feat, axis=1)
new_feat = feat / row_norms[:, np.newaxis]
return new_feat
| mit | 7,947,427,460,157,325,000 | 26.511628 | 101 | 0.662722 | false |
abinashk-inf/AstroBox | src/astroprint/api/cloud.py | 1 | 7527 | # coding=utf-8
__author__ = "AstroPrint Product Team <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os
import json
import uuid
from flask import request, jsonify, abort
from flask.ext.login import current_user
from requests import ConnectionError
from octoprint.server import restricted_access, SUCCESS
from octoprint.server.api import api
from octoprint.events import eventManager, Events
from astroprint.cloud import astroprintCloud, AstroPrintCloudNoConnectionException
from astroprint.printfiles import FileDestinations
from astroprint.printfiles.downloadmanager import downloadManager
from astroprint.printer.manager import printerManager
#~~ Cloud Slicer control
@api.route('/astroprint', methods=['DELETE'])
@restricted_access
def cloud_slicer_logout():
astroprintCloud().signout()
return jsonify(SUCCESS)
@api.route('/astroprint/private-key', methods=['POST'])
def set_private_key():
email = request.values.get('email')
password = request.values.get('password')
if email and password:
try:
if astroprintCloud().signin(email, password):
return jsonify(SUCCESS)
except (AstroPrintCloudNoConnectionException, ConnectionError):
abort(503, "AstroPrint.com can't be reached")
else:
abort(400)
abort(401)
@api.route('/astroprint/login-key', methods=['GET'])
@restricted_access
def get_login_key():
try:
key = astroprintCloud().get_login_key()
if key:
return jsonify(key)
except (AstroPrintCloudNoConnectionException, ConnectionError):
abort(503, "AstroPrint.com can't be reached")
abort(401)
@api.route('/astroprint/upload-data', methods=['GET'])
@restricted_access
def upload_data():
filePath = request.args.get('file')
if filePath:
uploadInfo = astroprintCloud().get_upload_info(filePath)
if uploadInfo:
if 'error' in uploadInfo:
if uploadInfo['error'] == 'no_user':
abort(401)
else:
abort(500)
else:
return json.dumps(uploadInfo)
else:
abort(500)
abort(400)
@api.route("/astroprint/print-files", methods=["GET"])
@restricted_access
def designs():
forceSyncCloud = request.args.get('forceSyncCloud')
cloud_files = json.loads(astroprintCloud().print_files(forceSyncCloud))
local_files = list(printerManager().fileManager.getAllFileData())
if cloud_files:
for p in cloud_files:
p['local_filename'] = None
p['last_print'] = None
p['uploaded_on'] = None
for i in range(len(local_files)):
if "cloud_id" in local_files[i] and p['id'] == local_files[i]['cloud_id']:
local_file = local_files[i]
p['local_filename'] = local_file['name']
p['local_only'] = False
p['uploaded_on'] = local_file['date']
if 'prints' in local_file \
and 'last' in local_file['prints'] \
and local_file['prints']['last'] \
and 'date' in local_file['prints']['last']:
p['last_print'] = local_file['prints']['last']['date']
del local_files[i]
break
cloud_files = sorted(cloud_files, key=lambda e: e['local_filename'] is None)
else:
cloud_files = []
if local_files:
for p in local_files:
p['id'] = uuid.uuid4().hex
p['local_filename'] = p['name']
p['local_only'] = True
p['last_print'] = None
p['uploaded_on'] = p['date']
if 'gcodeAnalysis' in p:
p['info'] = p['gcodeAnalysis']
del p['gcodeAnalysis']
else:
p['info'] = None
if 'prints' in p \
and 'last' in p['prints'] \
and p['prints']['last'] \
and 'date' in p['prints']['last']:
p['last_print'] = p['prints']['last']['date']
del p['prints']
else:
local_files = []
files = sorted(local_files + cloud_files, key=lambda e: e['last_print'], reverse=True)
return json.dumps(files)
@api.route("/astroprint/print-files/<string:print_file_id>/download", methods=["GET"])
@restricted_access
def design_download(print_file_id):
# ask chintan
# if request.headers.get("X-Api-Key") != settings().get(["api", "key"]):
if current_user is None or not current_user.is_authenticated or not current_user.publicKey:
abort(401)
em = eventManager()
def progressCb(progress):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "progress",
"id": print_file_id,
"progress": progress
}
)
def successCb(destFile, fileInfo):
if fileInfo is True:
#This means the files was already on the device
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": print_file_id
}
)
else:
if printerManager().fileManager.saveCloudPrintFile(destFile, fileInfo, FileDestinations.LOCAL):
em.fire(
Events.CLOUD_DOWNLOAD, {
"type": "success",
"id": print_file_id,
"filename": printerManager().fileManager._getBasicFilename(destFile),
"info": fileInfo["info"]
}
)
else:
errorCb(destFile, "Couldn't save the file")
def errorCb(destFile, error):
if error == 'cancelled':
em.fire(
Events.CLOUD_DOWNLOAD,
{
"type": "cancelled",
"id": print_file_id
}
)
else:
em.fire(
Events.CLOUD_DOWNLOAD,
{
"type": "error",
"id": print_file_id,
"reason": error
}
)
if destFile and os.path.exists(destFile):
os.remove(destFile)
if astroprintCloud().download_print_file(print_file_id, progressCb, successCb, errorCb):
return jsonify(SUCCESS)
return abort(400)
@api.route("/astroprint/print-files/<string:print_file_id>/download", methods=["DELETE"])
@restricted_access
def cancel_design_download(print_file_id):
if downloadManager().cancelDownload(print_file_id):
return jsonify(SUCCESS)
else:
return abort(404)
@api.route("/astroprint/print-jobs/<string:print_job_id>/add-reason", methods=["PUT"])
@restricted_access
def update_cancel_reason(print_job_id):
if not "application/json" in request.headers["Content-Type"]:
return abort(400)
data = request.json
#get reason
reason = {}
if 'reason' in data:
reason['reason_id'] = data['reason']
if 'other_text' in data:
reason['other_text'] = data['other_text']
if reason:
if not astroprintCloud().updateCancelReason(print_job_id, reason):
return abort(500)
else:
return jsonify(SUCCESS)
else:
return abort(400)
| agpl-3.0 | 6,595,280,205,975,772,000 | 29.848361 | 107 | 0.554271 | false |
bcgov/gwells | app/backend/aquifers/models/__init__.py | 1 | 31074 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import zipfile
import tempfile
import os
import copy
import reversion
from reversion.models import Version
from django.utils import timezone
from django.contrib.gis.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.contrib.contenttypes.fields import GenericRelation
from django.core.validators import MinValueValidator, MaxValueValidator
from django.core.exceptions import ValidationError
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import GEOSGeometry, MultiPolygon
from django.contrib.gis.geos.prototypes.io import wkt_w
from django.contrib.gis import geos
from gwells.models import AuditModel, CodeTableModel, BasicCodeTableModel
from gwells.db_comments.patch_fields import patch_fields
from .vertical_aquifer_extents import *
patch_fields()
class DynamicMaxValueValidator(MaxValueValidator):
"""
    MaxValueValidator cannot validate against a callable, so this class adds that ability.
    A model's validators can include a DynamicMaxValueValidator that accepts a callable
    (such as get_current_year), allowing validation against the current year at call time
    rather than a constant captured when the module was loaded.
"""
def __call__(self, value):
cleaned = self.clean(value)
limit_value = self.limit_value() if callable(self.limit_value) else self.limit_value
params = {'limit_value': limit_value, 'show_value': cleaned, 'value': value}
if self.compare(cleaned, limit_value):
raise ValidationError(self.message, code=self.code, params=params)
def get_current_year() -> int:
return timezone.now().year
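# Illustrative use of the validator above (hedged; the field name is hypothetical
# and not part of this module):
#
#   drilled_year = models.PositiveIntegerField(
#       validators=[MinValueValidator(1800), DynamicMaxValueValidator(get_current_year)])
#
# Because the limit is a callable, it is evaluated at validation time instead of
# once at import time.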
class WaterRightsPurpose(AuditModel):
"""
    Purpose codes describing the licensed use of water under a water rights licence
"""
code = models.CharField(primary_key=True, max_length=10,
db_column='water_rights_purpose_code')
description = models.CharField(max_length=100)
display_order = models.PositiveIntegerField(default=0)
effective_date = models.DateTimeField(default=timezone.now, null=False)
expiry_date = models.DateTimeField(default=timezone.make_aware(
timezone.datetime.max, timezone.get_default_timezone()), null=False)
class Meta:
db_table = 'water_rights_purpose_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Water Rights Purpose Codes'
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class WaterRightsLicence(AuditModel):
"""
Water rights licences for an aquifer
"""
# Unique in the water rights database we import from.
wrl_sysid = models.IntegerField(
primary_key=True,
verbose_name="Water Rights Licence System ID")
purpose = models.ForeignKey(
WaterRightsPurpose,
db_column='water_rights_purpose_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Water Rights Purpose Reference",
related_name='licences')
# A non-unique licence number, used to calculate allocations along with
# the quantity flag, below.
licence_number = models.BigIntegerField(db_index=True)
# QUANTITY FLAG is the code used to identify how the total quantity is assigned
# across multiple Points of Well Diversion (PWD) for a particular licence and purpose use,
# i.e., T, M, D, or P.
# Only in the case of 'M', the quantity is shared across wells in the licence.
quantity_flag = models.CharField(
max_length=1,
default='T',
choices=(('T', 'T'), ('M', 'M'), ('D', 'D'), ('P', 'P')))
quantity = models.DecimalField(
        max_digits=12, decimal_places=3, blank=True, null=True, verbose_name='Quantity')
effective_date = models.DateTimeField(default=timezone.now, null=False)
class Meta:
verbose_name_plural = 'Aquifer Licences'
def __str__(self):
return '{}'.format(self.licence_number)
class AquiferMaterial(CodeTableModel):
"""
Material choices for describing Aquifer Material
"""
code = models.CharField(
primary_key=True, max_length=10, db_column='aquifer_material_code',
db_comment=('Code for valid options for the broad grouping of geological material found in the'
' aquifer, i.e. SG, S, G, B'))
description = models.CharField(
max_length=100,
db_comment=('Describes the broad grouping of geological material found in the aquifer, i.e.,'
' Sand and Gravel, Sand, Gravel, Bedrock'))
class Meta:
db_table = 'aquifer_material_code'
ordering = ['code']
verbose_name_plural = 'Aquifer Material Codes'
db_table_comment = ('Describes the broad grouping of geological material found in the aquifer, i.e., '
'Sand and Gravel, Sand, Gravel, Bedrock')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferSubtype(CodeTableModel):
"""
Subtypes of Aquifer
"""
code = models.CharField(
primary_key=True, max_length=3, db_column='aquifer_subtype_code',
db_comment=("Categorizes an aquifer based on how it was formed geologically (depositional description). Understanding of how aquifers were formed governs important attributes such as their productivity, vulnerability to contamination as well as proximity and likelihood of hydraulic connection to streams. The code value is a combination of an aquifer type represented by a number and an optional letter representing a more specific aquifer sub-type. There are six major aquifer types, some with multiple subtypes. E.g. aquifer sub-type code 6b is comprised of the aquifer type number (6: Crystalline bedrock aquifers) and subtype letter (b) specifically described as: Fractured crystalline (igneous intrusive or metamorphic, meta-sedimentary, meta-volcanic, volcanic) rock aquifers. Code values range from 1a to 6b."))
description = models.CharField(
max_length=100,
db_comment=('Descriptions that define how the aquifer was formed geologically'
' (depositional description). E.g. Unconfined sand and gravel - large river system,'
' Confined sand and gravel - glacial, Flat-lying to gently-dipping volcanic bedrock.'))
class Meta:
db_table = 'aquifer_subtype_code'
db_table_comment = ('Categorizes an aquifer based on how it was formed geologically (depositional'
' description). Understanding of how aquifers were formed governs important'
' attributes such as their productivity, vulnerability to contamination as well as'
' proximity and likelihood of hydraulic connection to streams. The code value is a'
' combination of an aquifer type represented by a number and an optional letter'
' representing a more specific aquifer sub-type. E.g. Crystalline bedrock aquifers)'
' and subtype letter, Fractured crystalline (igneous intrusive or metamorphic,'
' meta-sedimentary, meta-volcanic, volcanic) rock aquifers. Code values range from'
' 1a to 6b.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferProductivity(CodeTableModel):
"""
Productivity choices for describing Aquifer
-------------------
"""
code = models.CharField(
primary_key=True, max_length=1, db_column='aquifer_productivity_code',
db_comment=('Valid code for the aquifer\'s productivity, which represent an aquifers ability to'
' transmit and yield groundwater; i.e., L, M, H'))
description = models.CharField(
max_length=100,
db_comment=('Describes the aquifer\'s productivity which represent an aquifers ability to'
' transmit and yield groundwater; i.e., Low, Moderate, High'))
class Meta:
db_table = 'aquifer_productivity_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Productivity Codes'
db_table_comment = ('Describes the aquifer\'s productivity which represent an aquifers ability to '
'transmit and yield groundwater; i.e., Low, Moderate, High')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferDemand(CodeTableModel):
"""
Demand choices for describing Aquifer
Note on db_comments: db_comment properties on model columns are
    overridden by the db_column_supplemental_comments provided below.
db_column_supplemental_comments provides an easier way for the DA to add/update
comments in bulk.
"""
code = models.CharField(
primary_key=True, max_length=1, db_column='aquifer_demand_code',
db_comment=('Describes the level of groundwater use at the time aquifer was mapped; i.e., High,'
' Moderate, Low.'))
description = models.CharField(
max_length=100,
db_comment=('Describes the level of groundwater use at the time aquifer was mapped; i.e., High,'
' Moderate, Low.'))
class Meta:
db_table = 'aquifer_demand_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Demand Codes'
db_table_comment = ('Describes the level of groundwater use at the time aquifer was mapped; i.e., High, '
'Moderate, Low.')
db_column_supplemental_comments = {
}
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class WaterUse(CodeTableModel):
"""
Type of Known Water Use choices for describing Aquifer
-------------------
"""
code = models.CharField(
primary_key=True, max_length=2, db_column='water_use_code',
db_comment=('Standard terms that define the type of known water use of an aquifer at the time of'
' mapping. It indicates the variability or diversity of uses of the aquifer water as'
' a supply source. I.e. Domestic, Multiple, Potential Domestic'))
description = models.CharField(
max_length=100,
db_comment=('Description of the standard terms that define the type of known water use of an'
' aquifer at the time of mapping. It indicates the variability or diversity of uses'
' of the aquifer water as a supply source. I.e. Domestic, Multiple, Potential'
' Domestic'))
class Meta:
db_table = 'water_use_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Water Use Codes'
db_table_comment = ('Describes the type of known water use of an aquifer at the time of mapping. It'
' indicates the variability or diversity of uses of the aquifer water as a supply'
' source. I.e. Domestic, Multiple, Potential Domestic')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class QualityConcern(CodeTableModel):
code = models.CharField(
primary_key=True, max_length=2, db_column='quality_concern_code',
db_comment=('Standard terms used to represent the extent of documented concerns of contaminants'
                    ' in the aquifer at the time of mapping. i.e. isolated, local, regional, none.'))
description = models.CharField(
max_length=100,
db_comment=('Description of the standard terms used to represent the extent of documented'
                    ' concerns of contaminants in the aquifer at the time of mapping. i.e. isolated,'
' local, regional, none.'))
class Meta:
db_table = 'quality_concern_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Quality Concern Codes'
db_table_comment = ('Extent of documented concerns of contaminants in the aquifer at the time of'
                            ' mapping. i.e. isolated, local, regional, none.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferVulnerabilityCode(CodeTableModel):
"""
Demand choices for describing Aquifer
"""
code = models.CharField(
primary_key=True, max_length=1, db_column='aquifer_vulnerability_code',
        db_comment=('Code for the aquifer’s relative intrinsic vulnerability to impacts from human'
' activities on the land surface. Vulnerability is based on: the type, thickness,'
' and extent of geologic materials above the aquifer, depth to water table (or to'
' top of confined aquifer), and type of aquifer materials, i.e., L, M, H.'))
description = models.CharField(
max_length=100,
        db_comment=('Describes an aquifer’s relative intrinsic vulnerability to impacts from human'
' activities on the land surface. Vulnerability is based on: the type, thickness,'
' and extent of geologic materials above the aquifer, depth to water table (or to'
' top of confined aquifer), and type of aquifer materials, i.e., Low, Moderate, High.'))
class Meta:
db_table = 'aquifer_vulnerability_code'
ordering = ['display_order', 'code']
verbose_name_plural = 'Aquifer Vulnerability Codes'
        db_table_comment = ('Describes an aquifer’s relative intrinsic vulnerability to impacts from human '
'activities on the land surface. Vulnerability is based on: the type, thickness, '
'and extent of geologic materials above the aquifer, depth to water table (or to '
'top of confined aquifer), and type of aquifer materials, i.e., Low, Moderate, High.')
def __str__(self):
return '{} - {}'.format(self.code, self.description)
@reversion.register()
class Aquifer(AuditModel):
"""
An underground layer of water-bearing permeable rock, rock fractures or unconsolidated materials
(gravel, sand, or silt), from which groundwater is extracted using a water well.
This table holds ONLY the aquifers to which we have associated one or more wells. It is not
the definitive source of all aquifers in the province.
Note on db_comments: db_comment properties on model columns are
    overridden by the db_column_supplemental_comments provided below.
db_column_supplemental_comments provides an easier way for the DA to add/update
comments in bulk.
"""
aquifer_id = models.AutoField(
primary_key=True, verbose_name="Aquifer ID Number",
db_comment=('System generated unique sequential number assigned to each mapped aquifer. The'
' aquifer_id identifies which aquifer a well is in. An aquifer can have multiple'
' wells, while a single well can only be in one aquifer.'))
aquifer_name = models.CharField(
max_length=100, blank=True, null=True,
db_comment=('Name assigned for a specific aquifer. Typically derived from geographic names or names '
'in common use, but may also be lithologic or litho-stratigraphic units, e.g. '
'Abbotsford-Sumas, McDougall Creek Deltaic.'))
location_description = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Description of Location',
db_comment=('Brief description of the geographic location of the aquifer. The description is usually '
'referenced to a nearby major natural geographic area or community, e.g., Grand Forks.'))
material = models.ForeignKey(
AquiferMaterial,
db_column='aquifer_material_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Material Reference",
related_name='aquifers',
db_comment=('Code for valid options for the broad grouping of geological material found in the'
' aquifer, i.e. SG, S, G, B'))
subtype = models.ForeignKey(
AquiferSubtype,
db_column='aquifer_subtype_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Subtype Reference",
related_name='aquifers',
db_comment=('Categorizes an aquifer based on how it was formed geologically (depositional'
' description). Understanding of how aquifers were formed governs important'
' attributes such as their productivity, vulnerability to contamination as well as'
' proximity and likelihood of hydraulic connection to streams. The code value is a'
' combination of an aquifer type represented by a number and an optional letter'
' representing a more specific aquifer sub-type. E.g. 1a, 2, 6a.'))
area = models.DecimalField(
max_digits=5, decimal_places=1, blank=True, null=True, verbose_name='Size (square km)',
db_comment='Approximate size of the aquifer in square kilometers.')
vulnerability = models.ForeignKey(
AquiferVulnerabilityCode,
# TODO: Spelling mistake below!
db_column='aquifer_vulnerablity_code',
blank=True,
null=True,
on_delete=models.PROTECT,
        verbose_name="Aquifer Vulnerability",
        db_comment=('Standard terms used to define an aquifer’s relative intrinsic vulnerability to'
' impacts from human activities on the land surface. Vulnerability is based on: the'
' type, thickness, and extent of geologic materials above the aquifer, depth to'
' water table (or to top of confined aquifer), and type of aquifer materials, i.e.,'
' Low, Moderate, High.'))
productivity = models.ForeignKey(
AquiferProductivity,
db_column='aquifer_productivity_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Productivity Reference",
related_name='aquifers',
db_comment=('Valid code for the aquifer\'s productivity, which represent an aquifers ability to'
' transmit and yield groundwater; i.e., L, M, H'))
demand = models.ForeignKey(
AquiferDemand,
db_column='aquifer_demand_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Demand Reference",
related_name='aquifers',
db_comment=('Describes the level of groundwater use at the time aquifer was mapped; i.e., High,'
' Moderate, Low.'))
known_water_use = models.ForeignKey(
WaterUse,
db_column='water_use_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Known Water Use Reference",
related_name='aquifers',
db_comment=('Standard terms that define the type of known water use of an aquifer at the time of'
' mapping. It indicates the variability or diversity of uses of the aquifer water as'
' a supply source. I.e. Domestic, Multiple, Potential Domestic'))
quality_concern = models.ForeignKey(
QualityConcern,
db_column='quality_concern_code',
blank=True,
null=True,
on_delete=models.PROTECT,
verbose_name="Quality Concern Reference",
related_name='aquifers',
db_comment=('Standard terms used to represent the extent of documented concerns of contaminants'
                    ' in the aquifer at the time of mapping. i.e. isolated, local, regional, none.'))
litho_stratographic_unit = models.CharField(
max_length=100, blank=True, null=True, verbose_name='Lithographic Stratographic Unit',
db_comment=('Permeable geologic unit (where available) that comprises the aquifer. It is typically '
'either; the era of deposition, the name of a specific formation and/or the broad '
'material types, e.g., Paleozoic to Mesozoic Era, Cache Creek Complex, Intrusive Rock.'))
mapping_year = models.PositiveIntegerField(
validators=[
MinValueValidator(1990),
            DynamicMaxValueValidator(get_current_year)],  # pass the callable so the limit tracks the current year
blank=True,
null=True,
verbose_name="Date of Mapping",
help_text="Use the following format: <YYYY>",
db_comment='The year the aquifer was initially mapped or last updated.')
notes = models.TextField(
max_length=2000,
blank=True,
null=True,
verbose_name='Notes on Aquifer, for internal use only.',
db_comment=('Details about the mapped aquifer that the province deems important to maintain such as'
' local knowledge about the aquifer or decisions for changes related to attributes of'
' the mapped aquifer.'))
effective_date = models.DateTimeField(
default=timezone.now, null=False,
db_comment='The date and time that the aquifer became published.')
expiry_date = models.DateTimeField(
default=timezone.make_aware(timezone.datetime.max, timezone.get_default_timezone()), null=False,
db_comment='The date and time after which the aquifer became unpublished.')
retire_date = models.DateTimeField(
default=timezone.make_aware(timezone.datetime.max, timezone.get_default_timezone()), null=False,
db_comment='The date and time after which the aquifer is considered to be retired')
geom = models.MultiPolygonField(srid=3005, null=True)
# This version is pre-rendered in WGS 84 for display on web-maps.
# Only used by the v1 API
geom_simplified = models.MultiPolygonField(srid=4326, null=True)
history = GenericRelation(Version)
@property
def status_retired(self):
return timezone.now() > self.retire_date
@property
def status_draft(self):
return timezone.now() < self.effective_date
@property
def status_published(self):
now = timezone.now()
return now >= self.effective_date and now < self.expiry_date
@property
def status_unpublished(self):
now = timezone.now()
return now >= self.expiry_date
def load_shapefile(self, f):
"""
Given a shapefile with a single feature, update spatial fields of the aquifer.
You must still call aquifer.save() afterwards.
"""
try:
zip_ref = zipfile.ZipFile(f)
except zipfile.BadZipFile as e:
raise Aquifer.BadShapefileException(str(e))
ret = zip_ref.testzip()
if ret is not None:
raise Aquifer.BadShapefileException("Bad zipfile, info: %s" % ret)
the_shapefile = None
output_dir = tempfile.mkdtemp()
for item in zip_ref.namelist():
# Check filename endswith shp
zip_ref.extract(item, output_dir)
if item.endswith('.shp'):
# Extract a single file from zip
the_shapefile = os.path.join(output_dir, item)
# break
zip_ref.close()
if the_shapefile is None:
raise Aquifer.BadShapefileException("Bad zipfile. No shapefile found.")
ds = DataSource(the_shapefile)
self.update_geom_from_feature(ds[0][0])
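    # Illustrative call site (not part of the original class), assuming an
    # uploaded zip archive handled by a view or management command:
    #   with open(path_to_zip, 'rb') as f:
    #       aquifer.load_shapefile(f)
    #   aquifer.save()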
def update_geom_from_feature(self, feat):
"""
Given a spatial feature with Geometry, update spatial fields of the aquifer.
You must still call aquifer.save() afterwards.
"""
geom = feat.geom
if not geom.srid:
raise Aquifer.BadShapefileException("Shapefile contains no projection information")
# Make a GEOSGeometry object using the string representation.
# Eliminate any 3d geometry so it fits in PostGIS' 2d geometry schema.
wkt = wkt_w(dim=2).write(GEOSGeometry(geom.wkt, srid=geom.srid)).decode()
geos_geom = GEOSGeometry(wkt, srid=geom.srid)
geos_geom.transform(3005)
# Convert plain Polygons to MultiPolygons,
if isinstance(geos_geom, geos.MultiPolygon):
geos_geom_out = geos_geom
elif isinstance(geos_geom, geos.Polygon):
geos_geom_out = MultiPolygon(geos_geom)
else:
raise Aquifer.BadShapefileException("Bad geometry type: {}, skipping.".format(geos_geom.__class__))
self.geom = geos_geom_out
class Meta:
db_table = 'aquifer'
ordering = ['aquifer_id']
verbose_name_plural = 'Aquifers'
db_table_comment = ('A geological formation, a group of geological formations, or a part of one or more '
'geological formations that is groundwater bearing and capable of storing, '
'transmitting and yielding groundwater.')
class BadShapefileException(Exception):
pass
def __str__(self):
return '{} - {}'.format(self.aquifer_id, self.aquifer_name)
db_column_supplemental_comments = {
"aquifer_demand_code": "Describes the level of groundwater use at the time the aquifer was mapped; i.e., High, Moderate, Low.",
"aquifer_id": "System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.",
"aquifer_material_code": "Describes the broad grouping of geological material found in the aquifer, i.e., Sand and Gravel, Sand, Gravel, Bedrock",
"aquifer_productivity_code": "Describes the aquifer's productivity which represent an aquifers ability to transmit and yield groundwater; i.e., Low, Moderate, High",
"aquifer_subtype_code": "Categorizes an aquifer based on how it was formed geologically (depositional description). Understanding of how aquifers were formed governs important attributes such as their productivity, vulnerability to contamination as well as proximity and likelihood of hydraulic connection to streams. The code value is a combination of an aquifer type represented by a number and an optional letter representing a more specific aquifer sub-type. There are six major aquifer types, some with multiple subtypes. E.g. aquifer sub-type code 6b is comprised of the aquifer type number (6: Crystalline bedrock aquifers) and subtype letter (b) specifically described as: Fractured crystalline (igneous intrusive or metamorphic, meta-sedimentary, meta-volcanic, volcanic) rock aquifers. Code values range from 1a to 6b.",
"aquifer_vulnerablity_code": "Describes an aquifer’s relative intrinsic vulnerability to impacts from human activities on the land surface. Vulnerability is based on: the type, thickness, and extent of geologic materials above the aquifer, depth to water table (or to top of confined aquifer), and type of aquifer materials, i.e., Low, Moderate, High.",
"quality_concern_code": "Extent of documented concerns of contaminants in the aquifer at the time of mapping. i.e. isloated, local, regional, none.",
"water_use_code": "Describes the type of known water use of an aquifer at the time of mapping. It indicates the variability or diversity of uses of the aquifer water as a supply source. I.e. Domestic, Multiple, Potential Domestic",
}
@receiver(pre_save, sender=Aquifer)
def update_geom_simplified(sender, instance, **kwargs):
geos_geom_simplified = None
if instance.geom:
simplified_polygons = []
for poly in instance.geom:
geos_geom_simplified = poly.simplify(40, preserve_topology=True)
geos_geom_simplified.transform(4326)
simplified_polygons.append(geos_geom_simplified)
geos_geom_simplified = MultiPolygon(simplified_polygons)
instance.geom_simplified = geos_geom_simplified
@receiver(pre_save, sender=Aquifer)
def update_area(sender, instance, **kwargs):
area = None
if instance.geom:
area = instance.geom.area / 1_000_000 # convert to km²
instance.area = area
class AquiferResourceSection(BasicCodeTableModel):
"""
Defines the available sections (categories) of aquifer resources.
"""
code = models.CharField(primary_key=True, max_length=1,
db_column='aquifer_resource_section_code')
name = models.CharField(max_length=100)
description = models.CharField(max_length=100, default="")
class Meta:
ordering = ['name']
verbose_name_plural = 'Aquifer Resource Sections'
db_table = 'aquifer_resource_section_code'
def __str__(self):
return '{} - {}'.format(self.code, self.description)
class AquiferResource(AuditModel):
"""
A PDF document associated with a given aquifer.
"""
id = models.AutoField(
primary_key=True,
verbose_name="Aquifer Resource Identifier",
db_column='aquifer_resource_id')
aquifer = models.ForeignKey(
Aquifer,
related_name='resources',
on_delete=models.CASCADE,
db_comment=('System generated sequential number assigned to each aquifer. It is widely used by groundwater staff as it is the only consistent unique identifier for a mapped aquifer. It is also commonly referred to as Aquifer Number.'))
section = models.ForeignKey(
AquiferResourceSection,
db_column='aquifer_resource_section_code',
verbose_name="Aquifer Resource Section",
on_delete=models.PROTECT,
help_text="The section (category) of this resource.")
name = models.CharField(
max_length=255,
verbose_name="Aquifer Resource Name",
help_text="",
db_comment=('Descriptive name of the document at the URL that contains the internal or external information about the aquifer.')
)
url = models.URLField(
verbose_name="PDF Document URL",
max_length=255,
help_text="A resolvable link to the PDF document associated with this aquifer resource.",
db_comment=('The web address where the internal or external information about the aquifer can be found.'))
class Meta:
ordering = ['name']
verbose_name_plural = 'Aquifer Resource'
def __str__(self):
return self.name
| apache-2.0 | 2,642,793,636,649,225,000 | 46.496942 | 841 | 0.661655 | false |
wfxiang08/Nuitka | misc/make-pypi-upload.py | 1 | 2245 | #!/usr/bin/env python
# Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Make PyPI upload of Nuitka, and check success of it. """
from __future__ import print_function
import os
import subprocess
import time
import xmlrpclib
nuitka_version = subprocess.check_output(
"./bin/nuitka --version", shell = True
).strip()
branch_name = subprocess.check_output(
"git name-rev --name-only HEAD".split()
).strip()
assert branch_name == "master", branch_name
assert "pre" not in nuitka_version
# Need to remove the contents from the Rest, or else PyPI will not render
# it. Stupid but true.
contents = open("README.rst", "rb").read()
contents = contents.replace(b".. contents::", b"")
open("README.rst", "wb").write(contents)
contents = open("README.rst", "rb").read()
assert b".. contents" not in contents
assert 0 == os.system("misc/make-doc.py")
assert 0 == os.system("python setup.py sdist upload")
# A delay might be necessary before making the check.
for i in range(60):
# Wait some time for PyPI to catch up with us. Without delay
# the old version will still appear. Since this is running
# in a Buildbot, we need not be optimal.
time.sleep(5*60)
pypi = xmlrpclib.ServerProxy("http://pypi.python.org/pypi")
pypi_versions = pypi.package_releases("Nuitka")
assert len(pypi_versions) == 1, pypi_versions
if nuitka_version == pypi_versions[0]:
break
    print("Version check failed:", nuitka_version, pypi_versions)
print("Uploaded OK:", pypi_versions[0])
| apache-2.0 | -1,278,441,740,454,644,000 | 32.014706 | 78 | 0.700223 | false |
Parallels/githooks | test.py | 1 | 19045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
#
'''
Unit tests for githooks
How it works:
* Create a workspace tmp/ in cwd, set up a dummy STASH_HOME,
a remote repo and a local repo there.
 * Replace tmp/remote_repo.git/hooks/update in the remote repo with
   hook_fixture.py. The hook_fixture.py script doesn't do anything
but dumps the arguments it is called with to a file (branch and
2 hashes, old and new).
* Each unit test in test.py modifies the local repo somehow, commits
the changes and then runs `git push` asynchronously. `git push`
invokes the update hook (hook_fixture.py) in tmp/remote_repo.git.
The hook script dumps its arguments to a file tmp/request.json.
* The unit test (test.py) waits until tmp/request.json is written,
reads it in and removes the file. Then, it instantiates the Hook
object from the hook module it tests, and performs various testing
using the data from tmp/request.json.
* When the testing is done, the unit test writes a response file
tmp/response.json for the update script (the update script waits
until it is able to read this file). The response file contains
the testing exit code. The update script reads in the file, removes
it and returns the exit code to git (asynchronously called from the
unit test in test.py).
'''
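# The update hook installed in tmp/remote_repo.git (hook_fixture.py) is not
# part of this file; a minimal sketch of the behaviour described above,
# assuming the request.json/response.json protocol from the docstring, is:
#
#   def update_hook(branch, old_sha, new_sha):
#       with open('tmp/request.json', 'w') as f:
#           json.dump([branch, old_sha, new_sha], f)
#       while not os.path.exists('tmp/response.json'):
#           sleep(0.1)
#       with open('tmp/response.json') as f:
#           code, message = json.load(f)
#       os.remove('tmp/response.json')
#       sys.exit(code)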
import unittest
import subprocess
import shutil
import os
import multiprocessing
import json
import sys
import logging
from time import sleep
import githooks
def git(cmd, repo=None):
if repo:
return subprocess.check_output(['git', '-C', repo] + cmd,
stderr=subprocess.STDOUT)
else:
return subprocess.check_output(['git'] + cmd,
stderr=subprocess.STDOUT)
def git_async(cmd, repo=None):
def call_git(cmd, repo=None, result=None):
try:
result.put([0, git(cmd, repo)])
except subprocess.CalledProcessError, e:
result.put([e.returncode, e.output])
result = multiprocessing.Queue()
proc = multiprocessing.Process(target=call_git, args=(cmd, repo, result))
proc.start()
return [proc, result]
def git_async_result(git_call):
git_call[0].join()
result = git_call[1].get()
if result[0] == 0:
return result[1]
else:
raise subprocess.CalledProcessError(result[0], 'git', result[1])
def write_string(filename, string):
with open(filename, 'w+') as f:
f.write(string)
class TestBase(unittest.TestCase):
def setUp(self):
self.cwd = os.getcwd()
self.base = os.path.join(self.cwd, 'tmp')
self.cleanUp()
os.mkdir(self.base)
self.remote_repo = os.path.join(self.base, 'remote_repo.git')
self.repo = os.path.join(self.base, 'repo')
# Create tmp/test.conf
with open(os.path.join(self.base, 'test.conf'), 'w') as f:
f.write(json.dumps({"line_endings":[],
"notify":[],
"email_mention":[]},
indent=4))
gh = githooks.Githooks(conf_file='test.conf', ini_file='testhooks.ini',
repo_dir=self.remote_repo)
self.hooks = dict(zip(gh.conf.keys(), gh.hooks))
# Set up repositories
self.__setup_remote_repo()
self.__setup_local_repo()
self.__add_remote_repo()
self.hook_request = os.path.join(self.base, 'request.json')
self.hook_response = os.path.join(self.base, 'response.json')
os.chdir(self.repo)
def cleanUp(self):
base = self.base
if os.path.isdir(base):
shutil.rmtree(base)
def __setup_remote_repo(self):
git(['init', '--bare', self.remote_repo])
shutil.copy(os.path.join(self.cwd, 'hook_fixture.py'),
os.path.join(self.remote_repo, 'hooks', 'update'))
def __setup_local_repo(self):
git(['init', self.repo])
git(['config', 'push.default', 'simple'], self.repo)
def __add_remote_repo(self):
git(['remote', 'add', 'origin', self.remote_repo], self.repo)
def get_request(self):
request = self.hook_request
attempts = 0
while 1:
if not os.path.exists(request):
attempts = attempts + 1
sleep(0.1)
else:
break
if attempts >= 200:
raise RuntimeError('Timeout exceeded')
with open(request) as f:
data = f.read()
os.remove(request)
return json.loads(data)
def write_response(self, code, data):
with open(self.hook_response, 'w+') as f:
f.write(json.dumps([code, data]))
def tearDown(self):
os.chdir(self.cwd)
#self.cleanUp()
class TestBasicHooks(TestBase):
def test_successful_hook(self):
write_string('foo.txt', 'data')
git(['add', 'foo.txt'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
def test_failed_hook(self):
write_string('foo.txt', 'otherdata')
git(['add', 'foo.txt'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
self.get_request()
self.write_response(1, 'hook_failed')
with self.assertRaises(subprocess.CalledProcessError) as cm:
git_async_result(git_call)
self.assertRegexpMatches(cm.exception.output, ".*hook_failed.*")
class TestLineEndings(TestBase):
def test_get_attr(self):
write_string('a.txt', 'data')
write_string('b.txt', 'data')
write_string('c.txt', 'data')
write_string('.gitattributes', 'a.txt binary\nb.txt text')
git(['add', 'a.txt', 'b.txt', 'c.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
import hookutil
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'a.txt', 'binary'),
'set')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'a.txt', 'text'),
'unset')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'b.txt', 'binary'),
'unspecified')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'b.txt', 'text'),
'set')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'c.txt', 'binary'),
'unspecified')
self.assertEquals(hookutil.get_attr(self.repo, request[2], 'c.txt', 'text'),
'unspecified')
self.write_response(0, 'success')
git_async_result(git_call)
def test_successful_hook(self):
write_string('a.txt', 'data\n')
write_string('.gitattributes', 'a.txt text')
git(['add', 'a.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["line_endings"]
self.assertTrue(hook.check(request[0], request[1], request[2])[0])
self.write_response(0, 'success')
git_async_result(git_call)
def test_failed_hook(self):
git(['config', 'core.autocrlf', 'false'])
write_string('a.txt', 'data\r\n\n')
write_string('b.txt', 'data\r\n\n')
write_string('c.txt', 'data\r\n\n')
# git will normalize eols when attr 'text' is set
write_string('.gitattributes', 'a.txt text')
git(['add', 'a.txt', 'b.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git(['add', 'c.txt'])
git(['commit', '-m', 'second commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["line_endings"]
permit, messages = hook.check(request[0], request[1], request[2])
self.assertFalse(permit)
self.assertTrue(len(messages) == 2)
self.assertTrue([message['text'] for message in messages] == [
"Error: file 'c.txt' has mixed line endings (CRLF/LF)",
"Error: file 'b.txt' has mixed line endings (CRLF/LF)"
])
self.write_response(0, 'success')
git_async_result(git_call)
class TestNotify(TestBase):
def test_compose_mail(self):
write_string('a.txt', 'data')
write_string('b.txt', 'data')
write_string('.gitattributes', 'a.txt [email protected],[email protected]\nb.txt [email protected]')
git(['add', 'a.txt', 'b.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["notify"]
owners = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue('[email protected]' in owners)
text = owners['[email protected]']
self.assertTrue('<b>Branch:</b> master' in text)
self.assertTrue('Commit: %s' % request[2] in text)
self.assertTrue('A a.txt' in text)
self.assertTrue('[email protected]' in owners)
text = owners['[email protected]']
self.assertTrue('<b>Branch:</b> master' in text)
self.assertTrue('Commit: %s' % request[2] in text)
self.assertTrue('A a.txt' in text)
self.assertTrue('A b.txt' in text)
self.write_response(0, 'success')
git_async_result(git_call)
def test_merge_commit(self):
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
git(['checkout', '-b', 'hotfix'])
write_string('a.txt', 'newdata')
write_string('.gitattributes', 'b.txt [email protected]')
git(['add', 'a.txt', '.gitattributes'])
git(['commit', '-m', 'hotfix'])
git(['checkout', 'master'])
git(['checkout', '-b', 'feature'])
write_string('b.txt', 'reallynewdata')
git(['add', 'b.txt'])
git(['commit', '-m', 'feature'])
git(['checkout', 'master'])
git(['merge', 'hotfix'])
git(['merge', 'feature'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["notify"]
owners = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue('[email protected]' in owners)
text = owners['[email protected]']
self.assertTrue("Merge branch 'feature'\n\n\tA b.txt" in text)
self.assertTrue("feature\n\n\tA b.txt" in text)
self.assertFalse("hotfix\n\n\tM a.txt" in text)
self.write_response(0, 'success')
git_async_result(git_call)
def test_successful_hook(self):
write_string('a.txt', 'data')
write_string('.gitattributes', '*.txt owners=somebody,andmore')
git(['add', 'a.txt', '.gitattributes'])
git(['commit', '-m', 'initial commit'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
git(['checkout', '-b', 'feature/test'])
write_string('a.txt', 'newdata')
git(['add', 'a.txt'])
git(['commit', '-m', 'update a.txt'])
write_string('c.txt', 'data')
write_string('a.txt', 'againnewdata')
git(['add', 'c.txt', 'a.txt'])
git(['commit', '-m', 'create c.txt, update a.txt'])
git_call = git_async(['push', '-u', 'origin', 'feature/test'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
git(['checkout', 'master'])
write_string('b.txt', 'data')
git(['add', 'b.txt'])
git(['commit', '-m', 'create b.txt'])
git(['merge', 'feature/test'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["notify"]
hook.settings = [
"refs/heads/master"
]
owners = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue('somebody' in owners)
text = owners['somebody']
self.assertTrue('andmore' in owners)
self.assertTrue(text == owners['andmore'])
self.assertTrue("Merge branch 'feature/test'\n\n\tM a.txt\n\tA c.txt" in text)
self.assertTrue("create b.txt\n\n\tA b.txt" in text)
self.assertTrue("create c.txt, update a.txt\n\n\tM a.txt\n\tA c.txt" in text)
self.assertTrue("update a.txt\n\n\tM a.txt" in text)
self.write_response(0, 'success')
git_async_result(git_call)
def test_successful_hook_send(self):
hook = self.hooks["notify"]
assert hook.params['smtp_from'], 'please configure smtp_from to run this test'
write_string('a.txt', 'data')
write_string('b.txt', 'data')
git(['add', 'a.txt', 'b.txt'])
git(['commit', '-m', 'initial commit'])
sleep(1)
git_call = git_async(['push', '-u', 'origin', 'master:another'], self.repo)
self.get_request()
self.write_response(0, 'success')
git_async_result(git_call)
write_string('b.txt', 'dat')
write_string('.gitattributes', '*.txt owners=%s' % hook.params['smtp_from'])
git(['add', 'b.txt', '.gitattributes'])
git(['commit', '-m', 'second commit'])
sleep(1)
write_string('a.txt', 'dat')
git(['add', 'a.txt'])
# Test long commit message trimming
mes = ' length over one hundred symbols'
git(['commit', '-m', 'third commit' + mes + mes + mes])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook.settings = [
"refs/heads/master"
]
hook.check(request[0], request[1], request[2])
self.write_response(0, 'success')
git_async_result(git_call)
class TestEmailMention(TestBase):
'''
Test email_mention hook.
'''
def test_compose_mail_simple(self):
'''
Test simpliest commit message:
Some feature.
@somebody
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature.\n@somebody'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 1)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_dot_end(self):
'''
Test dot in the end of username:
Some feature.
CC @somebody.
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature.\nCC @somebody.'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 1)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_mention_at_begin(self):
'''
Test in the beginning of commit message:
@somebody check it
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', '@somebody check it'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 1)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_many_mentions(self):
'''
Test a list of usernames:
Some feature @somebody,@andmore
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature @somebody,@andmore'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 2)
self.assertTrue('[email protected]' in mails)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_many_mentions_and_commits(self):
'''
Test composing mails across several commits.
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature @somebody'])
write_string('a.txt', 'newdata')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature @somebody @andmore.'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 2)
self.assertTrue('[email protected]' in mails)
self.assertTrue('[email protected]' in mails)
self.write_response(0, 'success')
git_async_result(git_call)
def test_compose_mail_mention_email(self):
'''
Test do not parse email addresses.
'''
write_string('a.txt', 'data')
git(['add', 'a.txt'])
git(['commit', '-m', 'Some feature [email protected]'])
git_call = git_async(['push', '-u', 'origin', 'master'], self.repo)
request = self.get_request()
hook = self.hooks["email_mention"]
mails = hook.compose_mail(request[0], request[1], request[2])
self.assertTrue(len(mails) == 0)
self.write_response(0, 'success')
git_async_result(git_call)
if __name__ == '__main__':
unittest.main()
| mit | -1,477,752,506,485,911,800 | 32.350263 | 123 | 0.564722 | false |
jackyyf/paste.py | src/paste.py | 1 | 2800 | _version = '0.0.1'
import sys
import os
import argparse
from lib import logger, config, uri
from lib.provider import ProviderBase, getProvider
# Change default encoding to UTF-8
reload(sys)
sys.setdefaultencoding('UTF-8')
del sys.setdefaultencoding
sys.path = [os.path.abspath('.')] + sys.path
class _Smart_formatter(argparse.HelpFormatter):
def _split_lines(self, text, width):
# this is the RawTextHelpFormatter._split_lines
if '\n' in text:
return text.splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def run():
parser = argparse.ArgumentParser(prog='paste.py', description='Push to or pull from paste pads!',
conflict_handler='resolve', add_help=False,
formatter_class=_Smart_formatter)
opt_common = parser.add_argument_group('Common Options')
opt_common.add_argument('-h', '--help', action='help',
help='Print this help message and exit.\n'
'Use `paste.py provider -h` for specific information.')
opt_common.add_argument('-V', '--version', action='version', version='%(prog)s ' + _version)
opt_log = parser.add_argument_group('Logging Options')
opt_log.add_argument('--verbose', '-v', action='store_const', dest='log.level', const=logger.Level.INFO,
default=logger.Level.WARN, help='Enable verbose output.')
opt_log.add_argument('--debug', '-g', action='store_const', dest='log.level', const=logger.Level.DEBUG,
help='Enable debug output. (VERY VERBOSE!)')
opt_log.add_argument('--quiet', '-q', action='store_const', dest='log.level', const=logger.Level.ERROR,
help='Just be quiet, output only error message.')
opt_log.add_argument('--simple-log', action='store_const', dest='log.format', const='{message}',
default=None, help='Output just simple message without timestamp, log level etc.')
opt_log.add_argument('--no-color', action='store_const', dest='log.colorize', const=False,
default=True, help='Disable colorful output. Note: colorful is always false if output file is not a terminal.')
opt_action = parser.add_subparsers(title='Paste pads', help='introduction', metavar='provider', dest='provider')
__import__('providers', globals(), locals())
for provider in ProviderBase.__subclasses__():
ins = provider()
opt_ins = opt_action.add_parser(ins._name, help=ins._info, conflict_handler='resolve')
ins.add_args(opt_ins)
args = parser.parse_args()
conf = config.getConfig()
for arg in args._get_kwargs():
conf.set(arg[0], arg[1])
logger.init(colorize=conf.getboolean('log.colorize'), level=conf.getint('log.level'), log_format=conf.get('log.format'))
getProvider(conf.get('provider')).run()
if __name__ == '__main__':
run()
| mit | 9,107,660,406,084,468,000 | 46.457627 | 121 | 0.666786 | false |
apple/llvm-project | lldb/test/API/commands/expression/import-std-module/vector-of-vectors/TestVectorOfVectorsFromStdModule.py | 5 | 2035 | """
Test std::vector functionality when it's contents are vectors.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestVectorOfVectors(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.",
lldb.SBFileSpec("main.cpp"))
vector_type = "std::vector<int>"
vector_of_vector_type = "std::vector<" + vector_type + " >"
size_type = vector_of_vector_type + "::size_type"
value_type = "std::__vector_base<int, std::allocator<int> >::value_type"
self.runCmd("settings set target.import-std-module true")
self.expect_expr(
"a",
result_type=vector_of_vector_type,
result_children=[
ValueCheck(type="std::vector<int>",
children=[
ValueCheck(value='1'),
ValueCheck(value='2'),
ValueCheck(value='3'),
]),
ValueCheck(type="std::vector<int>",
children=[
ValueCheck(value='3'),
ValueCheck(value='2'),
ValueCheck(value='1'),
]),
])
self.expect_expr("a.size()", result_type=size_type, result_value="2")
self.expect_expr("a.front().front()",
result_type=value_type,
result_value="1")
self.expect_expr("a[1][1]", result_type=value_type, result_value="2")
self.expect_expr("a.back().at(0)",
result_type=value_type,
result_value="3")
| apache-2.0 | -44,866,906,489,282,680 | 36.685185 | 80 | 0.474693 | false |
AnhellO/DAS_Sistemas | Ene-Jun-2021/guerrero-lopez-cristian-edgardo/PARCIAL 1 PRACTICA 1/Practica 1.py | 1 | 1520 | class Automovil:
def __init__(self, Vel, Kilometraje_Marcado, Cupo_Max):
self.Vel = Vel
self.Kilometraje_Marcado = Kilometraje_Marcado
self.Cupo_Max = Cupo_Max
def Costo(self, Cupo_Max):
Costo = self.Cupo_Max*100
return Costo
def Precio(self, Cupo_Max):
Precio = self.Cupo_Max*100
Precio1 = (Precio*.1) + Precio
return Precio1
random=(50)
def __str__(self):
        return f"CAR WITH A TOP SPEED OF: {self.Vel}\n MILEAGE: {self.Kilometraje_Marcado}\n MAX CAPACITY: {self.Cupo_Max}\n"
class Camion(Automovil):
def __init__(self, Vel, Kilometraje_Marcado, Cupo_Max):
Automovil.__init__(self, Vel, Kilometraje_Marcado, Cupo_Max)
def __str__(self):
        return f"TRUCK WITH A TOP SPEED OF: {self.Vel}\n MILEAGE: {self.Kilometraje_Marcado}\n MAX CAPACITY: {self.Cupo_Max}\n"
if __name__ == "__main__":
Camion1=Camion(300,100000,45)
Auto1=Automovil(150,3251212,4)
Camion2=Camion(400,60000,50)
Auto2=Automovil(100,5160,8)
Lista_Chida = [Camion1,Auto1,Camion2,Auto2]
for z in Lista_Chida:
if isinstance(z, Camion):
m = z.Precio(z.Cupo_Max)
            print(f"{z} THE TOTAL THIS TIME IS: {m}")
elif isinstance(z, Automovil):
n = z.Costo(z.Cupo_Max)
            print(f"{z} THE TOTAL THIS TIME IS: {n}") | mit | -3,127,128,528,588,065,300 | 34.372093 | 157 | 0.589474 | false |
Nablaquabla/sns-analysis | run-am-analysis-v4.py | 1 | 3313 | import os
import time as tm
import sys
# Handles the creation of condor files for a given set of directories
# -----------------------------------------------------------------------------
def createCondorFile(dataDir,outDir,run,day,times):
# Condor submission file name convention: run-day-time.condor
with open('/home/bjs66/CondorFiles/%s-%s.condor'%(run,day),'w') as f:
# Fixed program location'
f.write('Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4\n')
# Arguments passed to the exe:
# Set main run directory, e.g. Run-15-10-02-27-32-23/151002
        # Set current time to be analyzed (w/o .zip extension!), e.g. 184502
# Set output directory, eg Output/ Run-15-10-02-27-32-23/151002
f.write('Arguments = \"2 %s $(Process) %s 0\"\n'%(dataDir,outDir))
# Standard cluster universe
f.write('universe = vanilla\n')
f.write('getenv = true\n')
# Program needs at least 300 MB of free memory to hold unzipped data
f.write('request_memory = 300\n')
# Output, error and log name convention: run-day-time.log/out/err
f.write('log = ../../Logs/%s-%s-$(Process).log\n'%(run,day))
f.write('Output = ../../Outs/%s-%s-$(Process).out\n'%(run,day))
f.write('Error = ../../Errs/%s-%s-$(Process).err\n'%(run,day))
# Do not write any emails
f.write('notification = never\n')
f.write('+Department = Physics\n')
f.write('should_transfer_files = NO\n')
# Add single job to queue
f.write('Queue %i'%times)
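# For reference, a submit file rendered by createCondorFile() is expected to
# look roughly like this (paths and the queue count are illustrative only):
#
#   Executable = /home/bjs66/GitHub/sns-analysis/sns-analysis-v4
#   Arguments = "2 <dataDir> $(Process) <outDir> 0"
#   universe = vanilla
#   getenv = true
#   request_memory = 300
#   log = ../../Logs/Position-1-150617-$(Process).log
#   Output = ../../Outs/Position-1-150617-$(Process).out
#   Error = ../../Errs/Position-1-150617-$(Process).err
#   notification = never
#   +Department = Physics
#   should_transfer_files = NO
#   Queue 36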
# Main function handling all internals
# -----------------------------------------------------------------------------
def main(r):
# Choose main directory, i.e. ~/csi/beam_on_data/Run-15-06-25-xyz/
mainRunDir = '/var/phy/project/phil/grayson/COHERENT/CsI/'
# Choose output directory, i.e. ~/output/Run-15-06-25-xyz/
mainOutDir = '/var/phy/project/phil/grayson/COHERENT/CsI/bjs-analysis/'
# Choose run to analyze
run = 'Position-%s'%r
subdirs = {}
subdirs[run] = 'am_calibration_1350v'
days_in = {}
days_in[run] = ['150617']
# Iterate through all days in a given run folder, create a condor file and run it.
for day in days_in[run]:
# Prepare paths for further processing
dataRunDir = mainRunDir + '%s/%s/%s'%(subdirs[run],run,day)
outDir = mainOutDir + '%s/%s'%(run,day)
# Create output directory if it does not exist
if not os.path.exists(outDir):
os.makedirs(outDir)
# Get all times within the day folder chosen and prepare condor submit files
tList = [x.split('.')[0] for x in os.listdir(dataRunDir)]
createCondorFile(dataRunDir,outDir,run,day,len(tList))
# createCondorFile(dataRunDir,outDir,run,day,2)
cmd = 'condor_submit /home/bjs66/CondorFiles/%s-%s.condor'%(run,day)
os.system(cmd)
tm.sleep(1)
if __name__ == '__main__':
main(sys.argv[1])
| gpl-3.0 | -7,294,928,957,756,611,000 | 31.165049 | 102 | 0.532146 | false |
PeytonXu/learn-python | cases/henghan_oa_checkin_checkout/test_fun.py | 1 | 1511 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging.handlers
import configparser
import re
import time
handler = logging.handlers.TimedRotatingFileHandler(filename="test", when='s', interval=2, backupCount=5,
encoding='UTF-8')
handler.suffix = '%Y-%m-%d-%H-%M-%S.log'
handler.extMatch = re.compile(r'^\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}.log$')
formatter = logging.Formatter("%(asctime)s %(message)s")
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
handler2 = logging.handlers.RotatingFileHandler(filename='test.log', maxBytes=1024, backupCount= 3)
handler2.setFormatter(formatter)
# root_logger.removeHandler(handler)
root_logger.addHandler(handler2)
def test():
for i in range(100):
root_logger.info("test" + str(i))
# time.sleep(1)
def test_config():
conf = configparser.ConfigParser()
conf.read('config.ini', encoding='utf-8')
name = conf.get('login', 'name')
passwd = conf.get('login', 'password')
if name == 'name' and passwd == 'password':
name = input("Please input your login name: ")
passwd = input("Please input your login password: ")
conf.set('login', 'name', name)
conf.set('login', 'password', passwd)
with open('config.ini', 'w', encoding='utf-8') as f:
conf.write(f)
print(name)
print(passwd)
if __name__ == '__main__':
test_config()
| mit | 6,081,286,916,630,466,000 | 30.479167 | 105 | 0.632694 | false |
smlbiobot/SML-Cogs | royalerant/royalerant.py | 1 | 6221 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import re
from collections import defaultdict
import aiohttp
import discord
import peony
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
from peony.exceptions import PeonyException
PATH = os.path.join("data", "royalerant")
JSON = os.path.join(PATH, "settings.json")
ROLES = ['Member', 'Guest', 'SUPERMOD', 'MOD', 'Patron', 'Wrapper', 'Showcase', 'Collaborator']
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
class RoyaleRant:
"""RoyaleRant Twitter client.
User type !royalerant message which gets broadcasted to @RoyaleRant
"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
if self.settings.get("twitter_api") is None:
self.settings["twitter_api"] = {
"consumer_key": '12345',
"consumer_secret": '12345',
"access_token": '12345',
"access_token_secret": '12345'
}
dataIO.save_json(JSON, self.settings)
def peony_client(self, **kwargs):
"""Return Twitter API instance."""
return peony.PeonyClient(**self.settings['twitter_api'], **kwargs)
@commands.group(pass_context=True)
async def royalerantset(self, ctx):
"""Settings."""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@checks.is_owner()
@royalerantset.command(name="twitterapi", pass_context=True)
async def royalerantset_twitterapi(self,
ctx, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""Twitter API settings"""
if not any([consumer_key, consumer_secret, access_token, access_token_secret]):
await self.bot.send_cmd_help(ctx)
em = discord.Embed(title="RoyaleRant Settings")
for k, v in self.settings['twitter_api'].items():
em.add_field(name=k, value=v)
await self.bot.send_message(ctx.message.author, embed=em)
return
self.settings.update({
"twitter_api": {
"consumer_key": consumer_key,
"consumer_secret": consumer_secret,
"access_token": access_token,
"access_token_secret": access_token_secret
}
})
dataIO.save_json(JSON, self.settings)
await self.bot.say("Settings updated")
await self.bot.delete_message(ctx.message)
@commands.has_any_role(*ROLES)
@commands.command(aliases=['rrant'], pass_context=True, no_pm=True)
async def royalerant(self, ctx, *, msg):
"""Post a Tweet from @RoyaleRant."""
clean_content = ctx.message.clean_content
msg = clean_content[clean_content.index(' '):]
async with aiohttp.ClientSession() as session:
client = self.peony_client(session=session)
author = ctx.message.author
author_initials = "".join(re.findall("[a-zA-Z0-9]+", author.display_name))[:2]
attachment_urls = [attachment['url'] for attachment in ctx.message.attachments]
try:
media_ids = []
if len(attachment_urls):
for url in attachment_urls:
media = await client.upload_media(url, chunk_size=2 ** 18, chunked=True)
media_ids.append(media.media_id)
tweet = "[{}] {}".format(author_initials, msg)
resp = await client.api.statuses.update.post(status=tweet, media_ids=media_ids)
except peony.exceptions.PeonyException as e:
await self.bot.say("Error tweeting: {}".format(e.response))
return
url = "https://twitter.com/{0[user][screen_name]}/status/{0[id_str]}".format(resp)
await self.bot.say("Tweeted: <{}>".format(url))
@commands.has_any_role(*ROLES)
@commands.command(aliases=['rrantrt'], pass_context=True, no_pm=True)
async def royalerant_retweet(self, ctx, arg):
"""Retweet by original tweet URL or status ID."""
client = self.peony_client()
status_id = arg
if arg.startswith('http'):
status_id = re.findall("[0-9]+$", arg)[0]
try:
resp = await client.api.statuses.retweet.post(id=status_id)
except PeonyException as e:
await self.bot.say("Error tweeting: {}".format(e.response))
return
url = "https://twitter.com/{0[user][screen_name]}/status/{0[id_str]}".format(resp)
await self.bot.say("Tweeted: <{}>".format(url))
def check_folder():
"""Check folder."""
os.makedirs(PATH, exist_ok=True)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, {})
def setup(bot):
"""Setup."""
check_folder()
check_file()
n = RoyaleRant(bot)
bot.add_cog(n)
| mit | -1,134,916,283,987,317,100 | 35.810651 | 96 | 0.620961 | false |
xuru/pyvisdk | pyvisdk/do/dv_port_config_spec.py | 1 | 1075 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DVPortConfigSpec(vim, *args, **kwargs):
'''Specification to reconfigure a DistributedVirtualPort.'''
obj = vim.client.factory.create('ns0:DVPortConfigSpec')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument got: %d' % len(args))
required = [ 'operation' ]
optional = [ 'configVersion', 'description', 'key', 'name', 'scope', 'setting',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
| mit | -482,861,134,010,992,100 | 30.647059 | 124 | 0.594419 | false |
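A hedged usage sketch for the factory above: `vim` stands for an already-connected pyvisdk service instance and the property values are invented; only the argument names mirror the required/optional lists in the function.
# Illustrative only -- assumes `vim` is a connected pyvisdk instance.
spec = DVPortConfigSpec(vim, 'edit',   # 'operation' is the single required argument
                        key='port-42',
                        name='uplink-1',
                        description='reconfigured by script')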
rupak0577/ginga | ginga/misc/plugins/SaveImage.py | 1 | 17618 | """Save output images local plugin for Ginga."""
from __future__ import absolute_import, division, print_function
from ginga.util.six import itervalues
from ginga.util.six.moves import map
# STDLIB
import os
import shutil
# THIRD-PARTY
from astropy.io import fits
# GINGA
from ginga.GingaPlugin import GlobalPlugin
from ginga.gw import Widgets
from ginga.misc import Bunch
from ginga.util.iohelper import shorten_name
try:
from ginga.gw.GwHelp import DirectorySelection
except ImportError: # This is needed for RTD to build
pass
__all__ = []
class SaveImage(GlobalPlugin):
"""Save images to output files.
"""
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(SaveImage, self).__init__(fv)
# Image listing
self.columns = [('Image', 'IMAGE'), ('Mod. Ext.', 'MODEXT')]
# User preferences. Some are just default values and can also be
# changed by GUI.
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_SaveImage')
self.settings.addDefaults(output_directory = '.',
output_suffix = 'ginga',
include_chname = True,
clobber = False,
modified_only = True,
max_mosaic_size = 1e8,
max_rows_for_col_resize = 5000)
self.settings.load(onError='silent')
self.outdir = os.path.abspath(
self.settings.get('output_directory', '.'))
self.suffix = self.settings.get('output_suffix', 'ginga')
self.fv.add_callback('add-image', lambda *args: self.redo())
self.fv.add_callback('remove-image', lambda *args: self.redo())
self.fv.add_callback('add-channel',
lambda *args: self.update_channels())
self.fv.add_callback('delete-channel',
lambda *args: self.update_channels())
self.chnames = []
self.chname = None
self.gui_up = False
def build_gui(self, container):
"""Build GUI such that image list area is maximized."""
vbox, sw, orientation = Widgets.get_oriented_box(container)
msgFont = self.fv.getFont('sansFont', 12)
tw = Widgets.TextArea(wrap=True, editable=False)
tw.set_font(msgFont)
self.tw = tw
fr = Widgets.Expander('Instructions')
fr.set_widget(tw)
container.add_widget(fr, stretch=0)
captions = (('Channel:', 'label', 'Channel Name', 'combobox',
'Modified only', 'checkbutton'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.channel_name.set_tooltip('Channel for locating images to save')
b.channel_name.add_callback('activated', self.select_channel_cb)
mod_only = self.settings.get('modified_only', True)
b.modified_only.set_state(mod_only)
b.modified_only.add_callback('activated', lambda *args: self.redo())
b.modified_only.set_tooltip("Show only locally modified images")
container.add_widget(w, stretch=0)
captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
('Suffix:', 'llabel', 'Suffix', 'entry'))
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.outdir.set_text(self.outdir)
b.outdir.set_tooltip('Output directory')
b.outdir.add_callback('activated', lambda w: self.set_outdir())
b.browse.set_tooltip('Browse for output directory')
b.browse.add_callback('activated', lambda w: self.browse_outdir())
b.suffix.set_text(self.suffix)
b.suffix.set_tooltip('Suffix to append to filename')
b.suffix.add_callback('activated', lambda w: self.set_suffix())
container.add_widget(w, stretch=0)
self.treeview = Widgets.TreeView(auto_expand=True,
sortable=True,
selection='multiple',
use_alt_row_color=True)
self.treeview.setup_table(self.columns, 1, 'IMAGE')
self.treeview.add_callback('selected', self.toggle_save_cb)
container.add_widget(self.treeview, stretch=1)
captions = (('Status', 'llabel'), )
w, b = Widgets.build_info(captions, orientation=orientation)
self.w.update(b)
b.status.set_text('')
b.status.set_tooltip('Status message')
container.add_widget(w, stretch=0)
btns = Widgets.HBox()
btns.set_border_width(4)
btns.set_spacing(3)
btn = Widgets.Button('Save')
btn.set_tooltip('Save selected image(s)')
btn.add_callback('activated', lambda w: self.save_images())
btn.set_enabled(False)
btns.add_widget(btn, stretch=0)
self.w.save = btn
btn = Widgets.Button('Close')
btn.add_callback('activated', lambda w: self.close())
btns.add_widget(btn, stretch=0)
btns.add_widget(Widgets.Label(''), stretch=1)
container.add_widget(btns, stretch=0)
self.gui_up = True
# Initialize directory selection dialog
self.dirsel = DirectorySelection(self.fv.w.root.get_widget())
# Generate initial listing
self.update_channels()
def instructions(self):
self.tw.set_text("""Enter output directory and suffix, if different than default. Left click to select image name to save. Multiple images can be selected using click with Shift or CTRL key. Click Save to save the selected image(s).
Output image will have the filename of <inputname>_<suffix>.fits.""")
def redo(self, *args):
"""Generate listing of images that user can save."""
if not self.gui_up:
return
mod_only = self.w.modified_only.get_state()
treedict = Bunch.caselessDict()
self.treeview.clear()
self.w.status.set_text('')
channel = self.fv.get_channelInfo(self.chname)
if channel is None:
return
# Only list modified images for saving. Scanning Datasrc is enough.
if mod_only:
all_keys = channel.datasrc.keys(sort='alpha')
# List all images in the channel.
else:
all_keys = channel.get_image_names()
# Extract info for listing and saving
for key in all_keys:
iminfo = channel.get_image_info(key)
path = iminfo.get('path')
idx = iminfo.get('idx')
t = iminfo.get('time_modified')
if path is None: # Special handling for generated buffer, eg mosaic
infile = key
is_fits = True
else:
infile = os.path.basename(path)
infile_ext = os.path.splitext(path)[1]
infile_ext = infile_ext.lower()
is_fits = False
if 'fit' in infile_ext:
is_fits = True
# Only list FITS files unless it is Ginga generated buffer
if not is_fits:
continue
# Only list modified buffers
if mod_only and t is None:
continue
# More than one ext modified, append to existing entry
if infile in treedict:
if t is not None:
treedict[infile].extlist.add(idx)
elist = sorted(treedict[infile].extlist)
treedict[infile].MODEXT = ';'.join(
map(self._format_extname, elist))
# Add new entry
else:
if t is None:
s = ''
extlist = set()
else:
s = self._format_extname(idx)
extlist = set([idx])
treedict[infile] = Bunch.Bunch(
IMAGE=infile, MODEXT=s, extlist=extlist, path=path)
self.treeview.set_tree(treedict)
# Resize column widths
n_rows = len(treedict)
if n_rows == 0:
self.w.status.set_text('Nothing available for saving')
elif n_rows < self.settings.get('max_rows_for_col_resize', 5000):
self.treeview.set_optimal_column_widths()
self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
def update_channels(self):
"""Update the GUI to reflect channels and image listing.
"""
if not self.gui_up:
return
self.logger.debug("channel configuration has changed--updating gui")
try:
channel = self.fv.get_channelInfo(self.chname)
except KeyError:
channel = self.fv.get_channelInfo()
if channel is None:
raise ValueError('No channel available')
self.chname = channel.name
w = self.w.channel_name
w.clear()
self.chnames = list(self.fv.get_channelNames())
#self.chnames.sort()
for chname in self.chnames:
w.append_text(chname)
# select the channel that is the current one
try:
i = self.chnames.index(channel.name)
        except ValueError:
i = 0
self.w.channel_name.set_index(i)
# update the image listing
self.redo()
def select_channel_cb(self, w, idx):
self.chname = self.chnames[idx]
self.logger.debug("channel name changed to '%s'" % (self.chname))
self.redo()
def _format_extname(self, ext):
"""Pretty print given extension name and number tuple."""
if ext is None:
outs = ext
else:
outs = '{0},{1}'.format(ext[0], ext[1])
return outs
def browse_outdir(self):
"""Browse for output directory."""
self.dirsel.popup(
'Select directory', self.w.outdir.set_text, initialdir=self.outdir)
self.set_outdir()
def set_outdir(self):
"""Set output directory."""
dirname = self.w.outdir.get_text()
if os.path.isdir(dirname):
self.outdir = dirname
self.logger.debug('Output directory set to {0}'.format(self.outdir))
else:
self.w.outdir.set_text(self.outdir)
self.logger.error('{0} is not a directory'.format(dirname))
def set_suffix(self):
"""Set output suffix."""
self.suffix = self.w.suffix.get_text()
self.logger.debug('Output suffix set to {0}'.format(self.suffix))
def _write_history(self, pfx, hdu, linechar=60, indentchar=2):
"""Write change history to given HDU header.
Limit each HISTORY line to given number of characters.
Subsequent lines of the same history will be indented.
"""
channel = self.fv.get_channelInfo(self.chname)
if channel is None:
return
history_plgname = 'ChangeHistory'
try:
history_obj = self.fv.gpmon.getPlugin(history_plgname)
except:
self.logger.error(
'{0} plugin is not loaded. No HISTORY will be written to '
'{1}.'.format(history_plgname, pfx))
return
if channel.name not in history_obj.name_dict:
self.logger.error(
'{0} channel not found in {1}. No HISTORY will be written to '
'{2}.'.format(channel.name, history_plgname, pfx))
return
file_dict = history_obj.name_dict[channel.name]
chistory = []
ind = ' ' * indentchar
# NOTE: List comprehension too slow!
for key in file_dict:
if not key.startswith(pfx):
continue
for bnch in itervalues(file_dict[key]):
chistory.append('{0} {1}'.format(bnch.MODIFIED, bnch.DESCRIP))
# Add each HISTORY prettily into header, sorted by timestamp
for s in sorted(chistory):
for i in range(0, len(s), linechar):
subs = s[i:i+linechar]
if i > 0:
subs = ind + subs.lstrip()
hdu.header.add_history(subs)
def _write_header(self, image, hdu):
"""Write header from image object to given HDU."""
hduhdr = hdu.header
# Ginga image header object for the given extension only.
# Cannot use get_header() because that might also return PRI hdr.
ghdr = image.metadata['header']
for key in ghdr:
# Need this to avoid duplication because COMMENT is a weird field
if key.upper() == 'COMMENT':
continue
bnch = ghdr.get_card(key)
# Insert new keyword
if key not in hduhdr:
hduhdr[key] = (bnch.value, bnch.comment)
# Update existing keyword
elif hduhdr[key] != bnch.value:
hduhdr[key] = bnch.value
def _write_mosaic(self, key, outfile):
"""Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS.
"""
maxsize = self.settings.get('max_mosaic_size', 1e8) # Default 10k x 10k
channel = self.fv.get_channelInfo(self.chname)
image = channel.datasrc[key]
# Prevent writing very large mosaic
if (image.width * image.height) > maxsize:
s = 'Mosaic too large to be written {0}'.format(image.shape)
self.w.status.set_text(s)
self.logger.error(s)
return
# Insert mosaic data and header into output HDU
hdu = fits.PrimaryHDU(image.get_data())
self._write_header(image, hdu)
# Write history to PRIMARY
self._write_history(key, hdu)
# Write to file
hdu.writeto(outfile, clobber=True)
def _write_mef(self, key, extlist, outfile):
"""Write out regular multi-extension FITS data."""
channel = self.fv.get_channelInfo(self.chname)
with fits.open(outfile, mode='update') as pf:
# Process each modified data extension
for idx in extlist:
k = '{0}[{1}]'.format(key, self._format_extname(idx))
image = channel.datasrc[k]
# Insert data and header into output HDU
pf[idx].data = image.get_data()
self._write_header(image, pf[idx])
# Write history to PRIMARY
self._write_history(key, pf['PRIMARY'])
def toggle_save_cb(self, w, res_dict):
"""Only enable saving if something is selected."""
if len(res_dict) > 0:
self.w.save.set_enabled(True)
else:
self.w.save.set_enabled(False)
def save_images(self):
"""Save selected images.
This uses Astropy FITS package to save the outputs no matter
what user chose to load the images.
"""
res_dict = self.treeview.get_selected()
clobber = self.settings.get('clobber', False)
self.treeview.clear_selection() # Automatically disables Save button
# If user gives empty string, no suffix.
if self.suffix:
sfx = '_' + self.suffix
else:
sfx = ''
# Also include channel name in suffix. This is useful if user likes to
# open the same image in multiple channels.
if self.settings.get('include_chname', True):
sfx += '_' + self.chname
# Process each selected file. Each can have multiple edited extensions.
for infile in res_dict:
f_pfx = os.path.splitext(infile)[0] # prefix
f_ext = '.fits' # Only FITS supported
oname = f_pfx + sfx + f_ext
outfile = os.path.join(self.outdir, oname)
self.w.status.set_text(
'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),
shorten_name(oname, 10)))
self.logger.debug(
'Writing out {0} to {1} ...'.format(infile, oname))
if os.path.exists(outfile) and not clobber:
self.logger.error('{0} already exists'.format(outfile))
continue
bnch = res_dict[infile]
if bnch.path is None or not os.path.isfile(bnch.path):
self._write_mosaic(f_pfx, outfile)
else:
shutil.copyfile(bnch.path, outfile)
self._write_mef(f_pfx, bnch.extlist, outfile)
self.logger.info('{0} written'.format(outfile))
self.w.status.set_text('Saving done, see log')
def close(self):
self.fv.stop_global_plugin(str(self))
def start(self):
self.instructions()
self.resume()
def resume(self):
# turn off any mode user may be in
try:
self.modes_off()
except AttributeError:
pass
self.fv.showStatus('See instructions')
def stop(self):
self.gui_up = False
self.fv.showStatus('')
def __str__(self):
"""
This method should be provided and should return the lower case
name of the plugin.
"""
return 'saveimage'
# Replace module docstring with config doc for auto insert by Sphinx.
# In the future, if we need the real docstring, we can append instead of
# overwrite.
from ginga.util.toolbox import generate_cfg_example
__doc__ = generate_cfg_example('plugin_SaveImage', package='ginga')
| bsd-3-clause | -5,133,613,292,894,115,000 | 33.887129 | 240 | 0.566353 | false |
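Since the plugin pulls its defaults through settings.addDefaults(), a user override file would look roughly like the sketch below. The ~/.ginga path is an assumption based on Ginga's usual settings convention; the names and values simply restate the defaults registered above.
# ~/.ginga/plugin_SaveImage.cfg (illustrative; mirrors the addDefaults call)
output_directory = '.'
output_suffix = 'ginga'
include_chname = True
clobber = False
modified_only = True
max_mosaic_size = 1e8
max_rows_for_col_resize = 5000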
MatthieuDartiailh/eapii | eapii/core/iprops/proxies.py | 1 | 4998 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Copyright 2014 by Eapii Authors, see AUTHORS for more details.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENCE, distributed with this software.
#------------------------------------------------------------------------------
"""Proxies used to provide per instance variation of the IProperty behaviour.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from types import MethodType, FunctionType
from weakref import WeakKeyDictionary
from .i_property import get_chain, set_chain
class _ProxyManager(object):
"""Manager caching the custom class used for proxying the different types
of IProperty.
This class is not meant to be instantiated by user code.
"""
def __init__(self):
super(_ProxyManager, self).__init__()
self._proxy_cache = {}
def make_proxy(self, iprop, instance, kwargs):
"""Build a proxy for the given iprop.
        For each type of IProperty a new mixin Proxy type is created by mixing
        the IPropertyProxy class and the iprop class. This class is then cached
        and used to create the proxy instance.
Parameters
----------
iprop : IProperty
Instance whose behaviour should be altered by the use of a proxy.
instance : HasIProps
Object for which the IProperty should have a peculiar behaviour.
attrs : dict
Dict containing the attributes whose values should be overriden.
"""
iprop_class = type(iprop)
if iprop_class not in self._proxy_cache:
# Python 2 compatibility cast
proxy = type(str(iprop_class.__name__+'Proxy'),
(IPropertyProxy, iprop_class), {})
self._proxy_cache[iprop_class] = proxy
return self._proxy_cache[iprop_class](iprop, instance, kwargs)
make_proxy = _ProxyManager().make_proxy
"""Build a proxy for the given iprop.
This used the singleton _ProxyManager instance to handle the caching of the
proxy classes.
"""
class IPropertyProxy(object):
"""Generic proxy for IProperty, used to get per HasIProps instance
behaviour.
Parameters
----------
iprop : IProperty
Instance whose behaviour should be altered by the use of a proxy.
instance : HasIProps
Object for which the IProperty should have a peculiar behaviour.
attrs : dict
Dict containing the attributes whose values should be overriden.
"""
def __init__(self, iprop, instance, attrs):
self._iprop = iprop
# This is created now to avoid creating lots of those for nothing.
if not iprop._proxies:
iprop._proxies = WeakKeyDictionary()
# First get all the instance attr of the IProperty to preserve the
# special behaviours imparted by the HasIProps object.
aux = iprop.__dict__.copy()
aux.update(attrs)
self.patch(aux)
iprop._proxies[instance] = self
def patch(self, attrs):
"""Update the proxy with new values.
Parameters
----------
attrs : dict
New values to give to the proxy attributes.
"""
for k, v in attrs.items():
# Make sure the instance method are correctly redirected to the
# proxy and the functions are bound to the proxy.
if isinstance(v, MethodType):
v = MethodType(v.__func__, self)
elif isinstance(v, FunctionType):
v = MethodType(v, self)
setattr(self, k, v)
def unpatch(self, attrs):
"""Reverse the proxy behaviour to the original IProperty behaviour.
Parameters
----------
attrs : iterable
Names of the attrs whose values should match again the one of the
IProperty.
"""
i_dir = self._iprop.__dict__
for attr in attrs:
if attr in i_dir:
v = i_dir[attr]
if isinstance(v, MethodType):
v = MethodType(v.__func__, self)
setattr(self, attr, getattr(self._iprop, attr))
else:
delattr(self, attr)
@property
def obsolete(self):
"""Boolean indicating whether the proxy differ from the original.
"""
ip_dict = self._iprop.__dict__
test_meth = MethodType(lambda: None, object())
for k, v in self.__dict__.items():
if isinstance(v, MethodType):
if v.__func__ != ip_dict.get(k, test_meth).__func__:
return False
elif k not in ('_iprop', 'instance'):
if k not in ip_dict or v != ip_dict[k]:
return False
return True
proxy_get = get_chain
proxy_set = set_chain
| bsd-3-clause | -3,187,524,225,825,217,500 | 31.666667 | 79 | 0.576631 | false |
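A minimal sketch of how the proxy machinery above is meant to be driven. `MyDriver.gain` and `driver` are invented stand-ins for an IProperty descriptor and a HasIProps instance; everything else uses names defined in this module.
# Hypothetical names: MyDriver.gain is an IProperty, driver a HasIProps instance.
proxy = make_proxy(MyDriver.gain, driver, {'secure_comm': 5})
proxy.patch({'secure_comm': 10})      # tighten the per-instance override later
proxy.unpatch(['secure_comm'])        # restore the original IProperty attribute
if proxy.obsolete:                    # nothing is overridden any more
    del MyDriver.gain._proxies[driver]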
victronenergy/dbus-fronius | test/src/fronius_sim/app.py | 1 | 5868 | #!/usr/bin/python -u
import datetime
import modbus_tcp_sim
import os
import sys
from twisted.internet import reactor
from fronius_sim import FroniusSim
app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
bottle_dir = os.path.normpath(os.path.join(app_dir, '..', '..', 'software', 'ext', 'bottle'))
sys.path.extend([bottle_dir, app_dir])
import bottle
application = bottle.default_app()
inverters = [
FroniusSim(id='1', device_type=232, unique_id='1234', custom_name='SouthWest', has_3phases=True, modbus_enabled=False),
FroniusSim(id='2', device_type=224, unique_id='4321', custom_name='', has_3phases=False, modbus_enabled=False),
FroniusSim(id='3', device_type=208, unique_id='1111', custom_name='Tmp', has_3phases=False, modbus_enabled=True)
]
sma_inverter = FroniusSim(id='126', device_type=None, unique_id='10988912', custom_name='SMA', has_3phases=False, modbus_enabled=True)
@bottle.route('/solar_api/GetAPIVersion.cgi')
def get_api_version():
return dict(APIVersion=1, BaseUrl='solar_api/v1/')
@bottle.route('/solar_api/v1/GetInverterInfo.cgi')
def get_inverter_info():
return {
'Head': create_head({}),
'Body': {
'Data': dict((x.id, {
'DT': x.device_type,
'PVPower': 5000,
'Show': 1,
'UniqueID': x.unique_id,
'ErrorCode': 0,
'StatusCode': 7,
'CustomName': x.custom_name })
for x in inverters)}}
@bottle.route('/solar_api/v1/GetInverterRealtimeData.cgi')
def get_inverter_realtime_data():
scope = bottle.request.query.Scope
device_id = bottle.request.query.DeviceId
data_collection = bottle.request.query.DataCollection
if scope == 'Device':
try:
inverter = next((i for i in inverters if i.id == device_id))
except StopIteration:
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection},
error_code=1,
error_message='device not found')}
if data_collection == 'CumulationInverterData':
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection}),
'Body': {
'Data': {
'PAC': {'Value': 3373, 'Unit': 'W'},
'DAY_ENERGY': {'Value': 8000, 'Unit': 'Wh'},
'YEAR_ENERGY': {'Value': 44000, 'Unit': 'Wh'},
'TOTAL_ENERGY': {'Value': 45000, 'Unit': 'Wh'},
'DeviceStatus': {
'StatusCode': 7,
'MgmtTimerRemainingTime': -1,
'ErrorCode': 0,
'LEDCode': 0,
'LEDColor': 2,
'LEDState': 0,
'StateToReset': False}}}}
if data_collection == 'CommonInverterData':
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection}),
'Body': {
'Data': {
'PAC': {'Value': inverter.main.power, 'Unit': 'W'},
'SAC': {'Value': 3413, 'Unit': 'VA'},
					'IAC': {'Value': inverter.main.current, 'Unit': 'A'},
'UAC': {'Value': inverter.main.voltage, 'Unit': 'V'},
'FAC': {'Value': 50, 'Unit': 'Hz'},
'IDC': {'Value': 8.2, 'Unit': 'A'},
'UDC': {'Value': 426, 'Unit': 'V'},
'DAY_ENERGY': {'Value': 8000, 'Unit': 'Wh'},
'YEAR_ENERGY': {'Value': 44000, 'Unit': 'Wh'},
'TOTAL_ENERGY': {'Value': inverter.main.energy, 'Unit': 'Wh'},
'DeviceStatus': {
'StatusCode': 7,
'MgmtTimerRemainingTime': -1,
'ErrorCode': 0,
'LEDCode': 0,
'LEDColor': 2,
'LEDState': 0,
'StateToReset': False}}}}
if data_collection == '3PInverterData':
if not inverter.has_3phases:
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection},
error_code=2,
error_message='not supported')}
return {
'Head': create_head({
'Scope': scope,
'DeviceId': device_id,
'DataCollection': data_collection}),
'Body': {
'Data': {
'IAC_L1': {'Value': inverter.l1.current, 'Unit': 'A'},
'IAC_L2': {'Value': inverter.l2.current, 'Unit': 'A'},
'IAC_L3': {'Value': inverter.l3.current, 'Unit': 'A'},
'UAC_L1': {'Value': inverter.l1.voltage, 'Unit': 'V'},
'UAC_L2': {'Value': inverter.l2.voltage, 'Unit': 'V'},
'UAC_L3': {'Value': inverter.l3.voltage, 'Unit': 'V'},
					'T_AMBIENT': {'Value': 27, 'Unit': 'C'},
'ROTATION_SPEED_FAN_FL': {'Value': 83, 'Unit': 'RPM'},
'ROTATION_SPEED_FAN_FR': {'Value': 83, 'Unit': 'RPM'},
'ROTATION_SPEED_FAN_BL': {'Value': 83, 'Unit': 'RPM'},
'ROTATION_SPEED_FAN_BR': {'Value': 83, 'Unit': 'RPM'}}}}
elif scope == 'System':
return {
'Head': create_head({'Scope': scope}),
'Body': {
'Data': {
'PAC': {'Value': 3373, 'Unit': 'W'},
'DAY_ENERGY': {'Value': 8000, 'Unit': 'Wh'},
'YEAR_ENERGY': {'Value': 44000, 'Unit': 'Wh'},
'TOTAL_ENERGY': {'Value': 45000, 'Unit': 'Wh'}}}}
else:
raise Exception('Unknown scope')
def create_head(args, error_code=0, error_message=''):
return {
'RequestArguments': args,
'Status': {
"Code": error_code,
"Reason": error_message,
"UserMessage": ""},
'Timestamp': datetime.datetime.now().isoformat()}
class TwistedServer(bottle.ServerAdapter):
def start(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool(minthreads=0, maxthreads=1)
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
# reactor.run()
if __name__ == '__main__':
# host='0.0.0.0': accept connections from all sources
server = TwistedServer(host='0.0.0.0', port=8080, debug=True)
server.start(application)
modbus_tcp_sim.start_server(inverters + [sma_inverter])
reactor.run()
| mit | -4,941,800,377,040,512,000 | 31.966292 | 134 | 0.605658 | false |
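A client-side sketch of exercising the simulated Solar API once app.py is running. It assumes the third-party requests package and the default 0.0.0.0:8080 binding from the __main__ block; the endpoint, query parameters and response layout come from the handlers above.
import requests  # assumption: not a dependency of the simulator itself
resp = requests.get(
    'http://localhost:8080/solar_api/v1/GetInverterRealtimeData.cgi',
    params={'Scope': 'Device', 'DeviceId': '1',
            'DataCollection': 'CommonInverterData'})
pac = resp.json()['Body']['Data']['PAC']
print(pac['Value'], pac['Unit'])  # power in W reported by FroniusSim id='1'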
cloud9ers/j25framework | j25/loaders/ControllerLoader.py | 1 | 2345 | from j25.web import Controller
import inspect
import logging
import pkgutil
import traceback
logger = logging.getLogger("ControllerLoader")
class AutoControllerLoader(object):
@classmethod
def load(cls, app_name, router, dispatcher, package_or_packages):
if not isinstance(package_or_packages, list):
package_or_packages = [package_or_packages]
total = 0
logger.debug("Scanning package(s) %s for controllers.", str(package_or_packages))
controllers = {}
for base_package in package_or_packages:
for _, modname, ispkg in pkgutil.iter_modules(base_package.__path__):
if ispkg == False:
module = __import__(base_package.__name__ + "." + modname, fromlist="t")
for class_name in dir(module):
klass = getattr(module, class_name)
if inspect.isclass(klass):
if klass is Controller:
continue
if not issubclass(klass, Controller):
logger.debug("Class %s was found in '%s' package but is not a subclass of j25.web.Controller -- ignoring...", klass.__name__, base_package.__path__)
continue
# load it
try:
# dispatcher.registerServiceFactory(klass.PATH, klass.BASE_SERVICE.createFactory(klass.NAME, config, klass))
controllers[klass.__name__] = klass
logger.debug("Controller %s is loaded.", klass.__name__)
total += 1
except:
logger.error("Failed to load controller %s:%s", klass.__name__, traceback.format_exc())
if controllers:
# app_package = importlib.import_module(app_name)
app_package = __import__(app_name, fromlist="t")
if not dispatcher.register_app(app_package, controllers, router):
logger.error("Couldn't register application %s", app_name)
return 0
if total > 0:
logger.info("%s controller(s) are/is loaded successfully from app (%s)", total, app_name)
return total | lgpl-3.0 | -2,055,934,310,574,117,400 | 51.133333 | 180 | 0.53049 | false |
markelg/xray | xray/test/test_conventions.py | 1 | 25820 | import contextlib
import numpy as np
import pandas as pd
import warnings
from xray import conventions, Variable, Dataset, open_dataset
from xray.core import utils, indexing
from . import TestCase, requires_netCDF4, unittest
from .test_backends import CFEncodedDataTest
from xray.core.pycompat import iteritems
from xray.backends.memory import InMemoryDataStore
from xray.conventions import cf_encoder, cf_decoder, decode_cf
class TestMaskedAndScaledArray(TestCase):
def test(self):
x = conventions.MaskedAndScaledArray(np.arange(3), fill_value=0)
self.assertEqual(x.dtype, np.dtype('float'))
self.assertEqual(x.shape, (3,))
self.assertEqual(x.size, 3)
self.assertEqual(x.ndim, 1)
self.assertEqual(len(x), 3)
self.assertArrayEqual([np.nan, 1, 2], x)
x = conventions.MaskedAndScaledArray(np.arange(3), add_offset=1)
self.assertArrayEqual(np.arange(3) + 1, x)
x = conventions.MaskedAndScaledArray(np.arange(3), scale_factor=2)
self.assertArrayEqual(2 * np.arange(3), x)
x = conventions.MaskedAndScaledArray(np.array([-99, -1, 0, 1, 2]),
-99, 0.01, 1)
expected = np.array([np.nan, 0.99, 1, 1.01, 1.02])
self.assertArrayEqual(expected, x)
def test_0d(self):
x = conventions.MaskedAndScaledArray(np.array(0), fill_value=0)
self.assertTrue(np.isnan(x))
self.assertTrue(np.isnan(x[...]))
x = conventions.MaskedAndScaledArray(np.array(0), fill_value=10)
self.assertEqual(0, x[...])
def test_multiple_fill_value(self):
x = conventions.MaskedAndScaledArray(
np.arange(4), fill_value=np.array([0, 1]))
self.assertArrayEqual([np.nan, np.nan, 2, 3], x)
x = conventions.MaskedAndScaledArray(
np.array(0), fill_value=np.array([0, 1]))
self.assertTrue(np.isnan(x))
self.assertTrue(np.isnan(x[...]))
class TestCharToStringArray(TestCase):
def test_wrapper_class(self):
array = np.array(list('abc'), dtype='S')
actual = conventions.CharToStringArray(array)
expected = np.array('abc', dtype='S')
self.assertEqual(actual.dtype, expected.dtype)
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(actual.size, expected.size)
self.assertEqual(actual.ndim, expected.ndim)
with self.assertRaises(TypeError):
len(actual)
self.assertArrayEqual(expected, actual)
with self.assertRaises(IndexError):
actual[:2]
self.assertEqual(str(actual), 'abc')
array = np.array([list('abc'), list('cdf')], dtype='S')
actual = conventions.CharToStringArray(array)
expected = np.array(['abc', 'cdf'], dtype='S')
self.assertEqual(actual.dtype, expected.dtype)
self.assertEqual(actual.shape, expected.shape)
self.assertEqual(actual.size, expected.size)
self.assertEqual(actual.ndim, expected.ndim)
self.assertEqual(len(actual), len(expected))
self.assertArrayEqual(expected, actual)
self.assertArrayEqual(expected[:1], actual[:1])
with self.assertRaises(IndexError):
actual[:, :2]
def test_char_to_string(self):
array = np.array([['a', 'b', 'c'], ['d', 'e', 'f']])
expected = np.array(['abc', 'def'])
actual = conventions.char_to_string(array)
self.assertArrayEqual(actual, expected)
expected = np.array(['ad', 'be', 'cf'])
actual = conventions.char_to_string(array.T) # non-contiguous
self.assertArrayEqual(actual, expected)
def test_string_to_char(self):
array = np.array([['ab', 'cd'], ['ef', 'gh']])
expected = np.array([[['a', 'b'], ['c', 'd']],
[['e', 'f'], ['g', 'h']]])
actual = conventions.string_to_char(array)
self.assertArrayEqual(actual, expected)
expected = np.array([[['a', 'b'], ['e', 'f']],
[['c', 'd'], ['g', 'h']]])
actual = conventions.string_to_char(array.T)
self.assertArrayEqual(actual, expected)
@np.vectorize
def _ensure_naive_tz(dt):
if hasattr(dt, 'tzinfo'):
return dt.replace(tzinfo=None)
else:
return dt
class TestDatetime(TestCase):
@requires_netCDF4
def test_cf_datetime(self):
import netCDF4 as nc4
for num_dates, units in [
(np.arange(10), 'days since 2000-01-01'),
(np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
(12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
# here we add a couple minor formatting errors to test
# the robustness of the parsing algorithm.
(12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), u'Hour since 1680-01-01 00:00:00'),
(12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
(10, 'days since 2000-01-01'),
([10], 'daYs since 2000-01-01'),
([[10]], 'days since 2000-01-01'),
([10, 10], 'days since 2000-01-01'),
(np.array(10), 'days since 2000-01-01'),
(0, 'days since 1000-01-01'),
([0], 'days since 1000-01-01'),
([[0]], 'days since 1000-01-01'),
(np.arange(2), 'days since 1000-01-01'),
(np.arange(0, 100000, 20000), 'days since 1900-01-01'),
(17093352.0, 'hours since 1-1-1 00:00:0.0'),
([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
(0, 'milliseconds since 2000-01-01T00:00:00'),
(0, 'microseconds since 2000-01-01T00:00:00'),
]:
for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:
expected = _ensure_naive_tz(nc4.num2date(num_dates, units, calendar))
print(num_dates, units, calendar)
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = conventions.decode_cf_datetime(num_dates, units,
calendar)
if (isinstance(actual, np.ndarray)
and np.issubdtype(actual.dtype, np.datetime64)):
# self.assertEqual(actual.dtype.kind, 'M')
# For some reason, numpy 1.8 does not compare ns precision
# datetime64 arrays as equal to arrays of datetime objects,
# but it works for us precision. Thus, convert to us
# precision for the actual array equal comparison...
actual_cmp = actual.astype('M8[us]')
else:
actual_cmp = actual
self.assertArrayEqual(expected, actual_cmp)
encoded, _, _ = conventions.encode_cf_datetime(actual, units,
calendar)
if '1-1-1' not in units:
# pandas parses this date very strangely, so the original
# units/encoding cannot be preserved in this case:
# (Pdb) pd.to_datetime('1-1-1 00:00:0.0')
# Timestamp('2001-01-01 00:00:00')
self.assertArrayEqual(num_dates, np.around(encoded, 1))
if (hasattr(num_dates, 'ndim') and num_dates.ndim == 1
and '1000' not in units):
# verify that wrapping with a pandas.Index works
# note that it *does not* currently work to even put
# non-datetime64 compatible dates into a pandas.Index :(
encoded, _, _ = conventions.encode_cf_datetime(
pd.Index(actual), units, calendar)
self.assertArrayEqual(num_dates, np.around(encoded, 1))
def test_decoded_cf_datetime_array(self):
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual, expected)
# default calendar
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01')
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual, expected)
def test_slice_decoded_cf_datetime_array(self):
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual[slice(0, 2)], expected[slice(0, 2)])
actual = conventions.DecodedCFDatetimeArray(
np.array([0, 1, 2]), 'days since 1900-01-01', 'standard')
expected = pd.date_range('1900-01-01', periods=3).values
self.assertEqual(actual.dtype, np.dtype('datetime64[ns]'))
self.assertArrayEqual(actual[[0, 2]], expected[[0, 2]])
def test_decode_cf_datetime_non_standard_units(self):
expected = pd.date_range(periods=100, start='1970-01-01', freq='h')
# netCDFs from madis.noaa.gov use this format for their time units
# they cannot be parsed by netcdftime, but pd.Timestamp works
units = 'hours since 1-1-1970'
actual = conventions.decode_cf_datetime(np.arange(100), units)
self.assertArrayEqual(actual, expected)
def test_decode_cf_with_conflicting_fill_missing_value(self):
var = Variable(['t'], np.arange(10),
{'units': 'foobar',
'missing_value': 0,
'_FillValue': 1})
self.assertRaisesRegexp(ValueError, "_FillValue and missing_value",
lambda: conventions.decode_cf_variable(var))
@requires_netCDF4
def test_decode_cf_datetime_non_iso_strings(self):
# datetime strings that are _almost_ ISO compliant but not quite,
# but which netCDF4.num2date can still parse correctly
expected = pd.date_range(periods=100, start='2000-01-01', freq='h')
cases = [(np.arange(100), 'hours since 2000-01-01 0'),
(np.arange(100), 'hours since 2000-1-1 0'),
(np.arange(100), 'hours since 2000-01-01 0:00')]
for num_dates, units in cases:
actual = conventions.decode_cf_datetime(num_dates, units)
self.assertArrayEqual(actual, expected)
@requires_netCDF4
def test_decode_non_standard_calendar(self):
import netCDF4 as nc4
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
units = 'days since 0001-01-01'
times = pd.date_range('2001-04-01-00', end='2001-04-30-23',
freq='H')
noleap_time = nc4.date2num(times.to_pydatetime(), units,
calendar=calendar)
expected = times.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = conventions.decode_cf_datetime(noleap_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
abs_diff = abs(actual - expected)
# once we no longer support versions of netCDF4 older than 1.1.5,
# we could do this check with near microsecond accuracy:
# https://github.com/Unidata/netcdf4-python/issues/355
self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())
@requires_netCDF4
def test_decode_non_standard_calendar_single_element(self):
units = 'days since 0001-01-01'
for calendar in ['noleap', '365_day', '360_day', 'julian', 'all_leap',
'366_day']:
for num_time in [735368, [735368], [[735368]]]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Unable to decode time axis')
actual = conventions.decode_cf_datetime(num_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
@requires_netCDF4
def test_decode_non_standard_calendar_single_element_fallback(self):
import netCDF4 as nc4
units = 'days since 0001-01-01'
dt = nc4.netcdftime.datetime(2001, 2, 29)
for calendar in ['360_day', 'all_leap', '366_day']:
num_time = nc4.date2num(dt, units, calendar)
with self.assertWarns('Unable to decode time axis'):
actual = conventions.decode_cf_datetime(num_time, units,
calendar=calendar)
expected = np.asarray(nc4.num2date(num_time, units, calendar))
print(num_time, calendar, actual, expected)
self.assertEqual(actual.dtype, np.dtype('O'))
self.assertEqual(expected, actual)
@requires_netCDF4
def test_decode_non_standard_calendar_multidim_time(self):
import netCDF4 as nc4
calendar = 'noleap'
units = 'days since 0001-01-01'
times1 = pd.date_range('2001-04-01', end='2001-04-05', freq='D')
times2 = pd.date_range('2001-05-01', end='2001-05-05', freq='D')
noleap_time1 = nc4.date2num(times1.to_pydatetime(), units,
calendar=calendar)
noleap_time2 = nc4.date2num(times2.to_pydatetime(), units,
calendar=calendar)
mdim_time = np.empty((len(noleap_time1), 2), )
mdim_time[:, 0] = noleap_time1
mdim_time[:, 1] = noleap_time2
expected1 = times1.values
expected2 = times2.values
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Unable to decode time axis')
actual = conventions.decode_cf_datetime(mdim_time, units,
calendar=calendar)
self.assertEqual(actual.dtype, np.dtype('M8[ns]'))
self.assertArrayEqual(actual[:, 0], expected1)
self.assertArrayEqual(actual[:, 1], expected2)
@requires_netCDF4
def test_decode_non_standard_calendar_fallback(self):
import netCDF4 as nc4
# ensure leap year doesn't matter
for year in [2010, 2011, 2012, 2013, 2014]:
for calendar in ['360_day', '366_day', 'all_leap']:
calendar = '360_day'
units = 'days since {0}-01-01'.format(year)
num_times = np.arange(100)
expected = nc4.num2date(num_times, units, calendar)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
actual = conventions.decode_cf_datetime(num_times, units,
calendar=calendar)
self.assertEqual(len(w), 1)
self.assertIn('Unable to decode time axis',
str(w[0].message))
self.assertEqual(actual.dtype, np.dtype('O'))
self.assertArrayEqual(actual, expected)
def test_cf_datetime_nan(self):
for num_dates, units, expected_list in [
([np.nan], 'days since 2000-01-01', ['NaT']),
([np.nan, 0], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z']),
([np.nan, 0, 1], 'days since 2000-01-01',
['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),
]:
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'All-NaN')
actual = conventions.decode_cf_datetime(num_dates, units)
expected = np.array(expected_list, dtype='datetime64[ns]')
self.assertArrayEqual(expected, actual)
def test_infer_datetime_units(self):
for dates, expected in [(pd.date_range('1900-01-01', periods=5),
'days since 1900-01-01 00:00:00'),
(pd.date_range('1900-01-01 12:00:00', freq='H',
periods=2),
'hours since 1900-01-01 12:00:00'),
(['1900-01-01', '1900-01-02',
'1900-01-02 00:00:01'],
'seconds since 1900-01-01 00:00:00'),
(pd.to_datetime(['1900-01-01', '1900-01-02', 'NaT']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['1900-01-01',
'1900-01-02T00:00:00.005']),
'seconds since 1900-01-01 00:00:00'),
(pd.to_datetime(['NaT', '1900-01-01']),
'days since 1900-01-01 00:00:00'),
(pd.to_datetime(['NaT']),
'days since 1970-01-01 00:00:00'),
]:
self.assertEqual(expected, conventions.infer_datetime_units(dates))
def test_cf_timedelta(self):
examples = [
('1D', 'days', np.int64(1)),
(['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
('1h', 'hours', np.int64(1)),
('1ms', 'milliseconds', np.int64(1)),
('1us', 'microseconds', np.int64(1)),
(['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
(['30m', '60m'], 'hours', [0.5, 1.0]),
]
if pd.__version__ >= '0.16':
# not quite sure why, but these examples don't work on older pandas
examples.extend([(np.timedelta64('NaT', 'ns'), 'days', np.nan),
(['NaT', 'NaT'], 'days', [np.nan, np.nan])])
for timedeltas, units, numbers in examples:
timedeltas = pd.to_timedelta(timedeltas, box=False)
numbers = np.array(numbers)
expected = numbers
actual, _ = conventions.encode_cf_timedelta(timedeltas, units)
self.assertArrayEqual(expected, actual)
self.assertEqual(expected.dtype, actual.dtype)
if units is not None:
expected = timedeltas
actual = conventions.decode_cf_timedelta(numbers, units)
self.assertArrayEqual(expected, actual)
self.assertEqual(expected.dtype, actual.dtype)
expected = np.timedelta64('NaT', 'ns')
actual = conventions.decode_cf_timedelta(np.array(np.nan), 'days')
self.assertArrayEqual(expected, actual)
def test_infer_timedelta_units(self):
for deltas, expected in [
(pd.to_timedelta(['1 day', '2 days']), 'days'),
(pd.to_timedelta(['1h', '1 day 1 hour']), 'hours'),
(pd.to_timedelta(['1m', '2m', np.nan]), 'minutes'),
(pd.to_timedelta(['1m3s', '1m4s']), 'seconds')]:
self.assertEqual(expected, conventions.infer_timedelta_units(deltas))
def test_invalid_units_raises_eagerly(self):
ds = Dataset({'time': ('time', [0, 1], {'units': 'foobar since 123'})})
with self.assertRaisesRegexp(ValueError, 'unable to decode time'):
decode_cf(ds)
@requires_netCDF4
def test_dataset_repr_with_netcdf4_datetimes(self):
# regression test for #347
attrs = {'units': 'days since 0001-01-01', 'calendar': 'noleap'}
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'unable to decode time')
ds = decode_cf(Dataset({'time': ('time', [0, 1], attrs)}))
self.assertIn('(time) object', repr(ds))
attrs = {'units': 'days since 1900-01-01'}
ds = decode_cf(Dataset({'time': ('time', [0, 1], attrs)}))
self.assertIn('(time) datetime64[ns]', repr(ds))
class TestNativeEndiannessArray(TestCase):
def test(self):
x = np.arange(5, dtype='>i8')
expected = np.arange(5, dtype='int64')
a = conventions.NativeEndiannessArray(x)
assert a.dtype == expected.dtype
assert a.dtype == expected[:].dtype
self.assertArrayEqual(a, expected)
@requires_netCDF4
class TestEncodeCFVariable(TestCase):
def test_incompatible_attributes(self):
invalid_vars = [
Variable(['t'], pd.date_range('2000-01-01', periods=3),
{'units': 'foobar'}),
Variable(['t'], pd.to_timedelta(['1 day']), {'units': 'foobar'}),
Variable(['t'], [0, 1, 2], {'add_offset': 0}, {'add_offset': 2}),
Variable(['t'], [0, 1, 2], {'_FillValue': 0}, {'_FillValue': 2}),
]
for var in invalid_vars:
with self.assertRaises(ValueError):
conventions.encode_cf_variable(var)
def test_missing_fillvalue(self):
v = Variable(['x'], np.array([np.nan, 1, 2, 3]))
v.encoding = {'dtype': 'int16'}
with self.assertWarns('floating point data as an integer'):
conventions.encode_cf_variable(v)
@requires_netCDF4
class TestDecodeCF(TestCase):
def test_dataset(self):
original = Dataset({
't': ('t', [0, 1, 2], {'units': 'days since 2000-01-01'}),
'foo': ('t', [0, 0, 0], {'coordinates': 'y', 'units': 'bar'}),
'y': ('t', [5, 10, -999], {'_FillValue': -999})
})
expected = Dataset({'foo': ('t', [0, 0, 0], {'units': 'bar'})},
{'t': pd.date_range('2000-01-01', periods=3),
'y': ('t', [5.0, 10.0, np.nan])})
actual = conventions.decode_cf(original)
self.assertDatasetIdentical(expected, actual)
def test_invalid_coordinates(self):
# regression test for GH308
original = Dataset({'foo': ('t', [1, 2], {'coordinates': 'invalid'})})
actual = conventions.decode_cf(original)
self.assertDatasetIdentical(original, actual)
def test_0d_int32_encoding(self):
original = Variable((), np.int32(0), encoding={'dtype': 'int64'})
expected = Variable((), np.int64(0))
actual = conventions.maybe_encode_dtype(original)
self.assertDatasetIdentical(expected, actual)
def test_decode_cf_with_multiple_missing_values(self):
original = Variable(['t'], [0, 1, 2],
{'missing_value': np.array([0, 1])})
expected = Variable(['t'], [np.nan, np.nan, 2], {})
with warnings.catch_warnings(record=True) as w:
actual = conventions.decode_cf_variable(original)
self.assertDatasetIdentical(expected, actual)
self.assertIn('variable has multiple fill', str(w[0].message))
def test_decode_cf_with_drop_variables(self):
original = Dataset({
't': ('t', [0, 1, 2], {'units': 'days since 2000-01-01'}),
'x' : ("x", [9, 8, 7], {'units' : 'km'}),
'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {'units': 'bar'}),
'y': ('t', [5, 10, -999], {'_FillValue': -999})
})
expected = Dataset({
't': pd.date_range('2000-01-01', periods=3),
'x' : ("x", [0, 1, 2]),
'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {'units': 'bar'}),
'y': ('t', [5, 10, np.nan])
})
actual = conventions.decode_cf(original, drop_variables=("x",))
actual2 = conventions.decode_cf(original, drop_variables="x")
self.assertDatasetIdentical(expected, actual)
self.assertDatasetIdentical(expected, actual2)
class CFEncodedInMemoryStore(InMemoryDataStore):
def store(self, variables, attributes):
variables, attributes = cf_encoder(variables, attributes)
InMemoryDataStore.store(self, variables, attributes)
class NullWrapper(utils.NDArrayMixin):
"""
Just for testing, this lets us create a numpy array directly
    but make it look like it's not in memory yet.
"""
def __init__(self, array):
self.array = array
def __getitem__(self, key):
return self.array[indexing.orthogonal_indexer(key, self.shape)]
def null_wrap(ds):
"""
Given a data store this wraps each variable in a NullWrapper so that
it appears to be out of memory.
"""
variables = dict((k, Variable(v.dims, NullWrapper(v.values), v.attrs))
for k, v in iteritems(ds))
return InMemoryDataStore(variables=variables, attributes=ds.attrs)
@requires_netCDF4
class TestCFEncodedDataStore(CFEncodedDataTest, TestCase):
@contextlib.contextmanager
def create_store(self):
yield CFEncodedInMemoryStore()
@contextlib.contextmanager
def roundtrip(self, data, decode_cf=True):
store = CFEncodedInMemoryStore()
data.dump_to_store(store)
yield open_dataset(store, decode_cf=decode_cf)
def test_roundtrip_coordinates(self):
raise unittest.SkipTest('cannot roundtrip coordinates yet for '
'CFEncodedInMemoryStore')
| apache-2.0 | 6,889,391,297,185,541,000 | 45.606498 | 85 | 0.547289 | false |
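A short sketch of the public round-trip these tests exercise, using only calls that appear above (Dataset and conventions.decode_cf); the asserted value follows directly from the 'days since 2000-01-01' units.
import numpy as np
from xray import Dataset, conventions
ds = Dataset({'time': ('time', [0, 1, 2],
                       {'units': 'days since 2000-01-01'})})
decoded = conventions.decode_cf(ds)
assert decoded['time'].values[0] == np.datetime64('2000-01-01')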
nejstastnejsistene/gardendb | gardendb/postgres.py | 1 | 5218 | import binascii
import threading
import psycopg2
from psycopg2.extensions import new_type, register_type
try:
    import cPickle as pickle
except ImportError:
import pickle
from . import BaseGarden
def adapt_bytea(obj):
'''Adapt an object to a bytea by pickling.'''
if isinstance(obj, str):
# Convert strings to unicodes when possible
try:
obj = unicode(obj)
except UnicodeDecodeError:
pass
p = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
return psycopg2.Binary(p)
def cast_bytea(value, cur):
'''Convert a bytea to a python value by unpickling.'''
# Decode the bytea using the original typecast object.
value = psycopg2.BINARY(value, cur)
try:
return pickle.loads(value)
except pickle.UnpicklingError:
mesg = 'unable to unpickle buffer: {!r}'.format(value)
raise psycopg2.InterfaceError(mesg)
# Register cast_bytea with psycopg2.
PICKLE = new_type(psycopg2.BINARY.values, 'PICKLE', cast_bytea)
register_type(PICKLE)
def dummy_pool(conn):
class DummyPool(object):
def getconn(self):
return conn
def putconn(self, conn):
pass
return DummyPool()
class PgGarden(BaseGarden):
table_def_fmt = '''
CREATE TABLE {name}
( key bytea NOT NULL UNIQUE
, value bytea NOT NULL
, mtime timestamp NOT NULL DEFAULT localtimestamp
)
'''
replace_def_fmt = '''
CREATE RULE "replace_{name}" AS
ON INSERT TO "{name}"
WHERE
EXISTS(SELECT 1 FROM {name} WHERE key=NEW.key)
DO INSTEAD
UPDATE {name}
SET value = NEW.value, mtime = localtimestamp
WHERE key = NEW.key
'''
select_all_cmd_fmt = 'SELECT key, value FROM {name}'
select_cmd_fmt = 'SELECT value FROM {name} WHERE key = %s'
insert_cmd_fmt = 'INSERT INTO {name} (key, value) VALUES '
delete_cmd_fmt = 'DELETE FROM {name} WHERE key = %s'
def __init__(self, name, pool, cls=None):
BaseGarden.__init__(self, cls)
self.name = name
self.pool = pool
# Format the various sql commands that we use.
for name, value in PgGarden.__dict__.items():
if name.endswith('_fmt'):
setattr(self, name[:-4], value.format(name=self.name))
conn = self.pool.getconn()
# Create the table and replacement rule if not already defined.
with conn.cursor() as cur:
cur.execute('''
SELECT 1 FROM pg_tables WHERE tablename = '{name}'
'''.format(name=self.name))
if not cur.fetchone():
cur.execute(self.table_def)
cur.execute('''
SELECT 1 FROM pg_rules WHERE rulename = 'replace_{name}'
'''.format(name=self.name))
if not cur.fetchone():
cur.execute(self.replace_def)
conn.commit()
self.pool.putconn(conn)
def lock(garden, key, default=None):
lock = threading.Lock()
class Ctx(object):
def __enter__(self):
lock.acquire()
self.value = garden.get(key, default)
return self
def __exit__(self, *args):
garden[key] = self.value
lock.release()
return Ctx()
def getall(self):
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(self.select_all_cmd)
pairs = cur.fetchall()
self.pool.putconn(conn)
return {k: self.unpack_state(v) for k, v in pairs}
def putmany(self, dct):
'''Place/replace many cucumbers into the Garden.'''
if not dct:
# Silently ignore requests to put nothing.
return
# Pack values.
dct = {k: self.pack_state(v) for k, v in dct.items()}
# Calculate the SQL command format.
cmd = self.insert_cmd + ', '.join(['(%s, %s)'] * len(dct))
# Generate the SQL parameters.
args = []
for pair in dct.items():
args += map(adapt_bytea, pair)
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(cmd, args)
conn.commit()
self.pool.putconn(conn)
def __getitem__(self, key):
'''Retrieve a cucumber from the Garden.'''
_key = adapt_bytea(key)
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(self.select_cmd, (_key,))
value = cur.fetchone()
self.pool.putconn(conn)
if value is None:
            raise KeyError(key)
return self.unpack_state(value[0])
def __setitem__(self, key, value):
'''Place/replace a cucumber into the Garden.'''
self.putmany({key: value})
def __delitem__(self, key):
'''Delete a cucumber from the Garden.
        If the key does not exist, no exception is raised.
'''
key = adapt_bytea(key)
conn = self.pool.getconn()
with conn.cursor() as cur:
cur.execute(self.delete_cmd, (key,))
conn.commit()
self.pool.putconn(conn)
| bsd-3-clause | 2,349,762,603,324,349,000 | 28.150838 | 72 | 0.561518 | false |
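A hedged end-to-end sketch of the PgGarden API above; the database name is a placeholder and dummy_pool() stands in for a real psycopg2 connection pool.
import psycopg2
conn = psycopg2.connect(dbname='gardens')        # placeholder connection settings
garden = PgGarden('my_garden', dummy_pool(conn))
garden['answer'] = {'value': 42}   # INSERT rewritten to UPDATE by the replace rule
print(garden['answer'])            # -> {'value': 42}
garden.putmany({'a': 1, 'b': 2})   # single multi-row INSERT
del garden['answer']               # silently ignores missing keys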
ellisonbg/nbgrader | nbgrader/tests/nbextensions/test_assignment_list.py | 1 | 12592 | import pytest
import os
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from .. import run_nbgrader
from .conftest import notwindows
def _wait(browser):
return WebDriverWait(browser, 10)
def _load_assignments_list(browser, port, retries=5):
# go to the correct page
browser.get("http://localhost:{}/tree".format(port))
def page_loaded(browser):
return browser.execute_script(
'return typeof IPython !== "undefined" && IPython.page !== undefined;')
# wait for the page to load
try:
_wait(browser).until(page_loaded)
except TimeoutException:
if retries > 0:
print("Retrying page load...")
# page timeout, but sometimes this happens, so try refreshing?
_load_assignments_list(browser, port, retries=retries - 1)
else:
print("Failed to load the page too many times")
raise
# wait for the extension to load
_wait(browser).until(EC.presence_of_element_located((By.CSS_SELECTOR, "#assignments")))
# switch to the assignments list
element = browser.find_element_by_link_text("Assignments")
element.click()
# make sure released, downloaded, and submitted assignments are visible
_wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list")))
_wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list")))
_wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#submitted_assignments_list")))
def _expand(browser, list_id, assignment):
browser.find_element_by_link_text(assignment).click()
rows = browser.find_elements_by_css_selector("{} .list_item".format(list_id))
for i in range(1, len(rows)):
_wait(browser).until(lambda browser: browser.find_elements_by_css_selector("{} .list_item".format(list_id))[i].is_displayed())
return rows
def _unexpand(browser, list_id, assignment):
browser.find_element_by_link_text(assignment).click()
rows = browser.find_elements_by_css_selector("{} .list_item".format(list_id))
for i in range(1, len(rows)):
_wait(browser).until(lambda browser: not browser.find_elements_by_css_selector("{} .list_item".format(list_id))[i].is_displayed())
def _wait_for_modal(browser):
_wait(browser).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".modal-dialog")))
def _dismiss_modal(browser):
button = browser.find_element_by_css_selector(".modal-footer .btn-primary")
button.click()
def modal_gone(browser):
try:
browser.find_element_by_css_selector(".modal-dialog")
except NoSuchElementException:
return True
return False
_wait(browser).until(modal_gone)
def _sort_rows(x):
try:
item_name = x.find_element_by_class_name("item_name").text
except NoSuchElementException:
item_name = ""
return item_name
def _wait_until_loaded(browser):
_wait(browser).until(lambda browser: browser.find_element_by_css_selector("#course_list_dropdown").is_enabled())
def _change_course(browser, course):
# wait until the dropdown is enabled
_wait_until_loaded(browser)
# click the dropdown to show the menu
dropdown = browser.find_element_by_css_selector("#course_list_dropdown")
dropdown.click()
# parse the list of courses and click the one that's been requested
courses = browser.find_elements_by_css_selector("#course_list > li")
text = [x.text for x in courses]
index = text.index(course)
courses[index].click()
# wait for the dropdown to be disabled, then enabled again
_wait_until_loaded(browser)
# verify the dropdown shows the correct course
default = browser.find_element_by_css_selector("#course_list_default")
assert default.text == course
def _wait_for_list(browser, name, num_rows):
_wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#{}_assignments_list_loading".format(name))))
_wait(browser).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "#{}_assignments_list_placeholder".format(name))))
_wait(browser).until(lambda browser: len(browser.find_elements_by_css_selector("#{}_assignments_list > .list_item".format(name))) == num_rows)
rows = browser.find_elements_by_css_selector("#{}_assignments_list > .list_item".format(name))
assert len(rows) == num_rows
return rows
@pytest.mark.nbextensions
@notwindows
def test_show_assignments_list(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# make sure all the placeholders are initially showing
_wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#released_assignments_list_placeholder")))
_wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#fetched_assignments_list_placeholder")))
_wait(browser).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#submitted_assignments_list_placeholder")))
# release an assignment
run_nbgrader(["assign", "Problem Set 1"])
run_nbgrader(["release", "Problem Set 1", "--course", "abc101"])
# click the refresh button
browser.find_element_by_css_selector("#refresh_assignments_list").click()
_wait_until_loaded(browser)
# wait for the released assignments to update
rows = _wait_for_list(browser, "released", 1)
assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
assert rows[0].find_element_by_class_name("item_course").text == "abc101"
@pytest.mark.nbextensions
@notwindows
def test_multiple_released_assignments(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# release another assignment
run_nbgrader(["assign", "ps.01"])
run_nbgrader(["release", "ps.01", "--course", "xyz 200"])
# click the refresh button
browser.find_element_by_css_selector("#refresh_assignments_list").click()
_wait_until_loaded(browser)
# choose the course "xyz 200"
_change_course(browser, "xyz 200")
rows = _wait_for_list(browser, "released", 1)
assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"
@pytest.mark.nbextensions
@notwindows
def test_fetch_assignment(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# choose the course "xyz 200"
_change_course(browser, "xyz 200")
# click the "fetch" button
rows = _wait_for_list(browser, "released", 1)
rows[0].find_element_by_css_selector(".item_status button").click()
# wait for the downloaded assignments list to update
rows = _wait_for_list(browser, "fetched", 1)
assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"
assert os.path.exists(os.path.join(tempdir, "ps.01"))
# expand the assignment to show the notebooks
rows = _expand(browser, "#nbgrader-xyz_200-ps01", "ps.01")
rows.sort(key=_sort_rows)
assert len(rows) == 2
assert rows[1].find_element_by_class_name("item_name").text == "problem 1"
# unexpand the assignment
_unexpand(browser, "#nbgrader-xyz_200-ps01", "ps.01")
@pytest.mark.nbextensions
@notwindows
def test_submit_assignment(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# choose the course "xyz 200"
_change_course(browser, "xyz 200")
# submit it
rows = _wait_for_list(browser, "fetched", 1)
rows[0].find_element_by_css_selector(".item_status button").click()
# wait for the submitted assignments list to update
rows = _wait_for_list(browser, "submitted", 1)
assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"
# submit it again
rows = browser.find_elements_by_css_selector("#fetched_assignments_list > .list_item")
rows[0].find_element_by_css_selector(".item_status button").click()
# wait for the submitted assignments list to update
rows = _wait_for_list(browser, "submitted", 2)
rows.sort(key=_sort_rows)
assert rows[0].find_element_by_class_name("item_name").text == "ps.01"
assert rows[0].find_element_by_class_name("item_course").text == "xyz 200"
assert rows[1].find_element_by_class_name("item_name").text == "ps.01"
assert rows[1].find_element_by_class_name("item_course").text == "xyz 200"
assert rows[0].find_element_by_class_name("item_status").text != rows[1].find_element_by_class_name("item_status").text
@pytest.mark.nbextensions
@notwindows
def test_fetch_second_assignment(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# click the "fetch" button
rows = _wait_for_list(browser, "released", 1)
rows[0].find_element_by_css_selector(".item_status button").click()
# wait for the downloaded assignments list to update
rows = _wait_for_list(browser, "fetched", 1)
rows.sort(key=_sort_rows)
assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
assert rows[0].find_element_by_class_name("item_course").text == "abc101"
assert os.path.exists(os.path.join(tempdir, "Problem Set 1"))
# expand the assignment to show the notebooks
rows = _expand(browser, "#nbgrader-abc101-Problem_Set_1", "Problem Set 1")
rows.sort(key=_sort_rows)
assert len(rows) == 3
assert rows[1].find_element_by_class_name("item_name").text == "Problem 1"
assert rows[2].find_element_by_class_name("item_name").text == "Problem 2"
# unexpand the assignment
_unexpand(browser, "abc101-Problem_Set_1", "Problem Set 1")
@pytest.mark.nbextensions
@notwindows
def test_submit_other_assignment(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# submit it
rows = _wait_for_list(browser, "fetched", 1)
rows[0].find_element_by_css_selector(".item_status button").click()
# wait for the submitted assignments list to update
rows = _wait_for_list(browser, "submitted", 1)
rows.sort(key=_sort_rows)
assert rows[0].find_element_by_class_name("item_name").text == "Problem Set 1"
assert rows[0].find_element_by_class_name("item_course").text == "abc101"
@pytest.mark.nbextensions
@notwindows
def test_validate_ok(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# choose the course "xyz 200"
_change_course(browser, "xyz 200")
# expand the assignment to show the notebooks
_wait_for_list(browser, "fetched", 1)
rows = _expand(browser, "#nbgrader-xyz_200-ps01", "ps.01")
rows.sort(key=_sort_rows)
assert len(rows) == 2
assert rows[1].find_element_by_class_name("item_name").text == "problem 1"
# click the "validate" button
rows[1].find_element_by_css_selector(".item_status button").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
# check that it succeeded
browser.find_element_by_css_selector(".modal-dialog .validation-success")
# close the modal dialog
_dismiss_modal(browser)
@pytest.mark.nbextensions
@notwindows
def test_validate_failure(browser, port, class_files, tempdir):
_load_assignments_list(browser, port)
_wait_until_loaded(browser)
# expand the assignment to show the notebooks
_wait_for_list(browser, "fetched", 1)
rows = _expand(browser, "#nbgrader-abc101-Problem_Set_1", "Problem Set 1")
rows.sort(key=_sort_rows)
assert len(rows) == 3
assert rows[1].find_element_by_class_name("item_name").text == "Problem 1"
assert rows[2].find_element_by_class_name("item_name").text == "Problem 2"
# click the "validate" button
rows[2].find_element_by_css_selector(".item_status button").click()
# wait for the modal dialog to appear
_wait_for_modal(browser)
    # check that it failed
browser.find_element_by_css_selector(".modal-dialog .validation-failed")
# close the modal dialog
_dismiss_modal(browser)
| bsd-3-clause | 4,763,972,484,128,794,000 | 36.813814 | 146 | 0.687976 | false |
zhaipro/AlphaLineupPuzzle | AlphaLineupPuzzle/models/__init__.py | 1 | 1118 | # coding: utf-8
import numpy as np
import chainer.links as L
import chainer.functions as F
from chainer import serializers, Variable
import policy
from AlphaLineupPuzzle.preprocessing import preprocessing
def load_policy_network(name):
in_dim = preprocessing.state_to_tensor.features
out_dim = preprocessing.action_to_tensor.features
model = L.Classifier(policy.Policy(in_dim, out_dim))
serializers.load_npz('%s.model.npz' % name, model)
def policy_network(gs):
state = preprocessing.state_to_tensor(gs)
Y = model.predictor([state]).data[0]
actions = []
for idx, pos in gs.legal_moves():
action = preprocessing.action_to_tensor(gs, idx, pos, gs.size)
actions.append(action)
        # Make sure that even if actions is an empty list, we still build an empty int numpy array
actions = np.array(actions, dtype=np.int32)
Y = Y[actions]
Y = Y.reshape((1, Y.size))
Y = Variable(Y)
P = F.softmax(Y).data[0]
for idx, pos in enumerate(gs.legal_moves()):
yield pos, P[idx]
return policy_network
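# Usage sketch (not part of the original module; `gs` is a hypothetical game-state
# object exposing `legal_moves()` and `size`, and 'alpha.model.npz' is assumed to
# exist on disk):
#
#     policy_network = load_policy_network('alpha')
#     for pos, prob in policy_network(gs):
#         print(pos, prob)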
| mit | 446,345,322,601,292,350 | 28.135135 | 74 | 0.648423 | false |
estaban/pyload | module/plugins/hoster/YoupornCom.py | 1 | 1531 | # -*- coding: utf-8 -*-
import re
from module.plugins.Hoster import Hoster
class YoupornCom(Hoster):
__name__ = "YoupornCom"
__type__ = "hoster"
__pattern__ = r'http://(?:www\.)?youporn\.com/watch/.+'
__version__ = "0.2"
__description__ = """Youporn.com hoster plugin"""
__author_name__ = "willnix"
__author_mail__ = "[email protected]"
def process(self, pyfile):
self.pyfile = pyfile
if not self.file_exists():
self.offline()
pyfile.name = self.get_file_name()
self.download(self.get_file_url())
def download_html(self):
url = self.pyfile.url
self.html = self.load(url, post={"user_choice": "Enter"}, cookies=False)
def get_file_url(self):
""" returns the absolute downloadable filepath
"""
if not self.html:
self.download_html()
return re.search(r'(http://download\.youporn\.com/download/\d+\?save=1)">', self.html).group(1)
def get_file_name(self):
if not self.html:
self.download_html()
file_name_pattern = r"<title>(.*) - Free Porn Videos - YouPorn</title>"
        return re.search(file_name_pattern, self.html).group(1).replace("&amp;", "&").replace("/", "") + '.flv'
def file_exists(self):
""" returns True or False
"""
if not self.html:
self.download_html()
if re.search(r"(.*invalid video_id.*)", self.html) is not None:
return False
else:
return True
| gpl-3.0 | 1,178,821,542,456,942,600 | 28.442308 | 111 | 0.556499 | false |
CiscoSystems/networking-cisco | networking_cisco/plugins/cisco/cfg_agent/cfg_exceptions.py | 1 | 3138 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions by Cisco Configuration Agent."""
from neutron.common import exceptions
class DriverException(exceptions.NeutronException):
"""Exception created by the Driver class."""
class DriverExpectedKeyNotSetException(DriverException):
"""An attribute expected to be set by plugin is missing"""
message = (_("Value for expected key: %(key)s is missing."
"Driver cannot proceed"))
class InitializationException(DriverException):
"""Exception when initialization of Routing Driver object."""
message = (_("Critical device parameter missing. Failed initializing "
"routing driver object."))
class ConnectionException(DriverException):
"""Connection exception when connecting to IOS XE hosting device."""
message = (_("Failed connecting to Device. Reason: %(reason)s. "
"Connection params are User:%(user)s, Host:%(host)s, "
"Port:%(port)s, Device timeout:%(timeout)s."))
class CSR1kvConfigException(DriverException):
"""Configuration exception thrown when modifying the running config."""
message = (_("Error executing snippet:%(snippet)s. "
"Hosting device:%(dev_id)s Mgmt IP:%(ip)s "
"ErrorType:%(type)s ErrorTag:%(tag)s Config string:%("
"confstr)s."))
class CSR1kvMissingInterfaceException(DriverException):
"""Configuration exception thrown when modifying the running config."""
message = (_("Interface corresponding to port:%(id)s and mac-address:%("
"mac)s is missing in the CSR. Cannot proceed with interface"
"config."))
class CSR1kvUnknownValueException(DriverException):
"""CSR1kv Exception thrown when an unknown value is received."""
message = (_("Data in attribute: %(attribute)s does not correspond to "
"expected value. Value received is %(value)s. "))
class DriverNotExist(DriverException):
message = _("Driver %(driver)s does not exist.")
class DriverNotFound(DriverException):
message = _("Driver not found for %(resource)s id:%(id)s.")
class DriverNotSetForMissingParameter(DriverException):
message = _("Driver cannot be set for missing parameter:%(p)s.")
class HAParamsMissingException(DriverException):
"""MissingParams exception thrown when HA params are missing"""
message = (_("For router: %(r_id)s and port: %(p_id)s, HA_ENABLED is set, "
"but port ha info is missing. Port details: %(port)s"))
| apache-2.0 | 111,988,582,621,257,380 | 38.721519 | 79 | 0.681963 | false |
ExCiteS/geokey-airquality | geokey_airquality/serializers.py | 1 | 10302 | """All serializers for the extension."""
import json
from django.core.exceptions import ValidationError
from django.contrib.gis.geos import Point
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from rest_framework.serializers import BaseSerializer
from geokey_airquality.models import AirQualityLocation, AirQualityMeasurement
class LocationSerializer(BaseSerializer):
"""
Serialiser for geokey_airquality.models.AirQualityLocation.
"""
def is_valid(self, raise_exception=False):
"""
Checks if location is valid.
Parameter
---------
raise_exception : Boolean
            Indicates if an exception should be raised if the data is invalid.
If set to false, the method will return False rather than raising
an exception.
Returns
-------
Boolean
Indicating if data is valid.
Raises
------
ValidationError
If data is invalid. Exception is raised only when raise_exception
is set to True.
"""
self._errors = {}
self._validated_data = {}
# Validate name
name = self.initial_data.get('name')
try:
if name is not None:
self._validated_data['name'] = name
else:
raise ValidationError('Name must be specified.')
except ValidationError, error:
self._errors['name'] = error
# Validate geometry
geometry = self.initial_data.get('geometry')
try:
if geometry.get('type') == 'Point':
coordinates = geometry.get('coordinates')
if coordinates is not None:
x = coordinates[0]
y = coordinates[1]
if x is not None and y is not None:
self._validated_data['geometry'] = Point(x, y)
else:
raise ValidationError('Coordinates are incorrect.')
else:
raise ValidationError('Coordinates are not set.')
else:
raise ValidationError('Only points can be used.')
except ValidationError, error:
self._errors['geometry'] = error
# Validate properties
properties = self.initial_data.get('properties') or {}
self._validated_data['properties'] = {}
if properties is not None:
for key, value in properties.iteritems():
if key in ['height', 'distance', 'characteristics']:
self._validated_data['properties'][key] = value
# Raise the exception
if self._errors and raise_exception:
raise ValidationError(self._errors)
return not bool(self._errors)
def create(self, validated_data):
"""
Creates a new location and returns the instance.
Parameter
---------
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityLocation
The instance created.
"""
data = self.context.get('data')
created = data.get('created')
called = data.get('called')
now = timezone.now()
if created is None or called is None:
created = now
else:
timedelta = parse_datetime(called) - parse_datetime(created)
created = now - timedelta
self.instance = AirQualityLocation.objects.create(
name=validated_data.get('name'),
geometry=validated_data.get('geometry'),
creator=self.context.get('user'),
created=created,
properties=validated_data.get('properties')
)
return self.instance
def update(self, instance, validated_data):
"""
Updates an existing location and returns the instance.
Parameter
---------
instance : geokey_airquality.models.AirQualityLocation
The instance to be updated.
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityLocation
The instance updated.
"""
instance.name = validated_data.get('name')
instance.geometry = validated_data.get('geometry')
instance.properties = validated_data.get('properties')
instance.save()
return instance
def to_representation(self, object):
"""
Returns the native representation of a location.
Parameter
---------
object : geokey_airquality.models.AirQualityLocation
The instance that is serialised.
Returns
-------
dict
            Native representation of the location.
"""
measurement_serializer = MeasurementSerializer(
object.measurements.all(),
many=True,
context=self.context
)
return {
'type': 'Feature',
'geometry': json.loads(object.geometry.geojson),
'id': object.id,
'name': object.name,
'created': str(object.created),
'properties': object.properties,
'measurements': measurement_serializer.data
}
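# Usage sketch (not part of the original module; `user` and the payload below are
# hypothetical). `context['data']` carries the client-side timestamps that
# `create` uses to correct the creation time:
#
#     serializer = LocationSerializer(
#         data={'name': 'Tower Bridge',
#               'geometry': {'type': 'Point', 'coordinates': [-0.075, 51.505]},
#               'properties': {'height': 1.5}},
#         context={'user': user, 'data': {}})
#     if serializer.is_valid():
#         location = serializer.save()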
class MeasurementSerializer(BaseSerializer):
"""
Serialiser for geokey_airquality.models.AirQualityMeasurement.
"""
def is_valid(self, raise_exception=False):
"""
Checks if measurement is valid.
Parameter
---------
raise_exception : Boolean
            Indicates if an exception should be raised if the data is invalid.
If set to false, the method will return False rather than raising
an exception.
Returns
-------
Boolean
Indicating if data is valid.
Raises
------
ValidationError
If data is invalid. Exception is raised only when raise_exception
is set to True.
"""
self._errors = {}
self._validated_data = {}
# Validate barcode
barcode = self.initial_data.get('barcode')
try:
if barcode is not None:
self._validated_data['barcode'] = barcode
else:
raise ValidationError('Barcode must be specified.')
except ValidationError, error:
self._errors['barcode'] = error
# Validate properties
properties = self.initial_data.get('properties') or {}
self._validated_data['properties'] = {}
if properties is not None:
for key, value in properties.iteritems():
if key in [
'results',
'additional_details',
'made_by_students'
]:
self._validated_data['properties'][key] = value
# Raise the exception
if self._errors and raise_exception:
raise ValidationError(self._errors)
return not bool(self._errors)
def create(self, validated_data):
"""
Creates a new measurement and returns the instance.
Parameter
---------
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityMeasurement
The instance created.
"""
data = self.context.get('data')
started = data.get('started', None)
finished = data.get('finished', None)
called = data.get('called', None)
now = timezone.now()
if started is None or called is None:
started = now
else:
timedelta = parse_datetime(called) - parse_datetime(started)
started = now - timedelta
if finished is not None:
if called is None:
finished = now
else:
timedelta = parse_datetime(called) - parse_datetime(finished)
finished = now - timedelta
self.instance = AirQualityMeasurement.objects.create(
location=self.context.get('location'),
barcode=validated_data.get('barcode'),
creator=self.context.get('user'),
started=started,
finished=finished,
properties=validated_data.get('properties')
)
return self.instance
def update(self, instance, validated_data):
"""
Updates an existing measurement and returns the instance.
Parameter
---------
instance : geokey_airquality.models.AirQualityMeasurement
The instance to be updated.
validated_data : dict
Data after validation.
Returns
-------
geokey_airquality.models.AirQualityMeasurement
The instance updated.
"""
data = self.context.get('data')
finished = data.get('finished', None)
called = data.get('called', None)
now = timezone.now()
if finished is not None:
if called is None:
finished = now
else:
timedelta = parse_datetime(called) - parse_datetime(finished)
finished = now - timedelta
instance.finished = finished
instance.barcode = validated_data.get('barcode')
instance.properties = validated_data.get('properties')
instance.save()
return instance
def to_representation(self, object):
"""
Returns the native representation of a measurement.
Parameter
---------
object : geokey_airquality.models.AirQualityMeasurement
The instance that is serialised.
Returns
-------
dict
            Native representation of the measurement.
"""
finished = object.finished or None
if finished is not None:
finished = str(finished)
return {
'id': object.id,
'barcode': object.barcode,
'started': str(object.started),
'finished': finished,
'properties': object.properties
}
| mit | 3,727,138,254,284,817,400 | 28.019718 | 78 | 0.553582 | false |
arnau126/django-mysql | src/django_mysql/forms.py | 1 | 9665 | import json
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.text import format_lazy
from django.utils.translation import ugettext_lazy as _
from django_mysql.validators import (
ListMaxLengthValidator,
ListMinLengthValidator,
SetMaxLengthValidator,
SetMinLengthValidator,
)
class SimpleListField(forms.CharField):
default_error_messages = {
"item_n_invalid": _("Item %(nth)s in the list did not validate: "),
"no_double_commas": _("No leading, trailing, or double commas."),
}
def __init__(self, base_field, max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
super().__init__(*args, **kwargs)
if max_length is not None:
self.max_length = max_length
self.validators.append(ListMaxLengthValidator(int(max_length)))
if min_length is not None:
self.min_length = min_length
self.validators.append(ListMinLengthValidator(int(min_length)))
def prepare_value(self, value):
if isinstance(value, list):
return ",".join(str(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if value and len(value):
items = value.split(",")
else:
items = []
errors = []
values = []
for i, item in enumerate(items, start=1):
if not len(item):
errors.append(
ValidationError(
self.error_messages["no_double_commas"], code="no_double_commas"
)
)
continue
try:
value = self.base_field.to_python(item)
except ValidationError as e:
for error in e.error_list:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
error.message,
),
code="item_n_invalid",
params={"nth": i},
)
)
values.append(value)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super().validate(value)
errors = []
for i, item in enumerate(value, start=1):
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
message,
),
code="item_invalid",
params={"nth": i},
)
)
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super().run_validators(value)
errors = []
for i, item in enumerate(value, start=1):
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
message,
),
code="item_n_invalid",
params={"nth": i},
)
)
if errors:
raise ValidationError(errors)
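# Usage sketch (not part of the original module): a comma-separated list field
# backed by an IntegerField, as it might be used in a Django form.
#
#     field = SimpleListField(forms.IntegerField(), max_length=5)
#     field.clean('1,2,3')            # -> [1, 2, 3]
#     field.prepare_value([1, 2, 3])  # -> '1,2,3'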
class SimpleSetField(forms.CharField):
empty_values = list(validators.EMPTY_VALUES) + [set()]
default_error_messages = {
"item_invalid": _('Item "%(item)s" in the set did not validate: '),
"item_n_invalid": _("Item %(nth)s in the set did not validate: "),
"no_double_commas": _("No leading, trailing, or double commas."),
"no_duplicates": _(
"Duplicates are not supported. " "'%(item)s' appears twice or more."
),
}
def __init__(self, base_field, max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
super().__init__(*args, **kwargs)
if max_length is not None:
self.max_length = max_length
self.validators.append(SetMaxLengthValidator(int(max_length)))
if min_length is not None:
self.min_length = min_length
self.validators.append(SetMinLengthValidator(int(min_length)))
def prepare_value(self, value):
if isinstance(value, set):
return ",".join(str(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if value and len(value):
items = value.split(",")
else:
items = []
errors = []
values = set()
for i, item in enumerate(items, start=1):
if not len(item):
errors.append(
ValidationError(
self.error_messages["no_double_commas"], code="no_double_commas"
)
)
continue
try:
value = self.base_field.to_python(item)
except ValidationError as e:
for error in e.error_list:
errors.append(
ValidationError(
format_lazy(
"{}{}",
self.error_messages["item_n_invalid"],
error.message,
),
code="item_n_invalid",
params={"nth": i},
)
)
if value in values:
errors.append(
ValidationError(
self.error_messages["no_duplicates"],
code="no_duplicates",
params={"item": item},
)
)
else:
values.add(value)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super().validate(value)
errors = []
for item in value:
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}", self.error_messages["item_invalid"], message
),
code="item_invalid",
params={"item": item},
)
)
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super().run_validators(value)
errors = []
for item in value:
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(
ValidationError(
format_lazy(
"{}{}", self.error_messages["item_invalid"], message
),
code="item_invalid",
params={"item": item},
)
)
if errors:
raise ValidationError(errors)
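# Usage sketch (not part of the original module):
#
#     field = SimpleSetField(forms.CharField())
#     field.clean('a,b')    # -> {'a', 'b'}
#     field.clean('a,b,a')  # raises ValidationError ('no_duplicates')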
class InvalidJSONInput(str):
pass
class JSONString(str):
pass
class JSONField(forms.CharField):
default_error_messages = {"invalid": _("'%(value)s' value must be valid JSON.")}
widget = forms.Textarea
def to_python(self, value):
if self.disabled:
return value
if value in self.empty_values:
return None
elif isinstance(value, (list, dict, int, float, JSONString)):
return value
try:
converted = json.loads(value)
except ValueError:
raise forms.ValidationError(
self.error_messages["invalid"], code="invalid", params={"value": value}
)
if isinstance(converted, str):
return JSONString(converted)
else:
return converted
def bound_data(self, data, initial):
if self.disabled:
return initial
try:
return json.loads(data)
except ValueError:
return InvalidJSONInput(data)
def prepare_value(self, value):
if isinstance(value, InvalidJSONInput):
return value
return json.dumps(value)
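# Usage sketch (not part of the original module):
#
#     field = JSONField()
#     field.clean('{"a": 1}')  # -> {'a': 1}
#     field.clean('not json')  # raises ValidationError with the 'invalid' message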
| bsd-3-clause | -4,095,754,749,629,425,700 | 32.793706 | 88 | 0.455354 | false |
rizar/actor-critic-public | lvsr/main.py | 1 | 52286 | from __future__ import print_function
import time
import logging
import pprint
import math
import os
import re
import cPickle as pickle
import sys
import yaml
import copy
from collections import OrderedDict
import numpy
from lvsr.algorithms import BurnIn
from blocks_extras.extensions.embed_ipython import EmbedIPython
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams
from blocks.bricks.lookup import LookupTable
from blocks.graph import ComputationGraph, apply_dropout, apply_noise
from blocks.algorithms import (GradientDescent,
StepClipping, CompositeRule,
Momentum, RemoveNotFinite, AdaDelta,
Restrict, VariableClipping, RMSProp,
Adam)
from blocks.monitoring import aggregation
from blocks.monitoring.aggregation import MonitoredQuantity
from blocks.theano_expressions import l2_norm
from blocks.extensions import (
FinishAfter, Printing, Timing, ProgressBar, SimpleExtension,
TrainingExtension)
from blocks.extensions.saveload import Checkpoint, Load
from blocks.extensions.monitoring import (
TrainingDataMonitoring, DataStreamMonitoring)
from blocks_extras.extensions.plot import Plot
from blocks.extensions.training import TrackTheBest
from blocks.extensions.predicates import OnLogRecord
from blocks.log import TrainingLog
from blocks.model import Model
from blocks.main_loop import MainLoop
from blocks.filter import VariableFilter, get_brick
from blocks.roles import WEIGHT, OUTPUT
from blocks.utils import reraise_as, dict_subset
from blocks.select import Selector
from lvsr.bricks.recognizer import EncoderDecoder
from lvsr.datasets import Data
from lvsr.expressions import (
monotonicity_penalty, entropy, weights_std)
from lvsr.extensions import (
CGStatistics, AdaptiveClipping, GeneratePredictions, Patience,
CodeVersion)
from lvsr.error_rate import wer, _bleu
from lvsr.graph import apply_adaptive_noise
from lvsr.utils import rename
from blocks.serialization import load_parameters, continue_training
from lvsr.log_backends import NDarrayLog
from lvsr.beam_search import BeamSearch
floatX = theano.config.floatX
logger = logging.getLogger(__name__)
def _gradient_norm_is_none(log):
return math.isnan(log.current_row.get('total_gradient_norm', 0))
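# Despite its name, this predicate checks whether the total gradient norm is NaN;
# it is attached below as a FinishAfter condition so that training stops after a
# numerically broken update.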
class PhonemeErrorRate(MonitoredQuantity):
def __init__(self, recognizer, data, metric, beam_size,
char_discount=None, round_to_inf=None, stop_on=None,
consider_all_eos=None,
**kwargs):
self.recognizer = recognizer
self.metric = metric
self.beam_size = beam_size
self.char_discount = char_discount
self.round_to_inf = round_to_inf
self.stop_on = stop_on
self.consider_all_eos = consider_all_eos
# Will only be used to decode generated outputs,
# which is necessary for correct scoring.
self.data = data
kwargs.setdefault('name', 'per')
kwargs.setdefault('requires', (self.recognizer.single_inputs.values() +
[self.recognizer.single_labels]))
super(PhonemeErrorRate, self).__init__(**kwargs)
if not self.metric in ['per', 'bleu']:
raise ValueError
self.recognizer.init_beam_search(self.beam_size)
def initialize(self):
self.total_length = 0.
self.total_recognized_length = 0.
self.decoded = []
# BLEU
self.total_correct = numpy.zeros(4)
self.total_possible = numpy.zeros(4)
# Edit distance
self.total_errors = 0.
self.num_examples = 0
def aggregate(self, *args):
input_vars = self.requires[:-1]
beam_inputs = {var.name: val for var, val in zip(input_vars,
args[:-1])}
transcription = args[-1]
data = self.data
groundtruth = data.decode(transcription)
search_kwargs = dict(
char_discount=self.char_discount,
round_to_inf=self.round_to_inf,
stop_on=self.stop_on,
validate_solution_function=getattr(
data.info_dataset, 'validate_solution', None),
consider_all_eos=self.consider_all_eos)
# We rely on the defaults hard-coded in BeamSearch
search_kwargs = {k: v for k, v in search_kwargs.items() if v}
outputs, search_costs = self.recognizer.beam_search(
beam_inputs, **search_kwargs)
recognized = data.decode(outputs[0])
self.decoded.append(outputs[0])
self.total_length += len(groundtruth)
self.total_recognized_length += len(recognized)
self.num_examples += 1
if self.metric == 'per':
error = min(1, wer(groundtruth, recognized))
self.total_errors += error * len(groundtruth)
self.mean_error = self.total_errors / self.total_length
elif self.metric == 'bleu':
_, correct, _, _ = _bleu(groundtruth, recognized)
self.total_correct += correct
self.total_possible += numpy.array([len(recognized) - i for i in range(4)])
def get_aggregated_value(self):
if self.metric == 'per':
return self.mean_error
elif self.metric == 'bleu':
logger.debug('Precisions {}'.format(self.total_correct / self.total_possible))
logger.debug('Total recognized length: {}'.format(self.total_recognized_length))
logger.debug('Total groundtruth length: {}'.format(self.total_length))
brevity_penalty = numpy.exp(min(0.0, 1 - self.total_length / float(self.total_recognized_length)))
logger.debug('Brevity penalty: {}'.format(brevity_penalty))
bleu = (self.total_correct / self.total_possible).prod() ** 0.25
bleu *= brevity_penalty
            return 100 * bleu
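# Note on the BLEU branch above: the aggregated score is standard corpus-level
# BLEU, i.e. the geometric mean of the 1- to 4-gram precisions multiplied by the
# brevity penalty exp(min(0, 1 - reference_length / hypothesis_length)), scaled
# to the 0-100 range.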
class _OutputDecoded(SimpleExtension):
def __init__(self, data, per, decoded_path, **kwargs):
self.data = data
self.per = per
self.decoded_path = decoded_path
super(_OutputDecoded, self).__init__(**kwargs)
def do(self, *args, **kwargs):
if not os.path.exists(self.decoded_path):
os.mkdir(self.decoded_path)
with open(os.path.join(
self.decoded_path,
str(self.main_loop.status['iterations_done'])), 'w') as dst:
for output in self.per.decoded:
print(self.data.pretty_print(output, None), file=dst)
class Strings(MonitoredQuantity):
def __init__(self, data, **kwargs):
super(Strings, self).__init__(**kwargs)
self.data = data
def initialize(self):
self.result = None
def aggregate(self, string):
self.result = [
self.data.monospace_print(string[:, i])
for i in range(string.shape[1])]
def get_aggregated_value(self):
return self.result
class SwitchOffLengthFilter(SimpleExtension):
def __init__(self, length_filter, **kwargs):
self.length_filter = length_filter
super(SwitchOffLengthFilter, self).__init__(**kwargs)
def do(self, *args, **kwargs):
self.length_filter.max_length = None
self.main_loop.log.current_row['length_filter_switched'] = True
class LoadLog(TrainingExtension):
"""Loads a the log from the checkoint.
Makes a `LOADED_FROM` record in the log with the dump path.
Parameters
----------
path : str
The path to the folder with dump.
"""
def __init__(self, path, **kwargs):
super(LoadLog, self).__init__(**kwargs)
self.path = path[:-4] + '_log.zip'
def load_to(self, main_loop):
with open(self.path, "rb") as source:
loaded_log = pickle.load(source)
#TODO: remove and fix the printing issue!
loaded_log.status['resumed_from'] = None
#make sure that we start a new epoch
if loaded_log.status.get('epoch_started'):
            logger.warn('Loading a snapshot taken during an epoch. '
'Iteration information will be destroyed!')
loaded_log.status['epoch_started'] = False
main_loop.log = loaded_log
def before_training(self):
if not os.path.exists(self.path):
logger.warning("No log dump found")
return
logger.info("loading log from {}".format(self.path))
try:
self.load_to(self.main_loop)
#self.main_loop.log.current_row[saveload.LOADED_FROM] = self.path
except Exception:
reraise_as("Failed to load the state")
def create_model(config, data,
load_path=None,
test_tag=False):
"""
Build the main brick and initialize or load all parameters.
Parameters
----------
config : dict
the configuration dict
data : object of class Data
the dataset creation object
load_path : str or None
if given a string, it will be used to load model parameters. Else,
        the parameters will be randomly initialized by calling
recognizer.initialize()
test_tag : bool
        if true, will tag the input variables with test values
"""
# First tell the recognizer about required data sources
net_config = dict(config["net"])
train_config = dict(config["training"])
bottom_class = net_config['bottom']['bottom_class']
input_dims = {
source: data.num_features(source)
for source in bottom_class.vector_input_sources}
input_num_chars = {
source: len(data.token_map(source))
for source in bottom_class.discrete_input_sources}
recognizer = EncoderDecoder(
input_dims=input_dims,
input_num_chars=input_num_chars,
bos_label=data.bos_label,
eos_label=data.eos_label,
num_labels=data.num_labels,
name="recognizer",
data_prepend_eos=data.prepend_eos,
token_map=data.token_map('labels'),
generate_predictions=not train_config.get('external_predictions', False),
compute_targets=not train_config.get('external_targets', False),
extra_generation_steps=train_config.get('extra_generation_steps'),
**net_config)
if load_path:
recognizer.load_params(load_path)
else:
for brick_path, attribute_dict in sorted(
config['initialization'].items(),
key=lambda (k, v): k.count('/')):
for attribute, value in attribute_dict.items():
brick, = Selector(recognizer).select(brick_path).bricks
setattr(brick, attribute, value)
brick.push_initialization_config()
recognizer.initialize()
if test_tag:
stream = data.get_stream("train")
data = next(stream.get_epoch_iterator(as_dict=True))
for var in (recognizer.inputs.values() +
[recognizer.inputs_mask, recognizer.labels, recognizer.labels_mask]):
var.tag.test_value = data[var.name]
theano.config.compute_test_value = 'warn'
theano.config.print_test_value = True
return recognizer
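# Usage sketch (illustrative; the configuration dict and checkpoint path are
# hypothetical):
#
#     data = Data(**config['data'])
#     recognizer = create_model(config, data, load_path='wsj_model_best.tar')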
def initialize_all(config, save_path, bokeh_name,
params, bokeh_server, bokeh, test_tag, use_load_ext,
load_log, fast_start):
root_path, extension = os.path.splitext(save_path)
data = Data(**config['data'])
train_conf = config['training']
mon_conf = config['monitoring']
recognizer = create_model(config, data,
test_tag=test_tag)
step_number = theano.shared(0)
# Separate attention_params to be handled differently
# when regularization is applied
attention = recognizer.generator.recurrent.attention
attention_params = Selector(attention).get_parameters().values()
logger.info(
"Initialization schemes for all bricks.\n"
"Works well only in my branch with __repr__ added to all them,\n"
"there is an issue #463 in Blocks to do that properly.")
def show_init_scheme(cur):
result = dict()
for attr in dir(cur):
if attr.endswith('_init'):
result[attr] = getattr(cur, attr)
for child in cur.children:
result[child.name] = show_init_scheme(child)
return result
logger.info(pprint.pformat(show_init_scheme(recognizer)))
cg = recognizer.get_cost_graph(batch=True, training=True)
labels, = VariableFilter(
applications=[recognizer.costs], name='prediction')(cg)
labels_mask, = VariableFilter(
applications=[recognizer.costs], name='prediction_mask')(cg)
batch_cost = cg.outputs[0].sum()
batch_size = rename(recognizer.labels.shape[1], "batch_size")
# Assumes constant batch size. `aggregation.mean` is not used because
# of Blocks #514.
cost = batch_cost / batch_size
cost.name = "sequence_total_cost"
logger.info("Cost graph is built")
# Fetch variables useful for debugging.
# It is important not to use any aggregation schemes here,
# as it's currently impossible to spread the effect of
# regularization on their variables, see Blocks #514.
cg = ComputationGraph(cost)
r = recognizer
bottom_output = VariableFilter(
        # We need name_regex instead of name because LookupTable calls its output output_0
applications=[r.bottom.apply], name_regex="output")(
cg)[-1]
attended, = VariableFilter(
applications=[r.generator.recurrent.apply], name="attended")(
cg)
attended_mask, = VariableFilter(
applications=[r.generator.recurrent.apply], name="attended_mask")(
cg)
weights, = VariableFilter(
applications=[r.generator.costs], name="weights")(
cg)
max_recording_length = rename(bottom_output.shape[0],
"max_recording_length")
# To exclude subsampling related bugs
max_attended_mask_length = rename(attended_mask.shape[0],
"max_attended_mask_length")
max_attended_length = rename(attended.shape[0],
"max_attended_length")
max_num_phonemes = rename(labels.shape[0],
"max_num_phonemes")
mean_attended = rename(abs(attended).mean(),
"mean_attended")
mean_bottom_output = rename(abs(bottom_output).mean(),
"mean_bottom_output")
weights_penalty = rename(monotonicity_penalty(weights, labels_mask),
"weights_penalty")
weights_entropy = rename(entropy(weights, labels_mask),
"weights_entropy")
mask_density = rename(labels_mask.mean(),
"mask_density")
# Observables:
primary_observables = [] # monitored each batch
secondary_observables = [] # monitored every 10 batches
validation_observables = [] # monitored on the validation set
verbosity = config['monitoring'].get('verbosity', 0.)
secondary_observables = [
weights_penalty, weights_entropy,
mean_attended, mean_bottom_output,
batch_size, max_num_phonemes,
mask_density]
# Regularization. It is applied explicitly to all variables
# of interest, it could not be applied to the cost only as it
# would not have effect on auxiliary variables, see Blocks #514.
reg_config = config.get('regularization', dict())
regularized_cg = ComputationGraph([cost] + secondary_observables)
if reg_config.get('dropout'):
logger.info('apply dropout')
regularized_cg = apply_dropout(cg, [bottom_output], 0.5)
if reg_config.get('noise'):
logger.info('apply noise')
noise_subjects = [p for p in cg.parameters if p not in attention_params]
regularized_cg = apply_noise(cg, noise_subjects, reg_config['noise'])
train_cost = regularized_cg.outputs[0]
if reg_config.get("penalty_coof", .0) > 0:
# big warning!!!
# here we assume that:
# regularized_weights_penalty = regularized_cg.outputs[1]
train_cost = (train_cost +
reg_config.get("penalty_coof", .0) *
regularized_cg.outputs[1] / batch_size)
if reg_config.get("decay", .0) > 0:
logger.debug("Using weight decay of {}".format(reg_config['decay']))
train_cost = (train_cost + reg_config.get("decay", .0) *
l2_norm(VariableFilter(roles=[WEIGHT])(cg.parameters)) ** 2)
gradients = None
if reg_config.get('adaptive_noise'):
logger.info('apply adaptive noise')
if ((reg_config.get("penalty_coof", .0) > 0) or
(reg_config.get("decay", .0) > 0)):
            logger.error('using adaptive noise with alignment weight penalty '
'or weight decay is probably stupid')
train_cost, regularized_cg, gradients, noise_brick = apply_adaptive_noise(
cg, cg.outputs[0],
variables=cg.parameters,
num_examples=data.get_dataset('train').num_examples,
parameters=Model(regularized_cg.outputs[0]).get_parameter_dict().values(),
**reg_config.get('adaptive_noise')
)
adapt_noise_cg = ComputationGraph(train_cost)
model_prior_mean = rename(
VariableFilter(applications=[noise_brick.apply],
name='model_prior_mean')(adapt_noise_cg)[0],
'model_prior_mean')
model_cost = rename(
VariableFilter(applications=[noise_brick.apply],
name='model_cost')(adapt_noise_cg)[0],
'model_cost')
model_prior_variance = rename(
VariableFilter(applications=[noise_brick.apply],
name='model_prior_variance')(adapt_noise_cg)[0],
'model_prior_variance')
regularized_cg = ComputationGraph(
[train_cost, model_cost] +
regularized_cg.outputs +
[model_prior_mean, model_prior_variance])
primary_observables += [
regularized_cg.outputs[1], # model cost
regularized_cg.outputs[2], # task cost
regularized_cg.outputs[-2], # model prior mean
regularized_cg.outputs[-1]] # model prior variance
# Additional components of the costs required for some criterions
if config['net']['criterion']['name'] == 'reinforce':
readout = r.generator.readout
baselines, = VariableFilter(
bricks=[readout], name='baselines')(regularized_cg)
baseline_errors, = VariableFilter(
bricks=[readout], name='baseline_errors')(regularized_cg)
mean_baseline = rename(baselines.mean(),
'mean_baseline')
mean_baseline_error = rename(baseline_errors.sum(axis=0).mean(),
'mean_baseline_error')
train_cost = (train_cost * config['net']['criterion'].get('train_cost_coof', 1.0)
+ mean_baseline_error)
# Add log-likelihood of the groundtruth to the cost
log_likelihood_coef = config['net']['criterion'].get('log_likelihood_coef')
if log_likelihood_coef:
logger.info("Also add log-likelihood to the cost")
groundtruth_cg = recognizer.get_cost_graph(
training=False, use_prediction=False, groundtruth_as_predictions=True)
prediction_log_probs, = VariableFilter(
bricks=[r.generator.readout], name='prediction_log_probs')(groundtruth_cg)
groundtruth_mask, = VariableFilter(
bricks=[r.generator.readout], name='groundtruth_mask')(groundtruth_cg)
log_likelihood = (prediction_log_probs * groundtruth_mask).sum(axis=0).mean()
train_cost -= log_likelihood_coef * log_likelihood
# Build the model and load parameters if necessary
train_cost.name = 'train_cost'
model = Model(train_cost)
if params:
logger.info("Load parameters from " + params)
# please note: we cannot use recognizer.load_params
        # as it builds a new computation graph that does not have
        # shared variables added by adaptive weight noise
with open(params, 'r') as src:
param_values = load_parameters(src)
model.set_parameter_values(param_values)
parameters = model.get_parameter_dict()
def _log_parameters(message, ps):
logger.info(message + "\n" +
pprint.pformat(
[(key, parameters[key].get_value().shape) for key
in sorted(ps.keys())],
width=120))
_log_parameters("Parameters", parameters)
# Define the training algorithm.
trainable_parameters = OrderedDict(
[(key, value) for key, value in parameters.items()
if re.match(train_conf.get('trainable_regexp', '.*'), key)])
if trainable_parameters.keys() != parameters.keys():
_log_parameters("Trainable parameters", trainable_parameters)
if train_conf['gradient_threshold']:
clipping = StepClipping(train_conf['gradient_threshold'])
clipping.threshold.name = "gradient_norm_threshold"
clipping = [clipping]
else:
clipping = []
rule_names = train_conf.get('rules', ['momentum'])
core_rules = []
if 'momentum' in rule_names:
logger.info("Using scaling and momentum for training")
core_rules.append(
Momentum(train_conf['scale'], train_conf['momentum']))
if 'adadelta' in rule_names:
logger.info("Using AdaDelta for training")
core_rules.append(
AdaDelta(train_conf['decay_rate'], train_conf['epsilon']))
if 'rmsprop' in rule_names:
logger.info("Using RMSProp for training")
core_rules.append(
RMSProp(train_conf['scale'], train_conf['decay_rate'],
train_conf['max_scaling']))
if 'adam' in rule_names:
logger.info("Using ADAM for training")
core_rules.append(Adam(
train_conf['scale'],
train_conf['momentum'],
train_conf['decay_rate'],
epsilon=train_conf['epsilon']))
max_norm_rules = []
if reg_config.get('max_norm', False) > 0:
logger.info("Apply MaxNorm")
maxnorm_subjects = VariableFilter(roles=[WEIGHT])(trainable_parameters)
if reg_config.get('max_norm_exclude_lookup', False):
maxnorm_subjects = [v for v in maxnorm_subjects
if not isinstance(get_brick(v), LookupTable)]
logger.info(
"Parameters covered by MaxNorm:\n"
+ pprint.pformat([name for name, p in trainable_parameters.items()
if p in maxnorm_subjects]))
logger.info(
"Parameters NOT covered by MaxNorm:\n"
+ pprint.pformat([name for name, p in trainable_parameters.items()
if not p in maxnorm_subjects]))
max_norm_rules = [
Restrict(VariableClipping(reg_config['max_norm'], axis=0),
maxnorm_subjects)]
burn_in = []
if train_conf.get('burn_in_steps', 0):
burn_in.append(
BurnIn(num_steps=train_conf['burn_in_steps']))
algorithm = GradientDescent(
cost=train_cost,
parameters=trainable_parameters.values(),
gradients=gradients,
step_rule=CompositeRule(
clipping + core_rules + max_norm_rules +
# Parameters are not changed at all
# when nans are encountered.
[RemoveNotFinite(0.0)] + burn_in),
on_unused_sources='warn')
if regularized_cg.updates:
logger.debug("There are updates in the computation graph")
algorithm.updates.extend(regularized_cg.updates.items())
algorithm.updates.append((step_number, step_number + 1))
logger.debug("Scan Ops in the gradients")
gradient_cg = ComputationGraph(algorithm.gradients.values())
for op in ComputationGraph(gradient_cg).scans:
logger.debug(op)
# More variables for debugging: some of them can be added only
# after the `algorithm` object is created.
primary_observables += [
train_cost,
max_recording_length,
max_attended_length, max_attended_mask_length]
if clipping:
primary_observables += [
algorithm.total_gradient_norm,
algorithm.total_step_norm,
clipping[0].threshold]
secondary_observables = list(regularized_cg.outputs)
if not 'train_cost' in [v.name for v in secondary_observables]:
secondary_observables += [train_cost]
if clipping:
secondary_observables += [
algorithm.total_step_norm, algorithm.total_gradient_norm,
clipping[0].threshold]
if mon_conf.get('monitor_parameters'):
for name, param in parameters.items():
num_elements = numpy.product(param.get_value().shape)
norm = param.norm(2) / num_elements ** 0.5
grad_norm = algorithm.gradients[param].norm(2) / num_elements ** 0.5
step_norm = algorithm.steps[param].norm(2) / num_elements ** 0.5
stats = tensor.stack(norm, grad_norm, step_norm, step_norm / grad_norm)
stats.name = name + '_stats'
secondary_observables.append(stats)
# Fetch variables that make sense only for some criteria.
# Add respective observables
cost_to_track = cost
choose_best = min
if r.cost_involves_generation():
readout = r.generator.readout
rewards, = VariableFilter(
bricks=[readout], name='rewards')(regularized_cg)
mean_total_reward = rename(rewards.sum(axis=0).mean(), 'mean_total_reward')
primary_observables += [mean_total_reward]
if verbosity >= 1:
primary_observables += [aggregation.take_last(rewards)]
secondary_observables += [
Strings(data, requires=[r.labels], name='groundtruth'),
Strings(data, requires=[labels], name='predictions')]
if r.criterion['name'] == 'reinforce':
baselines, = VariableFilter(
bricks=[readout], name='baselines')(regularized_cg)
log_probs, = VariableFilter(
bricks=[readout], name='log_probs')(regularized_cg)
baseline_errors, = VariableFilter(
bricks=[readout], name='baseline_errors')(regularized_cg)
est_entropy = rename(log_probs.sum(axis=0).mean(), 'entropy')
primary_observables += [est_entropy, mean_baseline, mean_baseline_error]
if verbosity >= 1:
primary_observables += [aggregation.take_last(baselines)]
rewards, = VariableFilter(
bricks=[readout], name='rewards')(regularized_cg)
validation_observables += [
rename(rewards.sum(axis=0).mean(), 'mean_total_reward')]
validation_updates = cg.updates
if r.criterion['name'] in ['sarsa', 'actor_critic']:
value_biases, = VariableFilter(
bricks=[readout], name='value_biases')(regularized_cg)
prediction_mask, = VariableFilter(
bricks=[readout], name='prediction_mask')(regularized_cg)
prediction_values, = VariableFilter(
bricks=[readout], name='prediction_values')(regularized_cg)
prediction_outputs, = VariableFilter(
bricks=[readout], name='prediction_outputs')(regularized_cg)
probs, = VariableFilter(
applications=[readout.costs], name='probs')(regularized_cg)
value_targets, = VariableFilter(
bricks=[readout], name='value_targets')(regularized_cg)
values, = VariableFilter(
applications=[readout.costs], name='values')(regularized_cg)
outputs, = VariableFilter(
bricks=[readout], name='outputs')(regularized_cg)
last_character_costs, = VariableFilter(
bricks=[readout], name='last_character_costs')(regularized_cg)
mean_expected_reward, = VariableFilter(
bricks=[readout], name='mean_expected_reward')(regularized_cg)
mean_last_character_cost = rename(
last_character_costs.mean(),
'mean_last_character_cost')
mean_action_entropy, = VariableFilter(
bricks=[readout], name='mean_actor_entropy')(regularized_cg)
mean2_output, = VariableFilter(
bricks=[readout], name='mean2_output')(regularized_cg)
max_output, = VariableFilter(
bricks=[readout], name='max_output')(regularized_cg)
primary_observables += [
mean_expected_reward, mean_last_character_cost,
mean2_output, max_output,
mean_action_entropy]
if verbosity >= 1:
primary_observables += map(aggregation.take_last,
[prediction_mask, prediction_values, prediction_outputs,
probs, value_biases, outputs, values, value_targets])
# Note, that we build a "clean" cg for the validation.
# In particular, it contains not dangling free variables
# like "value_targets", probs, etc.
clean_cg = recognizer.get_cost_graph(batch=True)
clean_rewards, = VariableFilter(
bricks=[readout], name='rewards')(clean_cg)
validation_observables += [
rename(clean_rewards.sum(axis=0).mean(), 'mean_total_reward')]
cost_to_track = validation_observables[-1]
choose_best = max
validation_updates = clean_cg.updates
# In addition we monitoring the rewards of a mixed policy
mixed_cg = recognizer.get_cost_graph(batch=True, with_mixed_generation=True)
mixed_rewards, = VariableFilter(
bricks=[readout], name='rewards')(mixed_cg)
mixed_validation_observables = [
rename(mixed_rewards.sum(axis=0).mean(), 'mean_total_reward')
]
mixed_validation_updates = mixed_cg.updates
if r.criterion['name'] == 'actor_critic':
mean_critic_cost, = VariableFilter(
bricks=[readout], name='mean_critic_cost')(regularized_cg)
mean_critic_monte_carlo_cost, = VariableFilter(
bricks=[readout], name='mean_critic_monte_carlo_cost')(regularized_cg)
mean_actor_cost, = VariableFilter(
bricks=[readout], name='mean_actor_cost')(regularized_cg)
primary_observables += [
mean_critic_cost, mean_critic_monte_carlo_cost, mean_actor_cost]
if r.criterion['name'] in ['log_likelihood', 'reinforce']:
energies, = VariableFilter(
applications=[r.generator.readout.all_scores], roles=[OUTPUT])(
cg)
min_energy = rename(energies.min(), "min_energy")
max_energy = rename(energies.max(), "max_energy")
secondary_observables += [min_energy, max_energy]
if r.criterion['name'] == 'log_likelihood':
validation_observables += [
rename(aggregation.mean(batch_cost, batch_size), cost.name),
weights_entropy, weights_penalty]
validation_updates = cg.updates
def attach_aggregation_schemes(variables):
# Attaches non-trivial aggregation schemes to
# secondary and validation observables
result = []
for var in variables:
if var.name == 'weights_penalty':
result.append(rename(aggregation.mean(var, batch_size),
'weights_penalty_per_recording'))
elif var.name == 'weights_entropy':
result.append(rename(aggregation.mean(var, labels_mask.sum()),
'weights_entropy_per_label'))
else:
result.append(var)
return result
if verbosity >= 2:
# Override the frequencies
mon_conf['primary_freq'] = 1
mon_conf['secondary_freq'] = 1
# Build main loop.
logger.info("Initialize extensions")
extensions = []
if use_load_ext and params:
extensions.append(Load(params, load_iteration_state=True, load_log=True))
if load_log and params:
extensions.append(LoadLog(params))
extensions += [
Timing(every_n_batches=mon_conf['primary_freq']),
CGStatistics(),
CodeVersion(['lvsr']),
]
# Monitoring
extensions.append(TrainingDataMonitoring(
primary_observables,
every_n_batches=mon_conf.get('primary_freq', 1)))
average_monitoring = TrainingDataMonitoring(
attach_aggregation_schemes(secondary_observables),
prefix="average",
every_n_batches=mon_conf.get('secondary_freq', 10))
extensions.append(average_monitoring)
validation_requested = (
mon_conf['validate_every_epochs'] or
mon_conf['validate_every_batches'])
if validation_requested:
validation = DataStreamMonitoring(
attach_aggregation_schemes(validation_observables),
data.get_stream("valid", shuffle=False),
prefix="valid", updates=validation_updates).set_conditions(
before_first_epoch=not fast_start,
every_n_epochs=mon_conf['validate_every_epochs'],
every_n_batches=mon_conf['validate_every_batches'],
after_training=False)
track_the_best_cost = TrackTheBest(
validation.record_name(cost_to_track),
choose_best=choose_best).set_conditions(
before_first_epoch=True,
every_n_epochs=mon_conf['validate_every_epochs'],
every_n_batches=mon_conf['validate_every_batches'])
extensions.append(validation)
extensions.append(track_the_best_cost)
if r.criterion['name'] == 'actor_critic':
mixed_validation = DataStreamMonitoring(
mixed_validation_observables,
data.get_stream("valid", shuffle=False),
prefix="mixed_valid", updates=mixed_validation_updates).set_conditions(
before_first_epoch=not fast_start,
every_n_epochs=mon_conf['validate_every_epochs'],
every_n_batches=mon_conf['validate_every_batches'],
after_training=False)
extensions.append(mixed_validation)
search_config = config['monitoring'].get('search')
search_requested = (search_config and (
mon_conf['search_every_epochs'] or
mon_conf['search_every_batches']))
if search_requested:
per = PhonemeErrorRate(
recognizer, data,
**config['monitoring']['search'])
frequency_kwargs = dict(
before_first_epoch=not fast_start,
every_n_epochs=mon_conf['search_every_epochs'],
every_n_batches=mon_conf['search_every_batches'],
after_training=False)
per_monitoring = DataStreamMonitoring(
[per], data.get_stream("valid", batches=False, shuffle=False),
prefix="valid").set_conditions(**frequency_kwargs)
extensions.append(per_monitoring)
track_the_best_per = TrackTheBest(
per_monitoring.record_name(per),
choose_best=min if search_config['metric'] == 'per' else max).set_conditions(
**frequency_kwargs)
extensions.append(track_the_best_per)
extensions.append(_OutputDecoded(
data, per, root_path + '_decoded',
**frequency_kwargs
))
if mon_conf.get('search_on_training'):
# We reuse PhonemeErrorRate object here, should not cause problems
training_per_monitoring = DataStreamMonitoring(
[per], data.get_stream(
"train", batches=False, shuffle=False,
num_examples=mon_conf['search_on_training']),
prefix="train").set_conditions(**frequency_kwargs)
track_the_best_training_per = TrackTheBest(
training_per_monitoring.record_name(per),
choose_best=min if search_config['metric'] == 'per' else max).set_conditions(
**frequency_kwargs)
extensions.append(training_per_monitoring)
extensions.append(track_the_best_training_per)
extensions.append(_OutputDecoded(
data, per, root_path + '_train_decoded',
**frequency_kwargs))
# Training control
if train_conf.get('external_predictions'):
extensions.append(GeneratePredictions(
train_conf['extra_generation_steps'],
train_conf.get('external_targets'),
config['net']['criterion'].get('trpo_coef', 0.0),
train_conf.get('force_generate_groundtruth'),
train_conf.get('catching_up_coof'),
train_conf.get('catching_up_freq')))
if clipping:
extensions.append(AdaptiveClipping(
algorithm.total_gradient_norm,
clipping[0], train_conf['gradient_threshold'],
decay_rate=0.998, burnin_period=500))
extensions += [
FinishAfter(after_n_batches=train_conf.get('num_batches'),
after_n_epochs=train_conf.get('num_epochs'))
.add_condition(["after_batch"], _gradient_norm_is_none),
]
if bokeh:
channels = [
# Plot 1: training and validation costs
[average_monitoring.record_name(train_cost),
validation.record_name(cost)],
# Plot 2: gradient norm,
[average_monitoring.record_name(algorithm.total_gradient_norm),
average_monitoring.record_name(clipping[0].threshold)]]
# Plot 3: phoneme error rate
if search_config:
channels += [per_monitoring.record_name(per)]
channels += [
# Plot 4: training and validation mean weight entropy
[average_monitoring._record_name('weights_entropy_per_label'),
validation._record_name('weights_entropy_per_label')],
# Plot 5: training and validation monotonicity penalty
[average_monitoring._record_name('weights_penalty_per_recording'),
validation._record_name('weights_penalty_per_recording')]]
extensions += [
Plot(bokeh_name if bokeh_name
else os.path.basename(save_path),
channels,
every_n_batches=10,
server_url=bokeh_server),]
checkpoint = Checkpoint(
save_path,
before_first_epoch=not fast_start,
every_n_epochs=train_conf.get('save_every_epochs'),
every_n_batches=train_conf.get('save_every_batches'),
save_main_loop=True,
save_separately=["log"],
use_cpickle=True)
if search_requested:
checkpoint.add_condition(
['after_batch', 'after_epoch'],
OnLogRecord(track_the_best_per.notification_name),
(root_path + "_best" + extension,))
if validation_requested:
checkpoint.add_condition(
['after_batch', 'after_epoch'],
OnLogRecord(track_the_best_cost.notification_name),
(root_path + "_best_ll" + extension,)),
extensions += [
checkpoint,
EmbedIPython(use_main_loop_run_caller_env=True)]
if train_conf.get('patience'):
patience_conf = train_conf['patience']
if not patience_conf.get('notification_names'):
# setdefault will not work for empty list
patience_conf['notification_names'] = [
track_the_best_per.notification_name,
track_the_best_cost.notification_name]
extensions.append(Patience(**patience_conf))
extensions.append(Printing(every_n_batches=mon_conf['primary_freq']))
return model, algorithm, data, extensions
def train(config, save_path, bokeh_name,
params, bokeh_server, bokeh, test_tag, use_load_ext,
load_log, fast_start, debug_mode):
model, algorithm, data, extensions = initialize_all(
config, save_path, bokeh_name,
params, bokeh_server, bokeh, test_tag, use_load_ext,
load_log, fast_start)
num_examples = config['training'].get('num_examples', None)
# Save the config into the status
log = TrainingLog()
log.status['_config'] = repr(config)
if debug_mode:
data_stream = data.get_stream(
"train", shuffle=False, num_examples=data.batch_size)
else:
data_stream = data.get_stream(
"train", shuffle=config['training'].get('shuffle', True),
num_examples=num_examples)
main_loop = MainLoop(
model=model, log=log, algorithm=algorithm,
data_stream=data_stream,
extensions=extensions)
main_loop.run()
if (main_loop.log.status['batch_interrupt_received']
or main_loop.log.status['epoch_interrupt_received']):
return 'interrupted'
return 'success'
def train_multistage(config, save_path, bokeh_name, params,
start_stage, final_stage, **kwargs):
"""Run multiple stages of the training procedure."""
if os.environ.get('SLURM_RESTART_COUNT') is not None:
logger.debug('This is a SLURM restart')
params = None
start_stage = None
if not config.multi_stage:
main_save_path = os.path.join(save_path, 'main.tar')
if os.path.exists(main_save_path):
logger.info("Training will be resumed")
params = main_save_path
kwargs['use_load_ext'] = True
train(config, main_save_path, bokeh_name, params, **kwargs)
return
stages = list(config.ordered_stages.items())
current_stage_path = save_path + '/current_stage.txt'
# Prepare the start stage
if start_stage:
start_stage = config.stage_number(start_stage)
elif os.path.exists(current_stage_path):
        # If the start stage has not been provided explicitly, assume that
# the current stage has to be continued
with open(current_stage_path) as file_:
start_stage_name = file_.read().strip()
start_stage = config.stage_number(start_stage_name)
logger.info("Training is resumed from stage " + start_stage_name)
# To continue the current stage we tell the training routine
        # to use the log, the parameters, etc. from the old main loop dump
kwargs['use_load_ext'] = True
params = '{}/{}.tar'.format(save_path, start_stage_name)
else:
start_stage = 0
if final_stage is not None:
final_stage = config.stage_number(final_stage)
else:
final_stage = len(stages) - 1
# Run all stages
for number in range(start_stage, final_stage + 1):
stage_name, stage_config = stages[number]
logger.info("Stage \"{}\" config:\n".format(stage_name)
+ pprint.pformat(stage_config, width=120))
stage_save_path = '{}/{}.tar'.format(save_path, stage_name)
stage_bokeh_name = '{}_{}'.format(save_path, stage_name)
if params:
stage_params = params
# Avoid loading the params twice
params = None
elif number > 0:
stage_params = '{}/{}{}.tar'.format(
save_path, stages[number - 1][0],
stage_config['training'].get('restart_from', ''))
else:
stage_params = None
with open(current_stage_path, 'w') as dst:
print(stage_name, file=dst)
exit_code = train(
stage_config, stage_save_path, stage_bokeh_name, stage_params, **kwargs)
if exit_code != 'success':
return
# Using load only makes sense at the first stage of the stage loop.
kwargs['use_load_ext'] = False
def search(config, params, load_path, part, decode_only, report,
decoded_save, nll_only, seed):
from matplotlib import pyplot
from lvsr.notebook import show_alignment
data = Data(**config['data'])
search_conf = config['monitoring']['search']
logger.info("Recognizer initialization started")
recognizer = create_model(config, data, load_path)
recognizer.init_beam_search(search_conf['beam_size'])
logger.info("Recognizer is initialized")
has_uttids = 'uttids' in data.info_dataset.provides_sources
add_sources = ('uttids',) if has_uttids else ()
dataset = data.get_dataset(part, add_sources)
stream = data.get_stream(
part, batches=False,
shuffle=
config['training']['shuffle'] if part == 'train' else False,
add_sources=add_sources,
num_examples=
config['monitoring']['search_on_training'] if part == 'train' else None,
seed=seed)
it = stream.get_epoch_iterator(as_dict=True)
if decode_only is not None:
decode_only = eval(decode_only)
weights = tensor.matrix('weights')
weight_statistics = theano.function(
[weights],
[weights_std(weights.dimshuffle(0, 'x', 1)),
monotonicity_penalty(weights.dimshuffle(0, 'x', 1))])
print_to = sys.stdout
if report:
alignments_path = os.path.join(report, "alignments")
if not os.path.exists(report):
os.mkdir(report)
os.mkdir(alignments_path)
print_to = open(os.path.join(report, "report.txt"), 'w')
if decoded_save:
print_to = open(decoded_save, 'w')
    num_examples = 0.0
    total_nll = 0.0
    total_errors = 0.0
    total_length = 0.0
    total_wer_errors = 0.0
    total_word_length = 0.0
if config.get('vocabulary'):
with open(os.path.expandvars(config['vocabulary'])) as f:
vocabulary = dict(line.split() for line in f.readlines())
def to_words(chars):
words = chars.split()
words = [vocabulary[word] if word in vocabulary
else vocabulary['<UNK>'] for word in words]
return words
for number, example in enumerate(it):
if decode_only and number not in decode_only:
continue
uttids = example.pop('uttids', None)
raw_groundtruth = example.pop('labels')
required_inputs = dict_subset(example, recognizer.inputs.keys())
print("Utterance {} ({})".format(number, uttids), file=print_to)
groundtruth = dataset.decode(raw_groundtruth)
groundtruth_text = dataset.pretty_print(raw_groundtruth, example)
costs_groundtruth, weights_groundtruth = recognizer.analyze(
inputs=required_inputs,
groundtruth=raw_groundtruth,
prediction=raw_groundtruth)[:2]
weight_std_groundtruth, mono_penalty_groundtruth = weight_statistics(
weights_groundtruth)
total_nll += costs_groundtruth.sum()
num_examples += 1
print("Groundtruth:", groundtruth_text, file=print_to)
print("Groundtruth cost:", costs_groundtruth.sum(), file=print_to)
print("Groundtruth weight std:", weight_std_groundtruth, file=print_to)
print("Groundtruth monotonicity penalty:", mono_penalty_groundtruth,
file=print_to)
print("Average groundtruth cost: {}".format(total_nll / num_examples),
file=print_to)
if nll_only:
print_to.flush()
continue
before = time.time()
search_kwargs = dict(
char_discount=search_conf.get('char_discount'),
round_to_inf=search_conf.get('round_to_inf'),
stop_on=search_conf.get('stop_on'),
validate_solution_function=getattr(
data.info_dataset, 'validate_solution', None),
consider_all_eos=search_conf.get('consider_all_eos'))
search_kwargs = {k: v for k, v in search_kwargs.items() if v}
outputs, search_costs = recognizer.beam_search(
required_inputs, **search_kwargs)
took = time.time() - before
recognized = dataset.decode(outputs[0])
recognized_text = dataset.pretty_print(outputs[0], example)
if recognized:
# Theano scan doesn't work with 0 length sequences
costs_recognized, weights_recognized = recognizer.analyze(
inputs=required_inputs,
groundtruth=raw_groundtruth,
prediction=outputs[0])[:2]
weight_std_recognized, mono_penalty_recognized = weight_statistics(
weights_recognized)
error = min(1, wer(groundtruth, recognized))
else:
error = 1
total_errors += len(groundtruth) * error
total_length += len(groundtruth)
if config.get('vocabulary'):
wer_error = min(1, wer(to_words(groundtruth_text),
to_words(recognized_text)))
total_wer_errors += len(groundtruth) * wer_error
total_word_length += len(groundtruth)
if report and recognized:
show_alignment(weights_groundtruth, groundtruth, bos_symbol=True)
pyplot.savefig(os.path.join(
alignments_path, "{}.groundtruth.png".format(number)))
show_alignment(weights_recognized, recognized, bos_symbol=True)
pyplot.savefig(os.path.join(
alignments_path, "{}.recognized.png".format(number)))
print("Decoding took:", took, file=print_to)
print("Beam search cost:", search_costs[0], file=print_to)
print("Recognized:", recognized_text, file=print_to)
if recognized:
print("Recognized cost:", costs_recognized.sum(), file=print_to)
print("Recognized weight std:", weight_std_recognized,
file=print_to)
print("Recognized monotonicity penalty:", mono_penalty_recognized,
file=print_to)
print("CER:", error, file=print_to)
print("Average CER:", total_errors / total_length, file=print_to)
if config.get('vocabulary'):
print("WER:", wer_error, file=print_to)
print("Average WER:", total_wer_errors / total_word_length, file=print_to)
print_to.flush()
#assert_allclose(search_costs[0], costs_recognized.sum(), rtol=1e-5)
def sample(config, params, load_path, part):
data = Data(**config['data'])
recognizer = create_model(config, data, load_path)
stream = data.get_stream(part, batches=False, shuffle=False)
it = stream.get_epoch_iterator(as_dict=True)
print_to = sys.stdout
for number, example in enumerate(it):
uttids = example.pop('uttids', None)
print("Utterance {} ({})".format(number, uttids),
file=print_to)
raw_groundtruth = example.pop('labels')
groundtruth_text = data.pretty_print(raw_groundtruth, example)
print("Groundtruth:", groundtruth_text, file=print_to)
sample = recognizer.sample(example)
recognized_text = data.pretty_print(sample, example)
print("Recognized:", recognized_text, file=print_to)
def show_config(config):
def _normalize(conf):
if isinstance(conf, (int, str, float)):
return conf
if isinstance(conf, dict):
result = {}
for key, value in conf.items():
normalized = _normalize(value)
if normalized is not None:
result[key] = normalized
else:
result[key] = str(value)
return result
if isinstance(conf, list):
result = []
for value in conf:
normalized = _normalize(value)
if normalized is not None:
result.append(normalized)
else:
result.append(str(value))
return result
return None
yaml.dump(_normalize(config), sys.stdout,
default_flow_style=False)
def show_data(config):
data = Data(**config['data'])
stream = data.get_stream("train")
batch = next(stream.get_epoch_iterator(as_dict=True))
import IPython; IPython.embed()
def test(config, **kwargs):
raise NotImplementedError()
| mit | 3,864,441,679,378,537,500 | 40.996787 | 110 | 0.608499 | false |
takeflight/wagtailannotatedimage | wagtailannotatedimage/edit_handlers.py | 1 | 2726 | import json
from django import forms
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from wagtail.wagtailadmin.edit_handlers import (BaseCompositeEditHandler,
FieldPanel, widget_with_script)
from wagtail.wagtailimages.edit_handlers import ImageChooserPanel
from wagtail.wagtailimages.widgets import AdminImageChooser
from .forms import BaseAnnotationForm
class HiddenJsonInput(forms.HiddenInput):
def render(self, name, value, attrs=None):
if value is None or value == '{}':
value = '{}'
elif isinstance(value, dict):
value = json.dumps(value)
return super(HiddenJsonInput, self).render(name, value, attrs)
class BaseAnnotatedImagePanel(BaseCompositeEditHandler):
template = 'annotated_image.html'
js_template = 'annotated_image.js'
@classmethod
def widget_overrides(cls):
return {
cls.children[0].field_name: AdminImageChooser,
cls.children[1].field_name: HiddenJsonInput}
def __init__(self, instance=None, form=None):
super(BaseAnnotatedImagePanel, self).__init__(instance=instance,
form=form)
self.image_field = self.children[0]
self.image_field_id = self.image_field.bound_field.auto_id
self.annotations_field = self.children[1]
def render(self):
html = mark_safe(render_to_string(self.template, {
'panel': self,
'image_field_id': self.image_field_id, # Used as js container id
'image_field': self.image_field,
'annotations_field': self.annotations_field,
'annotation_form': self.annotation_form.as_p(),
'heading': self.heading,
}))
js = self.render_js_init()
return widget_with_script(html, js)
def render_js_init(self):
return mark_safe(render_to_string(self.js_template, {
'image_field_id': self.image_field_id,
}))
class AnnotatedImagePanel(object):
def __init__(self, image_field, annotations_field,
annotation_form=BaseAnnotationForm(), heading=''):
self.children = [
ImageChooserPanel(image_field), FieldPanel(annotations_field)]
self.heading = heading
self.annotation_form = annotation_form
def bind_to_model(self, model):
return type(str('_AnnotatedImagePanel'), (BaseAnnotatedImagePanel,), {
'model': model,
'children': [child.bind_to_model(model) for child in self.children],
'heading': self.heading,
'annotation_form': self.annotation_form
})
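# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It only exercises the AnnotatedImagePanel constructor defined
# above; the field names 'image' and 'annotations' are hypothetical model
# fields that a Wagtail page model would have to define itself.
def _example_annotated_image_panels():
    """Return a panel list such as a page model might put in content_panels."""
    return [
        AnnotatedImagePanel(
            'image', 'annotations',
            annotation_form=BaseAnnotationForm(),
            heading='Annotated image'),
    ]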
| bsd-3-clause | -207,208,430,859,362,340 | 36.861111 | 80 | 0.628393 | false |
ultrabug/py3status | py3status/modules/wanda_the_fish.py | 1 | 5557 | """
Display a fortune-telling, swimming fish.
Wanda has no use whatsoever. It only takes up disk space and compilation time,
and if loaded, it also takes up precious bar space, memory, and cpu cycles.
Anybody found using it should be promptly sent for a psychiatric evaluation.
Configuration parameters:
cache_timeout: refresh interval for this module (default 0)
format: display format for this module
(default '{nomotion}[{fortune} ]{wanda}{motion}')
fortune_timeout: refresh interval for fortune (default 60)
Format placeholders:
{fortune} one of many aphorisms or vague prophecies
{wanda} name of one of the most commonly kept freshwater aquarium fish
{motion} biologically propelled motion through a liquid medium
{nomotion} opposite behavior of motion to prevent modules from shifting
Optional:
fortune-mod: the fortune cookie program from bsd games
Examples:
```
# disable motions when not in use
wanda_the_fish {
format = '[\\?if=fortune {nomotion}][{fortune} ]'
format += '{wanda}[\\?if=fortune {motion}]'
}
# no updates, no motions, yes fortunes, you click
wanda_the_fish {
format = '[{fortune} ]{wanda}'
cache_timeout = -1
}
# wanda moves, fortune stays
wanda_the_fish {
format = '[{fortune} ]{nomotion}{wanda}{motion}'
}
# wanda is swimming too fast, slow down wanda
wanda_the_fish {
cache_timeout = 2
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'innovate, v.: To annoy people.'},
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>< ', 'color': '#ff8c00'},
]
idle
[
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>3', 'color': '#ff8c00'},
]
py3status
[
{'full_text': 'py3status is so cool!'},
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>< ', 'color': '#ff8c00'},
]
"""
from time import time
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 0
format = "{nomotion}[{fortune} ]{wanda}{motion}"
fortune_timeout = 60
def post_config_hook(self):
body = (
r"[\?color=orange&show <"
r"[\?color=lightblue&show º]"
r"[\?color=darkorange&show ,]))"
r"[\?color=darkorange&show ))>{}]]"
)
wanda = [body.format(fin) for fin in ("<", ">", "<", "3")]
self.wanda = [self.py3.safe_format(x) for x in wanda]
self.wanda_length = len(self.wanda)
self.index = 0
self.fortune_command = ["fortune", "-as"]
self.fortune = self.py3.storage_get("fortune") or None
self.toggled = self.py3.storage_get("toggled") or False
self.motions = {"motion": " ", "nomotion": ""}
# deal with {new,old} timeout between storage
fortune_timeout = self.py3.storage_get("fortune_timeout")
timeout = None
if self.fortune_timeout != fortune_timeout:
timeout = time() + self.fortune_timeout
self.time = (
timeout or self.py3.storage_get("time") or (time() + self.fortune_timeout)
)
def _set_fortune(self, state=None, new=False):
if not self.fortune_command:
return
if new:
try:
fortune_data = self.py3.command_output(self.fortune_command)
except self.py3.CommandError:
self.fortune = ""
self.fortune_command = None
else:
self.fortune = " ".join(fortune_data.split())
self.time = time() + self.fortune_timeout
elif state is None:
if self.toggled and time() >= self.time:
self._set_fortune(new=True)
else:
self.toggled = state
if state:
self._set_fortune(new=True)
else:
self.fortune = None
def _set_motion(self):
for k in self.motions:
self.motions[k] = "" if self.motions[k] else " "
def _set_wanda(self):
self.index += 1
if self.index >= self.wanda_length:
self.index = 0
def wanda_the_fish(self):
self._set_fortune()
self._set_motion()
self._set_wanda()
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format,
{
"fortune": self.fortune,
"motion": self.motions["motion"],
"nomotion": self.motions["nomotion"],
"wanda": self.wanda[self.index],
},
),
}
def kill(self):
self.py3.storage_set("toggled", self.toggled)
self.py3.storage_set("fortune", self.fortune)
self.py3.storage_set("fortune_timeout", self.fortune_timeout)
self.py3.storage_set("time", self.time)
def on_click(self, event):
if not self.fortune_command:
return
self._set_fortune(not self.toggled)
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | -7,311,590,463,705,585,000 | 29.032432 | 86 | 0.559935 | false |
bmccary/csvu | csvu/tail.py | 1 | 1800 |
import traceback
from csvu import (
reader_make,
writer_make,
)
from csvu.cli import (
default_arg_parser,
positive_int,
)
def cli_arg_parser():
description = 'CSVU Tail is like GNU Tail, but for CSV files.'
parser = default_arg_parser(
description=description,
file0='input',
file1='output',
dialect0='input',
dialect1='output',
headless=True,
)
parser.add_argument(
'count',
type=positive_int,
help='''Return the last :count: rows.'''
)
return parser
def filter_g(row_g, count, debug=False):
L = list(row_g)
for row in L[-count:]:
yield row
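# Illustrative sketch (added for clarity; not part of the original module):
# filter_g is the whole "tail" logic, so it can be exercised directly on any
# iterable of rows. The sample rows below are hypothetical dicts such as
# csv.DictReader would produce.
def _example_filter_g():
    rows = [{'n': str(i)} for i in range(10)]
    return list(filter_g(row_g=iter(rows), count=3))  # -> last three rows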
def cli():
parser = cli_arg_parser()
args = parser.parse_args()
try:
reader_d = reader_make(
file_or_path=args.file0,
dialect=args.dialect0,
headless=args.headless,
)
dialect0 = reader_d['dialect']
fieldnames = reader_d['fieldnames']
reader_g = reader_d['reader']
g = filter_g(
row_g=reader_g,
count=args.count,
)
dialect1 = args.dialect1
if dialect1 == 'dialect0':
dialect1 = dialect0
writer_f = writer_make(
file_or_path=args.file1,
dialect=dialect1,
headless=args.headless,
fieldnames=fieldnames,
)
writer_f(g)
except Exception as exc:
m = traceback.format_exc()
parser.error(m)
| mit | -4,675,949,467,743,082,000 | 20.95122 | 66 | 0.455556 | false |
chkothe/pylsl | pylsl/pylsl.py | 1 | 51549 | """Python API for the lab streaming layer.
The lab streaming layer provides a set of functions to make instrument data
accessible in real time within a lab network. From there, streams can be
picked up by recording programs, viewing programs or custom experiment
applications that access data streams in real time.
The API covers two areas:
- The "push API" allows to create stream outlets and to push data (regular
or irregular measurement time series, event data, coded audio/video frames,
etc.) into them.
- The "pull API" allows to create stream inlets and read time-synched
experiment data from them (for recording, viewing or experiment control).
pylsl has been tested with Python 2.7 and 3.4.
"""
import os
import platform
import struct
from ctypes import CDLL, util, byref, c_char_p, c_void_p, c_double, c_int, \
c_long, c_float, c_short, c_byte, c_longlong
__all__ = ['IRREGULAR_RATE', 'DEDUCED_TIMESTAMP', 'FOREVER', 'cf_float32',
'cf_double64', 'cf_string', 'cf_int32', 'cf_int16', 'cf_int8',
'cf_int64', 'cf_undefined', 'protocol_version', 'library_version',
'local_clock', 'StreamInfo', 'StreamOutlet', 'resolve_streams',
'resolve_byprop', 'resolve_bypred', 'StreamInlet', 'XMLElement',
'ContinuousResolver', 'TimeoutError', 'LostError',
'InvalidArgumentError', 'InternalError', 'stream_info',
'stream_outlet', 'stream_inlet', 'xml_element', 'timeout_error',
'lost_error', 'vectorf', 'vectord', 'vectorl', 'vectori',
'vectors', 'vectorc', 'vectorstr', 'resolve_stream']
# =================
# === Constants ===
# =================
# Constant to indicate that a stream has variable sampling rate.
IRREGULAR_RATE = 0.0
# Constant to indicate that a sample has the next successive time stamp
# according to the stream's defined sampling rate. Optional optimization to
# transmit less data per sample.
DEDUCED_TIMESTAMP = -1.0
# A very large time value (ca. 1 year); can be used in timeouts.
FOREVER = 32000000.0
# Value formats supported by LSL. LSL data streams are sequences of samples,
# each of which is a same-size vector of values with one of the below types.
# For up to 24-bit precision measurements in the appropriate physical unit (
# e.g., microvolts). Integers from -16777216 to 16777216 are represented
# accurately.
cf_float32 = 1
# For universal numeric data as long as permitted by network and disk budget.
# The largest representable integer is 53-bit.
cf_double64 = 2
# For variable-length ASCII strings or data blobs, such as video frames,
# complex event descriptions, etc.
cf_string = 3
# For high-rate digitized formats that require 32-bit precision. Depends
# critically on meta-data to represent meaningful units. Useful for
# application event codes or other coded data.
cf_int32 = 4
# For very high bandwidth signals or CD quality audio (for professional audio
# float is recommended).
cf_int16 = 5
# For binary signals or other coded data.
cf_int8 = 6
# For now only for future compatibility. Support for this type is not
# available on all languages and platforms.
cf_int64 = 7
# Can not be transmitted.
cf_undefined = 0
# ==========================================================
# === Free Functions provided by the lab streaming layer ===
# ==========================================================
def protocol_version():
"""Protocol version.
The major version is protocol_version() / 100;
The minor version is protocol_version() % 100;
Clients with different minor versions are protocol-compatible with each
other while clients with different major versions will refuse to work
together.
"""
return lib.lsl_protocol_version()
def library_version():
"""Version of the underlying liblsl library.
The major version is library_version() / 100;
The minor version is library_version() % 100;
"""
return lib.lsl_library_version()
def local_clock():
"""Obtain a local system time stamp in seconds.
    The resolution is better than a millisecond. This reading can be used to
assign time stamps to samples as they are being acquired.
If the "age" of a sample is known at a particular time (e.g., from USB
transmission delays), it can be used as an offset to lsl_local_clock() to
obtain a better estimate of when a sample was actually captured. See
StreamOutlet.push_sample() for a use case.
"""
return lib.lsl_local_clock()
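# Illustrative sketch (added for clarity; not part of the original API): if a
# device-specific acquisition delay is known (the 5 ms below is a hypothetical
# value), it can simply be subtracted from local_clock() to obtain a better
# capture-time estimate to pass to push_sample().
def _example_delay_compensated_timestamp(acquisition_delay=0.005):
    return local_clock() - acquisition_delay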
# ==========================
# === Stream Declaration ===
# ==========================
class StreamInfo:
"""The StreamInfo object stores the declaration of a data stream.
Represents the following information:
a) stream data format (#channels, channel format)
b) core information (stream name, content type, sampling rate)
c) optional meta-data about the stream content (channel labels,
measurement units, etc.)
Whenever a program wants to provide a new stream on the lab network it will
typically first create a StreamInfo to describe its properties and then
construct a StreamOutlet with it to create the stream on the network.
Recipients who discover the outlet can query the StreamInfo; it is also
written to disk when recording the stream (playing a similar role as a file
header).
"""
def __init__(self, name='untitled', type='', channel_count=1,
nominal_srate=IRREGULAR_RATE, channel_format=cf_float32,
source_id='', handle=None):
"""Construct a new StreamInfo object.
Core stream information is specified here. Any remaining meta-data can
be added later.
Keyword arguments:
name -- Name of the stream. Describes the device (or product series)
that this stream makes available (for use by programs,
experimenters or data analysts). Cannot be empty.
type -- Content type of the stream. By convention LSL uses the content
types defined in the XDF file format specification where
applicable (code.google.com/p/xdf). The content type is the
preferred way to find streams (as opposed to searching by name).
channel_count -- Number of channels per sample. This stays constant for
the lifetime of the stream. (default 1)
nominal_srate -- The sampling rate (in Hz) as advertised by the data
source, regular (otherwise set to IRREGULAR_RATE).
(default IRREGULAR_RATE)
channel_format -- Format/type of each channel. If your channels have
different formats, consider supplying multiple
streams or use the largest type that can hold
them all (such as cf_double64). It is also allowed
to pass this as a string, without the cf_ prefix,
e.g., 'float32' (default cf_float32)
source_id -- Unique identifier of the device or source of the data, if
available (such as the serial number). This is critical
for system robustness since it allows recipients to
recover from failure even after the serving app, device or
computer crashes (just by finding a stream with the same
source id on the network again). Therefore, it is highly
recommended to always try to provide whatever information
can uniquely identify the data source itself.
(default '')
"""
if handle is not None:
self.obj = c_void_p(handle)
else:
if isinstance(channel_format, str):
channel_format = string2fmt[channel_format]
self.obj = lib.lsl_create_streaminfo(c_char_p(str.encode(name)),
c_char_p(str.encode(type)),
channel_count,
c_double(nominal_srate),
channel_format,
c_char_p(str.encode(source_id)))
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create stream description "
"object.")
def __del__(self):
""" Destroy a previously created StreamInfo object. """
# noinspection PyBroadException
try:
lib.lsl_destroy_streaminfo(self.obj)
except:
pass
# === Core Information (assigned at construction) ===
def name(self):
"""Name of the stream.
This is a human-readable name. For streams offered by device modules,
it refers to the type of device or product series that is generating
the data of the stream. If the source is an application, the name may
be a more generic or specific identifier. Multiple streams with the
same name can coexist, though potentially at the cost of ambiguity (for
the recording app or experimenter).
"""
return lib.lsl_get_name(self.obj).decode('utf-8')
def type(self):
"""Content type of the stream.
The content type is a short string such as "EEG", "Gaze" which
describes the content carried by the channel (if known). If a stream
        contains mixed content, this value need not be assigned but may instead
        be stored in the description of channel types. To be useful to
        applications and automated processing systems, using the recommended
content types is preferred.
"""
return lib.lsl_get_type(self.obj).decode('utf-8')
def channel_count(self):
"""Number of channels of the stream.
A stream has at least one channel; the channel count stays constant for
all samples.
"""
return lib.lsl_get_channel_count(self.obj)
def nominal_srate(self):
"""Sampling rate of the stream, according to the source (in Hz).
If a stream is irregularly sampled, this should be set to
IRREGULAR_RATE.
Note that no data will be lost even if this sampling rate is incorrect
or if a device has temporary hiccups, since all samples will be
transmitted anyway (except for those dropped by the device itself).
However, when the recording is imported into an application, a good
data importer may correct such errors more accurately if the advertised
sampling rate was close to the specs of the device.
"""
return lib.lsl_get_nominal_srate(self.obj)
def channel_format(self):
"""Channel format of the stream.
All channels in a stream have the same format. However, a device might
offer multiple time-synched streams each with its own format.
"""
return lib.lsl_get_channel_format(self.obj)
def source_id(self):
"""Unique identifier of the stream's source, if available.
The unique source (or device) identifier is an optional piece of
        information that, if available, allows endpoints (such as the
        recording program) to re-acquire a stream automatically once it is
back online.
"""
return lib.lsl_get_source_id(self.obj).decode('utf-8')
# === Hosting Information (assigned when bound to an outlet/inlet) ===
def version(self):
"""Protocol version used to deliver the stream."""
return lib.lsl_get_version(self.obj)
def created_at(self):
"""Creation time stamp of the stream.
This is the time stamp when the stream was first created
(as determined via local_clock() on the providing machine).
"""
return lib.lsl_get_created_at(self.obj)
def uid(self):
"""Unique ID of the stream outlet instance (once assigned).
This is a unique identifier of the stream outlet, and is guaranteed to
be different across multiple instantiations of the same outlet (e.g.,
after a re-start).
"""
return lib.lsl_get_uid(self.obj).decode('utf-8')
def session_id(self):
"""Session ID for the given stream.
The session id is an optional human-assigned identifier of the
recording session. While it is rarely used, it can be used to prevent
        concurrent recording activities on the same sub-network (e.g., in
multiple experiment areas) from seeing each other's streams
(can be assigned in a configuration file read by liblsl, see also
documentation on configuration files).
"""
return lib.lsl_get_session_id(self.obj).decode('utf-8')
def hostname(self):
"""Hostname of the providing machine."""
return lib.lsl_get_hostname(self.obj).decode('utf-8')
# === Data Description (can be modified) ===
def desc(self):
"""Extended description of the stream.
It is highly recommended that at least the channel labels are described
here. See code examples in the documentation. Other information, such
as amplifier settings, measurement units if deviating from defaults,
setup information, subject information, etc., can be specified here, as
well. See Meta-Data Recommendations in the docs.
Important: if you use a stream content type for which meta-data
recommendations exist, please try to lay out your meta-data in
agreement with these recommendations for compatibility with other
applications.
"""
return XMLElement(lib.lsl_get_desc(self.obj))
def as_xml(self):
"""Retrieve the entire stream_info in XML format.
This yields an XML document (in string form) whose top-level element is
<description>. The description element contains one element for each
field of the stream_info class, including:
a) the core elements <name>, <type>, <channel_count>, <nominal_srate>,
<channel_format>, <source_id>
b) the misc elements <version>, <created_at>, <uid>, <session_id>,
<v4address>, <v4data_port>, <v4service_port>, <v6address>,
<v6data_port>, <v6service_port>
c) the extended description element <desc> with user-defined
sub-elements.
"""
return lib.lsl_get_xml(self.obj).decode('utf-8')
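# Illustrative sketch (added for clarity; not part of the original module):
# declare a small EEG stream and attach per-channel labels to its meta-data,
# as recommended in StreamInfo.desc(). The stream name, channel labels and
# source_id are hypothetical example values.
def _example_declare_eeg_stream():
    info = StreamInfo('ExampleAmp', 'EEG', channel_count=4, nominal_srate=100,
                      channel_format='float32', source_id='exampleamp_1234')
    channels = info.desc().append_child("channels")
    for label in ["C3", "C4", "Cz", "Pz"]:
        ch = channels.append_child("channel")
        ch.append_child_value("label", label)
        ch.append_child_value("unit", "microvolts")
        ch.append_child_value("type", "EEG")
    return info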
# =====================
# === Stream Outlet ===
# =====================
class StreamOutlet:
"""A stream outlet.
Outlets are used to make streaming data (and the meta-data) available on
the lab network.
"""
def __init__(self, info, chunk_size=0, max_buffered=360):
"""Establish a new stream outlet. This makes the stream discoverable.
Keyword arguments:
        info -- The StreamInfo object to describe this stream. Stays
constant over the lifetime of the outlet.
chunk_size --- Optionally the desired chunk granularity (in samples)
for transmission. If unspecified, each push operation
yields one chunk. Inlets can override this setting.
(default 0)
max_buffered -- Optionally the maximum amount of data to buffer (in
seconds if there is a nominal sampling rate, otherwise
x100 in samples). The default is 6 minutes of data.
Note that, for high-bandwidth data, you will want to
use a lower value here to avoid running out of RAM.
(default 360)
"""
self.obj = lib.lsl_create_outlet(info.obj, chunk_size, max_buffered)
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create stream outlet.")
self.channel_format = info.channel_format()
self.channel_count = info.channel_count()
self.do_push_sample = fmt2push_sample[self.channel_format]
self.do_push_chunk = fmt2push_chunk[self.channel_format]
self.value_type = fmt2type[self.channel_format]
self.sample_type = self.value_type*self.channel_count
def __del__(self):
"""Destroy an outlet.
The outlet will no longer be discoverable after destruction and all
connected inlets will stop delivering data.
"""
# noinspection PyBroadException
try:
lib.lsl_destroy_outlet(self.obj)
except:
pass
def push_sample(self, x, timestamp=0.0, pushthrough=True):
"""Push a sample into the outlet.
Each entry in the list corresponds to one channel.
Keyword arguments:
x -- A list of values to push (one per channel).
timestamp -- Optionally the capture time of the sample, in agreement
with local_clock(); if omitted, the current
time is used. (default 0.0)
pushthrough -- Whether to push the sample through to the receivers
instead of buffering it with subsequent samples.
Note that the chunk_size, if specified at outlet
construction, takes precedence over the pushthrough flag.
(default True)
"""
if len(x) == self.channel_count:
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
handle_error(self.do_push_sample(self.obj, self.sample_type(*x),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("length of the data must correspond to the "
"stream's channel count.")
def push_chunk(self, x, timestamp=0.0, pushthrough=True):
"""Push a list of samples into the outlet.
samples -- A list of samples, either as a list of lists or a list of
multiplexed values.
timestamp -- Optionally the capture time of the most recent sample, in
agreement with local_clock(); if omitted, the current
time is used. The time stamps of other samples are
automatically derived according to the sampling rate of
the stream. (default 0.0)
pushthrough Whether to push the chunk through to the receivers instead
of buffering it with subsequent samples. Note that the
chunk_size, if specified at outlet construction, takes
precedence over the pushthrough flag. (default True)
"""
if len(x):
if type(x[0]) is list:
x = [v for sample in x for v in sample]
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
if len(x) % self.channel_count == 0:
constructor = self.value_type*len(x)
# noinspection PyCallingNonCallable
handle_error(self.do_push_chunk(self.obj, constructor(*x),
c_long(len(x)),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("each sample must have the same number of "
"channels.")
def have_consumers(self):
"""Check whether consumers are currently registered.
While it does not hurt, there is technically no reason to push samples
if there is no consumer.
"""
return bool(lib.lsl_have_consumers(self.obj))
def wait_for_consumers(self, timeout):
"""Wait until some consumer shows up (without wasting resources).
Returns True if the wait was successful, False if the timeout expired.
"""
return bool(lib.lsl_wait_for_consumers(self.obj, c_double(timeout)))
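# Illustrative sketch (added for clarity; not part of the original module):
# build an outlet from a hypothetical 4-channel stream declaration and push a
# short burst of random samples, stamping each one with local_clock().
def _example_push_random_samples(n_samples=10):
    import random
    info = StreamInfo('ExampleAmp', 'EEG', 4, 100, 'float32', 'exampleamp_1234')
    outlet = StreamOutlet(info)
    for _ in range(n_samples):
        outlet.push_sample([random.random() for _ in range(4)],
                           timestamp=local_clock())
    return outlet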
# =========================
# === Resolve Functions ===
# =========================
def resolve_streams(wait_time=1.0):
"""Resolve all streams on the network.
This function returns all currently available streams from any outlet on
the network. The network is usually the subnet specified at the local
router, but may also include a group of machines visible to each other via
multicast packets (given that the network supports it), or list of
hostnames. These details may optionally be customized by the experimenter
in a configuration file (see configuration file in the documentation).
Keyword arguments:
wait_time -- The waiting time for the operation, in seconds, to search for
streams. Warning: If this is too short (<0.5s) only a subset
(or none) of the outlets that are present on the network may
be returned. (default 1.0)
Returns a list of StreamInfo objects (with empty desc field), any of which
can subsequently be used to open an inlet. The full description can be
retrieved from the inlet.
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolve_all(byref(buffer), 1024, c_double(wait_time))
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_byprop(prop, value, minimum=1, timeout=FOREVER):
"""Resolve all streams with a specific value for a given property.
If the goal is to resolve a specific stream, this method is preferred over
resolving all streams and then selecting the desired one.
Keyword arguments:
prop -- The StreamInfo property that should have a specific value (e.g.,
"name", "type", "source_id", or "desc/manufaturer").
value -- The string value that the property should have (e.g., "EEG" as
the type property).
minimum -- Return at least this many streams. (default 1)
timeout -- Optionally a timeout of the operation, in seconds. If the
timeout expires, less than the desired number of streams
(possibly none) will be returned. (default FOREVER)
Returns a list of matching StreamInfo objects (with empty desc field), any
of which can subsequently be used to open an inlet.
    Example: results = resolve_byprop("type", "EEG")
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolve_byprop(byref(buffer), 1024,
c_char_p(str.encode(prop)),
c_char_p(str.encode(value)),
minimum,
c_double(timeout))
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
def resolve_bypred(predicate, minimum=1, timeout=FOREVER):
"""Resolve all streams that match a given predicate.
Advanced query that allows to impose more conditions on the retrieved
streams; the given string is an XPath 1.0 predicate for the <description>
node (omitting the surrounding []'s), see also
http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.
Keyword arguments:
predicate -- The predicate string, e.g. "name='BioSemi'" or
"type='EEG' and starts-with(name,'BioSemi') and
count(description/desc/channels/channel)=32"
minimum -- Return at least this many streams. (default 1)
timeout -- Optionally a timeout of the operation, in seconds. If the
timeout expires, less than the desired number of streams
(possibly none) will be returned. (default FOREVER)
Returns a list of matching StreamInfo objects (with empty desc field), any
of which can subsequently be used to open an inlet.
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolve_bypred(byref(buffer), 1024,
c_char_p(str.encode(predicate)),
minimum,
c_double(timeout))
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
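# Illustrative sketch (added for clarity; not part of the original module):
# look for EEG streams by property first and fall back to an XPath predicate;
# the 5-second timeout and the 'ExampleAmp' name prefix are arbitrary choices.
def _example_resolve_eeg_streams(timeout=5.0):
    streams = resolve_byprop('type', 'EEG', timeout=timeout)
    if not streams:
        streams = resolve_bypred("starts-with(name,'ExampleAmp')",
                                 timeout=timeout)
    return streams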
# ====================
# === Stream Inlet ===
# ====================
class StreamInlet:
"""A stream inlet.
Inlets are used to receive streaming data (and meta-data) from the lab
network.
"""
def __init__(self, info, max_buflen=360, max_chunklen=0, recover=True):
"""Construct a new stream inlet from a resolved stream description.
Keyword arguments:
        info -- A resolved stream description object (as coming from one
of the resolver functions). Note: the stream_inlet may also be
constructed with a fully-specified stream_info, if the desired
channel format and count is already known up-front, but this is
strongly discouraged and should only ever be done if there is
no time to resolve the stream up-front (e.g., due to
limitations in the client program).
max_buflen -- Optionally the maximum amount of data to buffer (in
seconds if there is a nominal sampling rate, otherwise
x100 in samples). Recording applications want to use a
fairly large buffer size here, while real-time
applications would only buffer as much as they need to
perform their next calculation. (default 360)
max_chunklen -- Optionally the maximum size, in samples, at which
chunks are transmitted (the default corresponds to the
chunk sizes used by the sender). Recording programs
can use a generous size here (leaving it to the network
how to pack things), while real-time applications may
want a finer (perhaps 1-sample) granularity. If left
unspecified (=0), the sender determines the chunk
granularity. (default 0)
recover -- Try to silently recover lost streams that are recoverable
(=those that that have a source_id set). In all other cases
(recover is False or the stream is not recoverable)
functions may throw a lost_error if the stream's source is
lost (e.g., due to an app or computer crash). (default True)
"""
if type(info) is list:
raise TypeError("description needs to be of type StreamInfo, "
"got a list.")
self.obj = lib.lsl_create_inlet(info.obj, max_buflen, max_chunklen,
recover)
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create stream inlet.")
self.channel_format = info.channel_format()
self.channel_count = info.channel_count()
self.do_pull_sample = fmt2pull_sample[self.channel_format]
self.do_pull_chunk = fmt2pull_chunk[self.channel_format]
self.value_type = fmt2type[self.channel_format]
self.sample_type = self.value_type*self.channel_count
self.sample = self.sample_type()
self.buffers = {}
def __del__(self):
"""Destructor. The inlet will automatically disconnect if destroyed."""
# noinspection PyBroadException
try:
lib.lsl_destroy_inlet(self.obj)
except:
pass
def info(self, timeout=FOREVER):
"""Retrieve the complete information of the given stream.
This includes the extended description. Can be invoked at any time of
the stream's lifetime.
Keyword arguments:
timeout -- Timeout of the operation. (default FOREVER)
Throws a TimeoutError (if the timeout expires), or LostError (if the
stream source has been lost).
"""
errcode = c_int()
result = lib.lsl_get_fullinfo(self.obj, c_double(timeout),
byref(errcode))
handle_error(errcode)
return StreamInfo(handle=result)
def open_stream(self, timeout=FOREVER):
"""Subscribe to the data stream.
All samples pushed in at the other end from this moment onwards will be
queued and eventually be delivered in response to pull_sample() or
pull_chunk() calls. Pulling a sample without some preceding open_stream
is permitted (the stream will then be opened implicitly).
Keyword arguments:
timeout -- Optional timeout of the operation (default FOREVER).
Throws a TimeoutError (if the timeout expires), or LostError (if the
stream source has been lost).
"""
errcode = c_int()
lib.lsl_open_stream(self.obj, c_double(timeout), byref(errcode))
handle_error(errcode)
def close_stream(self):
"""Drop the current data stream.
All samples that are still buffered or in flight will be dropped and
transmission and buffering of data for this inlet will be stopped. If
an application stops being interested in data from a source
(temporarily or not) but keeps the outlet alive, it should call
lsl_close_stream() to not waste unnecessary system and network
resources.
"""
lib.lsl_close_stream(self.obj)
def time_correction(self, timeout=FOREVER):
"""Retrieve an estimated time correction offset for the given stream.
        The first call to this function takes several milliseconds until a
reliable first estimate is obtained. Subsequent calls are instantaneous
(and rely on periodic background updates). The precision of these
estimates should be below 1 ms (empirically within +/-0.2 ms).
Keyword arguments:
timeout -- Timeout to acquire the first time-correction estimate
(default FOREVER).
Returns the current time correction estimate. This is the number that
needs to be added to a time stamp that was remotely generated via
local_clock() to map it into the local clock domain of this
machine.
Throws a TimeoutError (if the timeout expires), or LostError (if the
stream source has been lost).
"""
errcode = c_int()
result = lib.lsl_time_correction(self.obj, c_double(timeout),
byref(errcode))
handle_error(errcode)
return result
def pull_sample(self, timeout=FOREVER, sample=None):
"""Pull a sample from the inlet and return it.
Keyword arguments:
timeout -- The timeout for this operation, if any. (default FOREVER)
If this is passed as 0.0, then the function returns only a
sample if one is buffered for immediate pickup.
Returns a tuple (sample,timestamp) where sample is a list of channel
values and timestamp is the capture time of the sample on the remote
machine, or (None,None) if no new sample was available. To remap this
time stamp to the local clock, add the value returned by
.time_correction() to it.
Throws a LostError if the stream source has been lost. Note that, if
the timeout expires, no TimeoutError is thrown (because this case is
not considered an error).
"""
# support for the legacy API
if type(timeout) is list:
assign_to = timeout
timeout = sample if type(sample) is float else 0.0
else:
assign_to = None
errcode = c_int()
timestamp = self.do_pull_sample(self.obj, byref(self.sample),
self.channel_count, c_double(timeout),
byref(errcode))
handle_error(errcode)
if timestamp:
sample = [v for v in self.sample]
if self.channel_format == cf_string:
sample = [v.decode('utf-8') for v in sample]
if assign_to is not None:
assign_to[:] = sample
return sample, timestamp
else:
return None, None
def pull_chunk(self, timeout=0.0, max_samples=1024):
"""Pull a chunk of samples from the inlet.
Keyword arguments:
timeout -- The timeout of the operation; if passed as 0.0, then only
samples available for immediate pickup will be returned.
(default 0.0)
max_samples -- Maximum number of samples to return. (default
1024)
Returns a tuple (samples,timestamps) where samples is a list of samples
(each itself a list of values), and timestamps is a list of time-stamps.
Throws a LostError if the stream source has been lost.
"""
# look up a pre-allocated buffer of appropriate length
num_channels = self.channel_count
max_values = max_samples*num_channels
if max_samples not in self.buffers:
# noinspection PyCallingNonCallable
self.buffers[max_samples] = ((self.value_type*max_values)(),
(c_double*max_samples)())
buffer = self.buffers[max_samples]
# read data into it
errcode = c_int()
# noinspection PyCallingNonCallable
num_elements = self.do_pull_chunk(self.obj, byref(buffer[0]),
byref(buffer[1]), max_values,
max_samples, c_double(timeout),
byref(errcode))
handle_error(errcode)
# return results (note: could offer a more efficient format in the
# future, e.g., a numpy array)
num_samples = num_elements/num_channels
samples = [[buffer[0][s*num_channels+c] for c in range(num_channels)]
for s in range(int(num_samples))]
if self.channel_format == cf_string:
samples = [[v.decode('utf-8') for v in s] for s in samples]
timestamps = [buffer[1][s] for s in range(int(num_samples))]
return samples, timestamps
def samples_available(self):
"""Query whether samples are currently available for immediate pickup.
Note that it is not a good idea to use samples_available() to determine
whether a pull_*() call would block: to be sure, set the pull timeout
to 0.0 or an acceptably low value. If the underlying implementation
supports it, the value will be the number of samples available
(otherwise it will be 1 or 0).
"""
return lib.lsl_samples_available(self.obj)
def was_clock_reset(self):
"""Query whether the clock was potentially reset since the last call.
        This rarely-used function is only needed for applications that
combine multiple time_correction values to estimate precise clock
drift if they should tolerate cases where the source machine was
hot-swapped or restarted.
"""
return bool(lib.lsl_was_clock_reset(self.obj))
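# Illustrative sketch (added for clarity; not part of the original module):
# resolve one EEG stream, open an inlet on it and pull a handful of samples,
# remapping the remote time stamps into the local clock domain. Timeouts and
# sample counts are arbitrary example values.
def _example_pull_a_few_samples(n_samples=10):
    streams = resolve_byprop('type', 'EEG', timeout=5.0)
    if not streams:
        return []
    inlet = StreamInlet(streams[0], max_buflen=60)
    offset = inlet.time_correction(timeout=5.0)
    collected = []
    for _ in range(n_samples):
        sample, timestamp = inlet.pull_sample(timeout=1.0)
        if sample is not None:
            collected.append((timestamp + offset, sample))
    return collected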
# ===================
# === XML Element ===
# ===================
class XMLElement:
"""A lightweight XML element tree modeling the .desc() field of StreamInfo.
Has a name and can have multiple named children or have text content as
value; attributes are omitted. Insider note: The interface is modeled after
a subset of pugixml's node type and is compatible with it. See also
http://pugixml.googlecode.com/svn/tags/latest/docs/manual/access.html for
additional documentation.
"""
def __init__(self, handle):
"""Construct new XML element from existing handle."""
self.e = c_void_p(handle)
# === Tree Navigation ===
def first_child(self):
"""Get the first child of the element."""
return XMLElement(lib.lsl_first_child(self.e))
def last_child(self):
"""Get the last child of the element."""
return XMLElement(lib.lsl_last_child(self.e))
def child(self, name):
"""Get a child with a specified name."""
return XMLElement(lib.lsl_child(self.e, str.encode(name)))
def next_sibling(self, name=None):
"""Get the next sibling in the children list of the parent node.
If a name is provided, the next sibling with the given name is returned.
"""
if name is None:
return XMLElement(lib.lsl_next_sibling(self.e))
else:
return XMLElement(lib.lsl_next_sibling_n(self.e, str.encode(name)))
def previous_sibling(self, name=None):
"""Get the previous sibling in the children list of the parent node.
If a name is provided, the previous sibling with the given name is
returned.
"""
if name is None:
return XMLElement(lib.lsl_previous_sibling(self.e))
else:
return XMLElement(lib.lsl_previous_sibling_n(self.e,
str.encode(name)))
def parent(self):
"""Get the parent node."""
return XMLElement(lib.lsl_parent(self.e))
# === Content Queries ===
def empty(self):
"""Whether this node is empty."""
return bool(lib.lsl_empty(self.e))
def is_text(self):
"""Whether this is a text body (instead of an XML element).
True both for plain char data and CData.
"""
return bool(lib.lsl_is_text(self.e))
def name(self):
"""Name of the element."""
return lib.lsl_name(self.e).decode('utf-8')
def value(self):
"""Value of the element."""
return lib.lsl_value(self.e).decode('utf-8')
def child_value(self, name=None):
"""Get child value (value of the first child that is text).
If a name is provided, then the value of the first child with the
given name is returned.
"""
if name is None:
res = lib.lsl_child_value(self.e)
else:
res = lib.lsl_child_value_n(self.e, str.encode(name))
return res.decode('utf-8')
# === Modification ===
def append_child_value(self, name, value):
"""Append a child node with a given name, which has a (nameless)
plain-text child with the given text value."""
return XMLElement(lib.lsl_append_child_value(self.e,
str.encode(name),
str.encode(value)))
def prepend_child_value(self, name, value):
"""Prepend a child node with a given name, which has a (nameless)
plain-text child with the given text value."""
return XMLElement(lib.lsl_prepend_child_value(self.e,
str.encode(name),
str.encode(value)))
def set_child_value(self, name, value):
"""Set the text value of the (nameless) plain-text child of a named
child node."""
return XMLElement(lib.lsl_set_child_value(self.e,
str.encode(name),
str.encode(value)))
def set_name(self, name):
"""Set the element's name. Returns False if the node is empty."""
return bool(lib.lsl_set_name(self.e, str.encode(name)))
def set_value(self, value):
"""Set the element's value. Returns False if the node is empty."""
return bool(lib.lsl_set_value(self.e, str.encode(value)))
def append_child(self, name):
"""Append a child element with the specified name."""
return XMLElement(lib.lsl_append_child(self.e, str.encode(name)))
def prepend_child(self, name):
"""Prepend a child element with the specified name."""
return XMLElement(lib.lsl_prepend_child(self.e, str.encode(name)))
def append_copy(self, elem):
"""Append a copy of the specified element as a child."""
return XMLElement(lib.lsl_append_copy(self.e, elem.e))
def prepend_copy(self, elem):
"""Prepend a copy of the specified element as a child."""
return XMLElement(lib.lsl_prepend_copy(self.e, elem.e))
def remove_child(self, rhs):
"""Remove a given child element, specified by name or as element."""
if type(rhs) is XMLElement:
lib.lsl_remove_child(self.e, rhs.e)
else:
lib.lsl_remove_child_n(self.e, rhs)
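# Illustrative sketch (added for clarity; not part of the original module):
# walk the <channels> element of a stream description and collect the channel
# labels, assuming the meta-data layout used in the declaration example above.
def _example_read_channel_labels(info):
    labels = []
    ch = info.desc().child("channels").child("channel")
    while not ch.empty():
        labels.append(ch.child_value("label"))
        ch = ch.next_sibling("channel")
    return labels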
# ==========================
# === ContinuousResolver ===
# ==========================
class ContinuousResolver:
"""A convenience class resolving streams continuously in the background.
This object can be queried at any time for the set of streams that are
currently visible on the network.
"""
def __init__(self, prop=None, value=None, pred=None, forget_after=5.0):
"""Construct a new continuous_resolver.
Keyword arguments:
forget_after -- When a stream is no longer visible on the network
(e.g., because it was shut down), this is the time in
seconds after which it is no longer reported by the
resolver.
"""
if pred is not None:
if prop is not None or value is not None:
raise ValueError("you can only either pass the prop/value "
"argument or the pred argument, but not "
"both.")
self.obj = lib.lsl_create_continuous_resolver_bypred(str.encode(pred),
c_double(forget_after))
elif prop is not None and value is not None:
self.obj = lib.lsl_create_continuous_resolver_byprop(str.encode(prop),
str.encode(value),
c_double(forget_after))
elif prop is not None or value is not None:
raise ValueError("if prop is specified, then value must be "
"specified, too, and vice versa.")
else:
self.obj = lib.lsl_create_continuous_resolver(c_double(forget_after))
self.obj = c_void_p(self.obj)
if not self.obj:
raise RuntimeError("could not create continuous resolver.")
def __del__(self):
"""Destructor for the continuous resolver."""
# noinspection PyBroadException
try:
lib.lsl_destroy_continuous_resolver(self.obj)
except:
pass
def results(self):
"""Obtain the set of currently present streams on the network.
Returns a list of matching StreamInfo objects (with empty desc
field), any of which can subsequently be used to open an inlet.
"""
# noinspection PyCallingNonCallable
buffer = (c_void_p*1024)()
num_found = lib.lsl_resolver_results(self.obj, byref(buffer), 1024)
return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
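# Illustrative sketch (added for clarity; not part of the original module):
# keep a ContinuousResolver alive for a few seconds and report the names of
# the streams it currently sees; the polling interval and duration are
# arbitrary example values.
def _example_watch_network(duration=10.0):
    import time
    resolver = ContinuousResolver(forget_after=5.0)
    deadline = time.time() + duration
    seen = set()
    while time.time() < deadline:
        for info in resolver.results():
            seen.add(info.name())
        time.sleep(0.5)
    return seen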
# =========================
# === Error Definitions ===
# =========================
# noinspection PyShadowingBuiltins
class TimeoutError(RuntimeError):
# note: although this overrides the name of a built-in exception,
    # this API is retained here for compatibility with the Python 2.x
# version of pylsl
pass
class LostError(RuntimeError):
pass
class InvalidArgumentError(RuntimeError):
pass
class InternalError(RuntimeError):
pass
def handle_error(errcode):
"""Error handler function. Translates an error code into an exception."""
if type(errcode) is c_int:
errcode = errcode.value
if errcode == 0:
pass # no error
elif errcode == -1:
raise TimeoutError("the operation failed due to a timeout.")
elif errcode == -2:
raise LostError("the stream has been lost.")
elif errcode == -3:
raise InvalidArgumentError("an argument was incorrectly specified.")
elif errcode == -4:
raise InternalError("an internal error has occurred.")
elif errcode < 0:
raise RuntimeError("an unknown error has occurred.")
# =================================================
# === Compatibility Interface for old pylsl API ===
# =================================================
# set class aliases
stream_info = StreamInfo
stream_outlet = StreamOutlet
stream_inlet = StreamInlet
xml_element = XMLElement
timeout_error = TimeoutError
lost_error = LostError
vectorf = vectord = vectorl = vectori = vectors = vectorc = vectorstr = list
def resolve_stream(*args):
if len(args) == 0:
return resolve_streams()
elif type(args[0]) in [int, float]:
return resolve_streams(args[0])
elif type(args[0]) is str:
if len(args) == 1:
return resolve_bypred(args[0])
elif type(args[1]) in [int, float]:
return resolve_bypred(args[0], args[1])
else:
if len(args) == 2:
return resolve_byprop(args[0], args[1])
else:
return resolve_byprop(args[0], args[1], args[2])
# ==================================
# === Module Initialization Code ===
# ==================================
# find and load library
os_name = platform.system()
bitness = 8 * struct.calcsize("P")
if os_name in ['Windows', 'Microsoft']:
libname = 'liblsl32.dll' if bitness == 32 else 'liblsl64.dll'
elif os_name == 'Darwin':
libname = 'liblsl32.dylib' if bitness == 32 else 'liblsl64.dylib'
elif os_name == 'Linux':
libname = 'liblsl32.so' if bitness == 32 else 'liblsl64.so'
else:
raise RuntimeError("unrecognized operating system:", os_name)
libpath = os.path.join(os.path.dirname(__file__), libname)
if not os.path.isfile(libpath):
libpath = util.find_library(libname)
if not libpath:
raise RuntimeError("library " + libname + " was not found - make sure "
"that it is on the search path (e.g., in the same "
"folder as pylsl.py).")
lib = CDLL(libpath)
# set function return types where necessary
lib.lsl_local_clock.restype = c_double
lib.lsl_create_streaminfo.restype = c_void_p
lib.lsl_get_name.restype = c_char_p
lib.lsl_get_type.restype = c_char_p
lib.lsl_get_nominal_srate.restype = c_double
lib.lsl_get_source_id.restype = c_char_p
lib.lsl_get_created_at.restype = c_double
lib.lsl_get_uid.restype = c_char_p
lib.lsl_get_session_id.restype = c_char_p
lib.lsl_get_hostname.restype = c_char_p
lib.lsl_get_desc.restype = c_void_p
lib.lsl_get_xml.restype = c_char_p
lib.lsl_create_outlet.restype = c_void_p
lib.lsl_create_inlet.restype = c_void_p
lib.lsl_get_fullinfo.restype = c_void_p
lib.lsl_open_stream.restype = c_void_p
lib.lsl_time_correction.restype = c_double
lib.lsl_pull_sample_f.restype = c_double
lib.lsl_pull_sample_d.restype = c_double
lib.lsl_pull_sample_l.restype = c_double
lib.lsl_pull_sample_i.restype = c_double
lib.lsl_pull_sample_s.restype = c_double
lib.lsl_pull_sample_c.restype = c_double
lib.lsl_pull_sample_str.restype = c_double
lib.lsl_pull_sample_buf.restype = c_double
lib.lsl_first_child.restype = c_void_p
lib.lsl_last_child.restype = c_void_p
lib.lsl_next_sibling.restype = c_void_p
lib.lsl_previous_sibling.restype = c_void_p
lib.lsl_parent.restype = c_void_p
lib.lsl_child.restype = c_void_p
lib.lsl_next_sibling_n.restype = c_void_p
lib.lsl_previous_sibling_n.restype = c_void_p
lib.lsl_name.restype = c_char_p
lib.lsl_value.restype = c_char_p
lib.lsl_child_value.restype = c_char_p
lib.lsl_child_value_n.restype = c_char_p
lib.lsl_append_child_value.restype = c_void_p
lib.lsl_prepend_child_value.restype = c_void_p
lib.lsl_append_child.restype = c_void_p
lib.lsl_prepend_child.restype = c_void_p
lib.lsl_append_copy.restype = c_void_p
lib.lsl_prepend_copy.restype = c_void_p
# noinspection PyBroadException
try:
lib.lsl_pull_chunk_f.restype = c_long
lib.lsl_pull_chunk_d.restype = c_long
lib.lsl_pull_chunk_l.restype = c_long
lib.lsl_pull_chunk_i.restype = c_long
lib.lsl_pull_chunk_s.restype = c_long
lib.lsl_pull_chunk_c.restype = c_long
lib.lsl_pull_chunk_str.restype = c_long
lib.lsl_pull_chunk_buf.restype = c_long
except Exception:
print("pylsl: chunk transfer functions not available in your liblsl "
"version.")
# noinspection PyBroadException
try:
lib.lsl_create_continuous_resolver.restype = c_void_p
lib.lsl_create_continuous_resolver_bypred.restype = c_void_p
lib.lsl_create_continuous_resolver_byprop.restype = c_void_p
except Exception:
print("pylsl: ContinuousResolver not (fully) available in your liblsl "
"version.")
# set up some type maps
string2fmt = {'float32': cf_float32, 'double64': cf_double64,
'string': cf_string, 'int32': cf_int32, 'int16': cf_int16,
'int8': cf_int8, 'int64': cf_int64}
fmt2string = ['undefined', 'float32', 'double64', 'string', 'int32', 'int16',
'int8', 'int64']
fmt2type = [[], c_float, c_double, c_char_p, c_int, c_short, c_byte, c_longlong]
fmt2push_sample = [[], lib.lsl_push_sample_ftp, lib.lsl_push_sample_dtp,
lib.lsl_push_sample_strtp, lib.lsl_push_sample_itp,
lib.lsl_push_sample_stp, lib.lsl_push_sample_ctp, []]
fmt2pull_sample = [[], lib.lsl_pull_sample_f, lib.lsl_pull_sample_d,
lib.lsl_pull_sample_str, lib.lsl_pull_sample_i,
lib.lsl_pull_sample_s, lib.lsl_pull_sample_c, []]
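# The sample maps above are indexed by the cf_* channel-format constants, so a
# caller can select the matching liblsl function for a stream's format. A
# minimal sketch of the assumed lookup (fmt would normally come from a
# StreamInfo's channel format):
#
#   fmt = cf_float32
#   pull_fn = fmt2pull_sample[fmt]   # -> lib.lsl_pull_sample_f
#   c_type = fmt2type[fmt]           # -> c_float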
# noinspection PyBroadException
try:
fmt2push_chunk = [[], lib.lsl_push_chunk_ftp, lib.lsl_push_chunk_dtp,
lib.lsl_push_chunk_strtp, lib.lsl_push_chunk_itp,
lib.lsl_push_chunk_stp, lib.lsl_push_chunk_ctp, []]
fmt2pull_chunk = [[], lib.lsl_pull_chunk_f, lib.lsl_pull_chunk_d,
lib.lsl_pull_chunk_str, lib.lsl_pull_chunk_i,
lib.lsl_pull_chunk_s, lib.lsl_pull_chunk_c, []]
except Exception:
# if not available
fmt2push_chunk = [None, None, None, None, None, None, None, None]
fmt2pull_chunk = [None, None, None, None, None, None, None, None]
| mit | 5,422,760,942,595,016,000 | 40.571774 | 88 | 0.599721 | false |
linebp/pandas | pandas/tests/plotting/test_datetimelike.py | 1 | 50792 | """ Test cases for time series specific (freq conversion, etc) """
from datetime import datetime, timedelta, date, time
import pytest
from pandas.compat import lrange, zip
import numpy as np
from pandas import Index, Series, DataFrame, NaT
from pandas.compat import is_platform_mac
from pandas.core.indexes.datetimes import date_range, bdate_range
from pandas.core.indexes.timedeltas import timedelta_range
from pandas.tseries.offsets import DateOffset
from pandas.core.indexes.period import period_range, Period, PeriodIndex
from pandas.core.resample import DatetimeIndex
from pandas.util.testing import assert_series_equal, ensure_clean, slow
import pandas.util.testing as tm
from pandas.tests.plotting.common import (TestPlotBase,
_skip_if_no_scipy_gaussian_kde)
tm._skip_if_no_mpl()
class TestTSPlot(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']
idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]
self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']
idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]
self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]
self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,
columns=['A', 'B', 'C'])
for x in idx]
def teardown_method(self, method):
tm.close()
@slow
def test_ts_plot_with_tz(self):
# GH2877
index = date_range('1/1/2011', periods=2, freq='H',
tz='Europe/Brussels')
ts = Series([188.5, 328.25], index=index)
_check_plot_works(ts.plot)
def test_fontsize_set_correctly(self):
# For issue #8765
df = DataFrame(np.random.randn(10, 9), index=range(10))
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
assert label.get_fontsize() == 2
@slow
def test_frame_inferred(self):
# inferred freq
idx = date_range('1/1/1987', freq='MS', periods=100)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
# axes freq
idx = idx[0:40].union(idx[45:99])
df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df2.plot)
# N > 1
idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)
idx = DatetimeIndex(idx.values, freq=None)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
_check_plot_works(df.plot)
def test_is_error_nozeroindex(self):
# GH11858
i = np.array([1, 2, 3])
a = DataFrame(i, index=i)
_check_plot_works(a.plot, xerr=a)
_check_plot_works(a.plot, yerr=a)
def test_nonnumeric_exclude(self):
idx = date_range('1/1/1987', freq='A', periods=3)
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]}, idx)
fig, ax = self.plt.subplots()
df.plot(ax=ax) # it works
assert len(ax.get_lines()) == 1 # B was plotted
self.plt.close(fig)
pytest.raises(TypeError, df['A'].plot)
@slow
def test_tsplot(self):
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
ts = tm.makeTimeSeries()
f = lambda *args, **kwds: tsplot(s, self.plt.Axes.plot, *args, **kwds)
for s in self.period_ser:
_check_plot_works(f, s.index.freq, ax=ax, series=s)
for s in self.datetime_ser:
_check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)
for s in self.period_ser:
_check_plot_works(s.plot, ax=ax)
for s in self.datetime_ser:
_check_plot_works(s.plot, ax=ax)
_, ax = self.plt.subplots()
ts.plot(style='k', ax=ax)
color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)
assert color == ax.get_lines()[0].get_color()
def test_both_style_and_color(self):
ts = tm.makeTimeSeries()
pytest.raises(ValueError, ts.plot, style='b-', color='#000099')
s = ts.reset_index(drop=True)
pytest.raises(ValueError, s.plot, style='b-', color='#000099')
@slow
def test_high_freq(self):
freaks = ['ms', 'us']
for freq in freaks:
_, ax = self.plt.subplots()
rng = date_range('1/1/2012', periods=100000, freq=freq)
ser = Series(np.random.randn(len(rng)), rng)
_check_plot_works(ser.plot, ax=ax)
def test_get_datevalue(self):
from pandas.plotting._converter import get_datevalue
assert get_datevalue(None, 'D') is None
assert get_datevalue(1987, 'A') == 1987
assert (get_datevalue(Period(1987, 'A'), 'M') ==
Period('1987-12', 'M').ordinal)
assert (get_datevalue('1/1/1987', 'D') ==
Period('1987-1-1', 'D').ordinal)
@slow
def test_ts_plot_format_coord(self):
def check_format_of_first_point(ax, expected_string):
first_line = ax.get_lines()[0]
first_x = first_line.get_xdata()[0].ordinal
first_y = first_line.get_ydata()[0]
try:
assert expected_string == ax.format_coord(first_x, first_y)
            except ValueError:
pytest.skip("skipping test because issue forming "
"test comparison GH7664")
annual = Series(1, index=date_range('2014-01-01', periods=3,
freq='A-DEC'))
_, ax = self.plt.subplots()
annual.plot(ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
# note this is added to the annual plot already in existence, and
# changes its freq field
daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))
daily.plot(ax=ax)
check_format_of_first_point(ax,
't = 2014-01-01 y = 1.000000')
tm.close()
# tsplot
_, ax = self.plt.subplots()
from pandas.tseries.plotting import tsplot
tsplot(annual, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014 y = 1.000000')
tsplot(daily, self.plt.Axes.plot, ax=ax)
check_format_of_first_point(ax, 't = 2014-01-01 y = 1.000000')
@slow
def test_line_plot_period_series(self):
for s in self.period_ser:
_check_plot_works(s.plot, s.index.freq)
@slow
def test_line_plot_datetime_series(self):
for s in self.datetime_ser:
_check_plot_works(s.plot, s.index.freq.rule_code)
@slow
def test_line_plot_period_frame(self):
for df in self.period_df:
_check_plot_works(df.plot, df.index.freq)
@slow
def test_line_plot_datetime_frame(self):
for df in self.datetime_df:
freq = df.index.to_period(df.index.freq.rule_code).freq
_check_plot_works(df.plot, freq)
@slow
def test_line_plot_inferred_freq(self):
for ser in self.datetime_ser:
ser = Series(ser.values, Index(np.asarray(ser.index)))
_check_plot_works(ser.plot, ser.index.inferred_freq)
ser = ser[[0, 3, 5, 6]]
_check_plot_works(ser.plot)
def test_fake_inferred_business(self):
_, ax = self.plt.subplots()
rng = date_range('2001-1-1', '2001-1-10')
ts = Series(lrange(len(rng)), rng)
ts = ts[:3].append(ts[5:])
ts.plot(ax=ax)
assert not hasattr(ax, 'freq')
@slow
def test_plot_offset_freq(self):
ser = tm.makeTimeSeries()
_check_plot_works(ser.plot)
dr = date_range(ser.index[0], freq='BQS', periods=10)
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_plot_multiple_inferred_freq(self):
dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(
2000, 1, 11)])
ser = Series(np.random.randn(len(dr)), dr)
_check_plot_works(ser.plot)
@slow
def test_uhf(self):
import pandas.plotting._converter as conv
idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)
df = DataFrame(np.random.randn(len(idx), 2), idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
axis = ax.get_xaxis()
tlocs = axis.get_ticklocs()
tlabels = axis.get_ticklabels()
for loc, label in zip(tlocs, tlabels):
xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')
rs = str(label.get_text())
if len(rs):
assert xp == rs
@slow
def test_irreg_hf(self):
idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)
df = DataFrame(np.random.randn(len(idx), 2), idx)
irreg = df.iloc[[0, 1, 3, 4]]
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
sec = 1. / 24 / 60 / 60
assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()
_, ax = self.plt.subplots()
df2 = df.copy()
df2.index = df.index.asobject
df2.plot(ax=ax)
diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()
assert (np.fabs(diffs[1:] - sec) < 1e-8).all()
def test_irregular_datetime64_repr_bug(self):
ser = tm.makeTimeSeries()
ser = ser[[0, 1, 2, 7]]
_, ax = self.plt.subplots()
ret = ser.plot(ax=ax)
assert ret is not None
for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):
assert rs == xp
def test_business_freq(self):
bts = tm.makePeriodSeries()
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'B'
@slow
def test_business_freq_convert(self):
n = tm.N
tm.N = 300
bts = tm.makeTimeSeries().asfreq('BM')
tm.N = n
ts = bts.to_period('M')
_, ax = self.plt.subplots()
bts.plot(ax=ax)
assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal
idx = ax.get_lines()[0].get_xdata()
assert PeriodIndex(data=idx).freqstr == 'M'
def test_nonzero_base(self):
# GH2571
idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(
minutes=30))
df = DataFrame(np.arange(24), index=idx)
_, ax = self.plt.subplots()
df.plot(ax=ax)
rs = ax.get_lines()[0].get_xdata()
assert not Index(rs).is_normalized
def test_dataframe(self):
bts = DataFrame({'a': tm.makeTimeSeries()})
_, ax = self.plt.subplots()
bts.plot(ax=ax)
idx = ax.get_lines()[0].get_xdata()
tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))
@slow
def test_axis_limits(self):
def _test(ax):
xlim = ax.get_xlim()
ax.set_xlim(xlim[0] - 5, xlim[1] + 10)
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert result[0] == xlim[0] - 5
assert result[1] == xlim[1] + 10
# string
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim('1/1/2000', '4/1/2000')
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
            # datetime
expected = (Period('1/1/2000', ax.freq),
Period('4/1/2000', ax.freq))
ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))
ax.get_figure().canvas.draw()
result = ax.get_xlim()
assert int(result[0]) == expected[0].ordinal
assert int(result[1]) == expected[1].ordinal
fig = ax.get_figure()
self.plt.close(fig)
ser = tm.makeTimeSeries()
_, ax = self.plt.subplots()
ser.plot(ax=ax)
_test(ax)
_, ax = self.plt.subplots()
df = DataFrame({'a': ser, 'b': ser + 1})
df.plot(ax=ax)
_test(ax)
df = DataFrame({'a': ser, 'b': ser + 1})
axes = df.plot(subplots=True)
for ax in axes:
_test(ax)
def test_get_finder(self):
import pandas.plotting._converter as conv
assert conv.get_finder('B') == conv._daily_finder
assert conv.get_finder('D') == conv._daily_finder
assert conv.get_finder('M') == conv._monthly_finder
assert conv.get_finder('Q') == conv._quarterly_finder
assert conv.get_finder('A') == conv._annual_finder
assert conv.get_finder('W') == conv._daily_finder
@slow
def test_finder_daily(self):
xp = Period('1999-1-1', freq='B').ordinal
day_lst = [10, 40, 252, 400, 950, 2750, 10000]
for n in day_lst:
rng = bdate_range('1999-1-1', periods=n)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
@slow
def test_finder_quarterly(self):
xp = Period('1988Q1').ordinal
yrs = [3.5, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 4), freq='Q')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == xp
(vmin, vmax) = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
@slow
def test_finder_monthly(self):
xp = Period('Jan 1988').ordinal
yrs = [1.15, 2.5, 4, 11]
for n in yrs:
rng = period_range('1987Q2', periods=int(n * 12), freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == xp
vmin, vmax = ax.get_xlim()
ax.set_xlim(vmin + 0.9, vmax)
rs = xaxis.get_majorticklocs()[0]
assert xp == rs
self.plt.close(ax.get_figure())
def test_finder_monthly_long(self):
rng = period_range('1988Q1', periods=24 * 12, freq='M')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1989Q1', 'M').ordinal
assert rs == xp
@slow
def test_finder_annual(self):
xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]
for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):
rng = period_range('1987', periods=nyears, freq='A')
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
assert rs == Period(xp[i], freq='A').ordinal
self.plt.close(ax.get_figure())
@slow
def test_finder_minutely(self):
nminutes = 50 * 24 * 60
rng = date_range('1/1/1999', freq='Min', periods=nminutes)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='Min').ordinal
assert rs == xp
def test_finder_hourly(self):
nhours = 23
rng = date_range('1/1/1999', freq='H', periods=nhours)
ser = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
ser.plot(ax=ax)
xaxis = ax.get_xaxis()
rs = xaxis.get_majorticklocs()[0]
xp = Period('1/1/1999', freq='H').ordinal
assert rs == xp
@slow
def test_gaps(self):
ts = tm.makeTimeSeries()
ts[5:25] = np.nan
_, ax = self.plt.subplots()
ts.plot(ax=ax)
lines = ax.get_lines()
tm._skip_if_mpl_1_5()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
self.plt.close(ax.get_figure())
# irregular
ts = tm.makeTimeSeries()
ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]
ts[2:5] = np.nan
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
self.plt.close(ax.get_figure())
# non-ts
idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]
ser = Series(np.random.randn(len(idx)), idx)
ser[2:5] = np.nan
_, ax = self.plt.subplots()
ser.plot(ax=ax)
lines = ax.get_lines()
assert len(lines) == 1
l = lines[0]
data = l.get_xydata()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[2:5, 1].all()
@slow
def test_gap_upsample(self):
low = tm.makeTimeSeries()
low[5:25] = np.nan
_, ax = self.plt.subplots()
low.plot(ax=ax)
idxh = date_range(low.index[0], low.index[-1], freq='12h')
s = Series(np.random.randn(len(idxh)), idxh)
s.plot(secondary_y=True)
lines = ax.get_lines()
assert len(lines) == 1
assert len(ax.right_ax.get_lines()) == 1
l = lines[0]
data = l.get_xydata()
tm._skip_if_mpl_1_5()
assert isinstance(data, np.ma.core.MaskedArray)
mask = data.mask
assert mask[5:25, 1].all()
@slow
def test_secondary_y(self):
ser = Series(np.random.randn(10))
ser2 = Series(np.random.randn(10))
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata())
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
assert not hasattr(ax, 'left_ax')
assert hasattr(ax, 'right_ax')
assert hasattr(ax2, 'left_ax')
assert not hasattr(ax2, 'right_ax')
@slow
def test_secondary_y_ts(self):
idx = date_range('1/1/2000', periods=10)
ser = Series(np.random.randn(10), idx)
ser2 = Series(np.random.randn(10), idx)
fig, _ = self.plt.subplots()
ax = ser.plot(secondary_y=True)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
l = ax.get_lines()[0]
xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()
assert_series_equal(ser, xp)
assert ax.get_yaxis().get_ticks_position() == 'right'
assert not axes[0].get_yaxis().get_visible()
self.plt.close(fig)
_, ax2 = self.plt.subplots()
ser2.plot(ax=ax2)
assert (ax2.get_yaxis().get_ticks_position() ==
self.default_tick_position)
self.plt.close(ax2.get_figure())
ax = ser2.plot()
ax2 = ser.plot(secondary_y=True)
assert ax.get_yaxis().get_visible()
@slow
def test_secondary_kde(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ax = ser.plot(secondary_y=True, kind='density', ax=ax)
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@slow
def test_secondary_bar(self):
ser = Series(np.random.randn(10))
fig, ax = self.plt.subplots()
ser.plot(secondary_y=True, kind='bar', ax=ax)
axes = fig.get_axes()
assert axes[1].get_yaxis().get_ticks_position() == 'right'
@slow
def test_secondary_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
@slow
def test_secondary_bar_frame(self):
df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])
axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)
assert axes[0].get_yaxis().get_ticks_position() == 'right'
assert (axes[1].get_yaxis().get_ticks_position() ==
self.default_tick_position)
assert axes[2].get_yaxis().get_ticks_position() == 'right'
def test_mixed_freq_regular_first(self):
# TODO
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
# it works!
s1.plot()
ax2 = s2.plot(style='g')
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period('B'))
assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left == pidx[0].ordinal
assert right == pidx[-1].ordinal
@slow
def test_mixed_freq_irregular_first(self):
s1 = tm.makeTimeSeries()
s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_regular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s1.plot(ax=ax)
ax2 = s2.plot(style='g', ax=ax)
lines = ax2.get_lines()
idx1 = PeriodIndex(lines[0].get_xdata())
idx2 = PeriodIndex(lines[1].get_xdata())
assert idx1.equals(s1.index.to_period('B'))
assert idx2.equals(s2.index.to_period('B'))
left, right = ax2.get_xlim()
pidx = s1.index.to_period()
assert left == pidx[0].ordinal
assert right == pidx[-1].ordinal
@slow
def test_mixed_freq_irregular_first_df(self):
# GH 9852
s1 = tm.makeTimeSeries().to_frame()
s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]
_, ax = self.plt.subplots()
s2.plot(style='g', ax=ax)
s1.plot(ax=ax)
assert not hasattr(ax, 'freq')
lines = ax.get_lines()
x1 = lines[0].get_xdata()
tm.assert_numpy_array_equal(x1, s2.index.asobject.values)
x2 = lines[1].get_xdata()
tm.assert_numpy_array_equal(x2, s1.index.asobject.values)
def test_mixed_freq_hf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
@slow
def test_mixed_freq_alignment(self):
ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')
ts_data = np.random.randn(12)
ts = Series(ts_data, index=ts_ind)
ts2 = ts.asfreq('T').interpolate()
_, ax = self.plt.subplots()
ax = ts.plot(ax=ax)
ts2.plot(style='r', ax=ax)
assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]
@slow
def test_mixed_freq_lf_first(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(legend=True, ax=ax)
high.plot(legend=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'D'
leg = ax.get_legend()
assert len(leg.texts) == 2
self.plt.close(ax.get_figure())
idxh = date_range('1/1/1999', periods=240, freq='T')
idxl = date_range('1/1/1999', periods=4, freq='H')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'T'
def test_mixed_freq_irreg_period(self):
ts = tm.makeTimeSeries()
irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]
rng = period_range('1/3/2000', periods=30, freq='B')
ps = Series(np.random.randn(len(rng)), rng)
_, ax = self.plt.subplots()
irreg.plot(ax=ax)
ps.plot(ax=ax)
def test_mixed_freq_shared_ax(self):
# GH13341, using sharex=True
idx1 = date_range('2015-01-01', periods=3, freq='M')
idx2 = idx1[:1].union(idx1[2:])
s1 = Series(range(len(idx1)), idx1)
s2 = Series(range(len(idx2)), idx2)
fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert ax1.freq == 'M'
assert ax2.freq == 'M'
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# using twinx
fig, ax1 = self.plt.subplots()
ax2 = ax1.twinx()
s1.plot(ax=ax1)
s2.plot(ax=ax2)
assert (ax1.lines[0].get_xydata()[0, 0] ==
ax2.lines[0].get_xydata()[0, 0])
# TODO (GH14330, GH14322)
# plotting the irregular first does not yet work
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# s2.plot(ax=ax1)
# s1.plot(ax=ax2)
# assert (ax1.lines[0].get_xydata()[0, 0] ==
# ax2.lines[0].get_xydata()[0, 0])
def test_nat_handling(self):
_, ax = self.plt.subplots()
dti = DatetimeIndex(['2015-01-01', NaT, '2015-01-03'])
s = Series(range(len(dti)), dti)
s.plot(ax=ax)
xdata = ax.get_lines()[0].get_xdata()
# plot x data is bounded by index values
assert s.index.min() <= Series(xdata).min()
assert Series(xdata).max() <= s.index.max()
@slow
def test_to_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
# tsplot
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
tsplot(high, self.plt.Axes.plot, ax=ax)
lines = tsplot(low, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
@slow
def test_from_weekly_resampling(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
expected_h = idxh.to_period().asi8.astype(np.float64)
expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,
1549, 1553, 1558, 1562], dtype=np.float64)
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
tm.close()
# tsplot
from pandas.tseries.plotting import tsplot
_, ax = self.plt.subplots()
tsplot(low, self.plt.Axes.plot, ax=ax)
lines = tsplot(high, self.plt.Axes.plot, ax=ax)
for l in lines:
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
xdata = l.get_xdata(orig=False)
if len(xdata) == 12: # idxl lines
tm.assert_numpy_array_equal(xdata, expected_l)
else:
tm.assert_numpy_array_equal(xdata, expected_h)
@slow
def test_from_resampling_area_line_mixed(self):
idxh = date_range('1/1/1999', periods=52, freq='W')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = DataFrame(np.random.rand(len(idxh), 3),
index=idxh, columns=[0, 1, 2])
low = DataFrame(np.random.rand(len(idxl), 3),
index=idxl, columns=[0, 1, 2])
# low to high
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
low.plot(kind=kind1, stacked=True, ax=ax)
high.plot(kind=kind2, stacked=True, ax=ax)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
assert PeriodIndex(l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
# check stacked values are correct
expected_y += low[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# high to low
for kind1, kind2 in [('line', 'area'), ('area', 'line')]:
_, ax = self.plt.subplots()
high.plot(kind=kind1, stacked=True, ax=ax)
low.plot(kind=kind2, stacked=True, ax=ax)
# check high dataframe result
expected_x = idxh.to_period().asi8.astype(np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += high[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
# check low dataframe result
expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,
1544, 1549, 1553, 1558, 1562],
dtype=np.float64)
expected_y = np.zeros(len(expected_x), dtype=np.float64)
for i in range(3):
l = ax.lines[3 + i]
assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq
tm.assert_numpy_array_equal(l.get_xdata(orig=False),
expected_x)
expected_y += low[i].values
tm.assert_numpy_array_equal(l.get_ydata(orig=False),
expected_y)
@slow
def test_mixed_freq_second_millisecond(self):
# GH 7772, GH 7760
idxh = date_range('2014-07-01 09:00', freq='S', periods=50)
idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
# high to low
_, ax = self.plt.subplots()
high.plot(ax=ax)
low.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
tm.close()
# low to high
_, ax = self.plt.subplots()
low.plot(ax=ax)
high.plot(ax=ax)
assert len(ax.get_lines()) == 2
for l in ax.get_lines():
assert PeriodIndex(data=l.get_xdata()).freq == 'L'
@slow
def test_irreg_dtypes(self):
# date
idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]
df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))
_check_plot_works(df.plot)
# np.datetime64
idx = date_range('1/1/2000', periods=10)
idx = idx[[0, 2, 5, 9]].asobject
df = DataFrame(np.random.randn(len(idx), 3), idx)
_, ax = self.plt.subplots()
_check_plot_works(df.plot, ax=ax)
@slow
def test_time(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
_, ax = self.plt.subplots()
df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
assert xp == rs
# change xlim
ax.set_xlim('1:30', '5:00')
# check tick labels again
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S')
assert xp == rs
@slow
def test_time_musec(self):
t = datetime(1, 1, 1, 3, 30, 0)
deltas = np.random.randint(1, 20, 3).cumsum()
ts = np.array([(t + timedelta(microseconds=int(x))).time()
for x in deltas])
df = DataFrame({'a': np.random.randn(len(ts)),
'b': np.random.randn(len(ts))},
index=ts)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
# verify tick labels
ticks = ax.get_xticks()
labels = ax.get_xticklabels()
for t, l in zip(ticks, labels):
m, s = divmod(int(t), 60)
# TODO: unused?
# us = int((t - int(t)) * 1e6)
h, m = divmod(m, 60)
xp = l.get_text()
if len(xp) > 0:
rs = time(h, m, s).strftime('%H:%M:%S.%f')
assert xp == rs
@slow
def test_secondary_upsample(self):
idxh = date_range('1/1/1999', periods=365, freq='D')
idxl = date_range('1/1/1999', periods=12, freq='M')
high = Series(np.random.randn(len(idxh)), idxh)
low = Series(np.random.randn(len(idxl)), idxl)
_, ax = self.plt.subplots()
low.plot(ax=ax)
ax = high.plot(secondary_y=True, ax=ax)
for l in ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
for l in ax.left_ax.get_lines():
assert PeriodIndex(l.get_xdata()).freq == 'D'
@slow
def test_secondary_legend(self):
fig = self.plt.figure()
ax = fig.add_subplot(211)
# ts
df = tm.makeTimeDataFrame()
df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B (right)'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df.plot(secondary_y=['A', 'C'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
assert leg.get_texts()[2].get_text() == 'C'
assert leg.get_texts()[3].get_text() == 'D'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A (right)'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig, ax = self.plt.subplots()
df.plot(kind='bar', secondary_y=['A'], mark_right=False, ax=ax)
leg = ax.get_legend()
assert leg.get_texts()[0].get_text() == 'A'
assert leg.get_texts()[1].get_text() == 'B'
self.plt.close(fig)
fig = self.plt.figure()
ax = fig.add_subplot(211)
df = tm.makeTimeDataFrame()
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close(fig)
# non-ts
df = tm.makeDataFrame()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['A', 'B'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
self.plt.close()
fig = self.plt.figure()
ax = fig.add_subplot(211)
ax = df.plot(secondary_y=['C', 'D'], ax=ax)
leg = ax.get_legend()
assert len(leg.get_lines()) == 4
assert ax.right_ax.get_legend() is None
colors = set()
for line in leg.get_lines():
colors.add(line.get_color())
# TODO: color cycle problems
assert len(colors) == 4
def test_format_date_axis(self):
rng = date_range('1/1/2012', periods=12, freq='M')
df = DataFrame(np.random.randn(len(rng), 3), rng)
_, ax = self.plt.subplots()
ax = df.plot(ax=ax)
xaxis = ax.get_xaxis()
for l in xaxis.get_ticklabels():
if len(l.get_text()) > 0:
assert l.get_rotation() == 30
@slow
def test_ax_plot(self):
x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')
y = lrange(len(x))
_, ax = self.plt.subplots()
lines = ax.plot(x, y, label='Y')
tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)
@slow
def test_mpl_nopandas(self):
dates = [date(2008, 12, 31), date(2009, 1, 31)]
values1 = np.arange(10.0, 11.0, 0.5)
values2 = np.arange(11.0, 12.0, 0.5)
kw = dict(fmt='-', lw=4)
_, ax = self.plt.subplots()
ax.plot_date([x.toordinal() for x in dates], values1, **kw)
ax.plot_date([x.toordinal() for x in dates], values2, **kw)
line1, line2 = ax.get_lines()
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)
exp = np.array([x.toordinal() for x in dates], dtype=np.float64)
tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)
@slow
def test_irregular_ts_shared_ax_xlim(self):
# GH 2960
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
# plot the left section of the irregular series, then the right section
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
ts_irregular[5:].plot(ax=ax)
# check that axis limits are correct
left, right = ax.get_xlim()
assert left == ts_irregular.index.min().toordinal()
assert right == ts_irregular.index.max().toordinal()
@slow
def test_secondary_y_non_ts_xlim(self):
# GH 3490 - non-timeseries with secondary y
index_1 = [1, 2, 3, 4]
index_2 = [5, 6, 7, 8]
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before == left_after
assert right_before < right_after
@slow
def test_secondary_y_regular_ts_xlim(self):
# GH 3490 - regular-timeseries with secondary y
index_1 = date_range(start='2000-01-01', periods=4, freq='D')
index_2 = date_range(start='2000-01-05', periods=4, freq='D')
s1 = Series(1, index=index_1)
s2 = Series(2, index=index_2)
_, ax = self.plt.subplots()
s1.plot(ax=ax)
left_before, right_before = ax.get_xlim()
s2.plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
assert left_before == left_after
assert right_before < right_after
@slow
def test_secondary_y_mixed_freq_ts_xlim(self):
# GH 3490 - mixed frequency timeseries with secondary y
rng = date_range('2000-01-01', periods=10000, freq='min')
ts = Series(1, index=rng)
_, ax = self.plt.subplots()
ts.plot(ax=ax)
left_before, right_before = ax.get_xlim()
ts.resample('D').mean().plot(secondary_y=True, ax=ax)
left_after, right_after = ax.get_xlim()
# a downsample should not have changed either limit
assert left_before == left_after
assert right_before == right_after
@slow
def test_secondary_y_irregular_ts_xlim(self):
# GH 3490 - irregular-timeseries with secondary y
ts = tm.makeTimeSeries()[:20]
ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]
_, ax = self.plt.subplots()
ts_irregular[:5].plot(ax=ax)
# plot higher-x values on secondary axis
ts_irregular[5:].plot(secondary_y=True, ax=ax)
# ensure secondary limits aren't overwritten by plot on primary
ts_irregular[:5].plot(ax=ax)
left, right = ax.get_xlim()
assert left == ts_irregular.index.min().toordinal()
assert right == ts_irregular.index.max().toordinal()
def test_plot_outofbounds_datetime(self):
# 2579 - checking this does not raise
values = [date(1677, 1, 1), date(1677, 1, 2)]
_, ax = self.plt.subplots()
ax.plot(values)
values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]
ax.plot(values)
def test_format_timedelta_ticks_narrow(self):
if is_platform_mac():
pytest.skip("skip on mac for precision display issue on older mpl")
expected_labels = [
'00:00:00.00000000{:d}'.format(i)
for i in range(10)]
rng = timedelta_range('0', periods=10, freq='ns')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
assert len(labels) == len(expected_labels)
for l, l_expected in zip(labels, expected_labels):
assert l.get_text() == l_expected
def test_format_timedelta_ticks_wide(self):
if is_platform_mac():
pytest.skip("skip on mac for precision display issue on older mpl")
expected_labels = [
'00:00:00',
'1 days 03:46:40',
'2 days 07:33:20',
'3 days 11:20:00',
'4 days 15:06:40',
'5 days 18:53:20',
'6 days 22:40:00',
'8 days 02:26:40',
''
]
rng = timedelta_range('0', periods=10, freq='1 d')
df = DataFrame(np.random.randn(len(rng), 3), rng)
fig, ax = self.plt.subplots()
ax = df.plot(fontsize=2, ax=ax)
fig.canvas.draw()
labels = ax.get_xticklabels()
assert len(labels) == len(expected_labels)
for l, l_expected in zip(labels, expected_labels):
assert l.get_text() == l_expected
def test_timedelta_plot(self):
# test issue #8711
s = Series(range(5), timedelta_range('1day', periods=5))
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test long period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 d')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
# test short period
index = timedelta_range('1 day 2 hr 30 min 10 s',
periods=10, freq='1 ns')
s = Series(np.random.randn(len(index)), index)
_, ax = self.plt.subplots()
_check_plot_works(s.plot, ax=ax)
def test_hist(self):
# https://github.com/matplotlib/matplotlib/issues/8459
rng = date_range('1/1/2011', periods=10, freq='H')
x = rng
w1 = np.arange(0, 1, .1)
w2 = np.arange(0, 1, .1)[::-1]
_, ax = self.plt.subplots()
ax.hist([x, x], weights=[w1, w2])
@slow
def test_overlapping_datetime(self):
        # GH 6608
s1 = Series([1, 2, 3], index=[datetime(1995, 12, 31),
datetime(2000, 12, 31),
datetime(2005, 12, 31)])
s2 = Series([1, 2, 3], index=[datetime(1997, 12, 31),
datetime(2003, 12, 31),
datetime(2008, 12, 31)])
# plot first series, then add the second series to those axes,
# then try adding the first series again
_, ax = self.plt.subplots()
s1.plot(ax=ax)
s2.plot(ax=ax)
s1.plot(ax=ax)
def _check_plot_works(f, freq=None, series=None, *args, **kwargs):
import matplotlib.pyplot as plt
fig = plt.gcf()
try:
plt.clf()
ax = fig.add_subplot(211)
orig_ax = kwargs.pop('ax', plt.gca())
orig_axfreq = getattr(orig_ax, 'freq', None)
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
ax = kwargs.pop('ax', plt.gca())
if series is not None:
dfreq = series.index.freq
if isinstance(dfreq, DateOffset):
dfreq = dfreq.rule_code
if orig_axfreq is None:
assert ax.freq == dfreq
if freq is not None and orig_axfreq is None:
assert ax.freq == freq
ax = fig.add_subplot(212)
try:
kwargs['ax'] = ax
ret = f(*args, **kwargs)
assert ret is not None # do something more intelligent
except Exception:
pass
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
plt.close(fig)
| bsd-3-clause | -2,776,164,112,799,334,400 | 34.819464 | 79 | 0.528883 | false |
ArcAwe/coseq | Software/RaspberryPi/main/xbox_read.py | 1 | 1990 |
# from https://github.com/zephod/lego-pi/blob/master/lib/xbox_read.py
from os import popen
from sys import stdin
import re
import time
s = re.compile('[ :]')
class Event:
def __init__(self,key,value,old_value):
self.key = key
self.value = value
self.old_value = old_value
def is_press(self):
return self.value==1 and self.old_value==0
def __str__(self):
return 'Event(%s,%d,%d)' % (self.key,self.value,self.old_value)
def apply_deadzone(x, deadzone, scale):
if x < 0:
return (scale * min(0,x+deadzone)) / (32768-deadzone)
return (scale * max(0,x-deadzone)) / (32768-deadzone)
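# Worked example for apply_deadzone (values chosen for illustration): with
# deadzone=4000 and scale=100, a raw stick reading of 2000 is inside the dead
# zone and maps to 0, while a reading of 20000 maps to
# (100 * (20000 - 4000)) / (32768 - 4000), which truncates to 55 under this
# integer division.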
def event_stream(deadzone=0,scale=32768):
_data = None
pid = 65536
subprocess = popen('nohup xboxdrv','r',pid)
tryRmmod = False
print "Starting..."
while (True):
line = subprocess.readline()
if 'Error' in line:
            if not tryRmmod:
pid+=3
subprocess = popen('nohup rmmod xpad','r',pid)
time.sleep(1)
pid+=3
subprocess = popen('nohup xboxdrv','r',pid)
tryRmmod = True
continue
raise ValueError(line)
data = filter(bool,s.split(line[:-1]))
if len(data)==42:
# Break input string into a data dict
data = { data[x]:int(data[x+1]) for x in range(0,len(data),2) }
if not _data:
_data = data
continue
for key in data:
if key=='X1' or key=='X2' or key=='Y1' or key=='Y2':
data[key] = apply_deadzone(data[key],deadzone,scale)
if data[key]==_data[key]: continue
event = Event(key,data[key],_data[key])
yield event
_data = data
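# Minimal self-test sketch (assumes the xboxdrv userspace driver is installed
# and a controller is connected); it only runs when this module is executed
# directly, and the deadzone/scale values are illustrative.
if __name__ == '__main__':
    for ev in event_stream(deadzone=4000, scale=100):
        if ev.is_press():
            print('pressed: %s' % ev.key)
        else:
            print(ev)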
# Appendix: Keys
# --------------
# X1
# Y1
# X2
# Y2
# du
# dd
# dl
# dr
# back
# guide
# start
# TL
# TR
# A
# B
# X
# Y
# LB
# RB
# LT
# RT | mit | 3,269,753,871,665,201,700 | 22.702381 | 75 | 0.513568 | false |
igorvc/iso8583py | examples/echoClient.py | 1 | 2943 | """
(C) Copyright 2009 Igor V. Custodio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from ISO8583.ISO8583 import ISO8583
from ISO8583.ISOErrors import *
import socket
import sys
import time
# Configure the client
serverIP = "127.0.0.1"
serverPort = 8583
numberEcho = 5
timeBetweenEcho = 5 # in seconds
bigEndian = True
#bigEndian = False
s = None
for res in socket.getaddrinfo(serverIP, serverPort, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error, msg:
s = None
continue
try:
s.connect(sa)
except socket.error, msg:
s.close()
s = None
continue
break
if s is None:
print ('Could not connect :(')
sys.exit(1)
for req in range(0,numberEcho):
iso = ISO8583()
iso.setMTI('0800')
iso.setBit(3,'300000')
iso.setBit(24,'045')
iso.setBit(41,'11111111')
iso.setBit(42,'222222222222222')
iso.setBit(63,'This is a Test Message')
if bigEndian:
try:
message = iso.getNetworkISO()
s.send(message)
print ('Sending ... %s' % message)
ans = s.recv(2048)
print ("\nInput ASCII |%s|" % ans)
isoAns = ISO8583()
isoAns.setNetworkISO(ans)
v1 = isoAns.getBitsAndValues()
for v in v1:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
if isoAns.getMTI() == '0810':
print ("\tThat's great !!! The server understand my message !!!")
else:
print ("The server dosen't understand my message!")
except InvalidIso8583, ii:
print ii
break
time.sleep(timeBetweenEcho)
else:
try:
message = iso.getNetworkISO(False)
s.send(message)
print ('Sending ... %s' % message)
ans = s.recv(2048)
print ("\nInput ASCII |%s|" % ans)
isoAns = ISO8583()
isoAns.setNetworkISO(ans,False)
v1 = isoAns.getBitsAndValues()
for v in v1:
print ('Bit %s of type %s with value = %s' % (v['bit'],v['type'],v['value']))
if isoAns.getMTI() == '0810':
print ("\tThat's great !!! The server understand my message !!!")
else:
print ("The server dosen't understand my message!")
except InvalidIso8583, ii:
print ii
break
time.sleep(timeBetweenEcho)
print ('Closing...')
s.close()
| gpl-3.0 | 1,896,742,353,546,762,200 | 22.731092 | 90 | 0.638464 | false |
w1z2g3/crossbar | crossbar/adapter/rest/test/test_signature.py | 1 | 11871 | #####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import
import json
from twisted.internet.defer import inlineCallbacks
from crossbar.test import TestCase
from crossbar._compat import native_string
from crossbar._logging import LogCapturer
from crossbar.adapter.rest import PublisherResource
from crossbar.adapter.rest.test import MockPublisherSession, renderResource, makeSignedArguments
resourceOptions = {
"secret": "foobar",
"key": "bazapp"
}
publishBody = b'{"topic": "com.test.messages", "args": [1]}'
class SignatureTestCase(TestCase):
"""
Unit tests for the signature authentication part of L{_CommonResource}.
"""
@inlineCallbacks
def test_good_signature(self):
"""
A valid, correct signature will mean the request is processed.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody,
sign=True, signKey="bazapp", signSecret="foobar")
self.assertEqual(request.code, 202)
self.assertEqual(json.loads(native_string(request.get_written_data())),
{"id": session._published_messages[0]["id"]})
logs = l.get_category("AR203")
self.assertEqual(len(logs), 1)
@inlineCallbacks
def test_incorrect_secret(self):
"""
An incorrect secret (but an otherwise well-formed signature) will mean
the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
with LogCapturer() as l:
request = yield renderResource(
resource, b"/",
method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody,
sign=True, signKey="bazapp", signSecret="foobar2")
self.assertEqual(request.code, 401)
errors = l.get_category("AR459")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 401)
@inlineCallbacks
def test_unknown_key(self):
"""
An unknown key in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody,
sign=True, signKey="spamapp", signSecret="foobar")
self.assertEqual(request.code, 401)
errors = l.get_category("AR460")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 401)
@inlineCallbacks
def test_no_timestamp(self):
"""
No timestamp in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'timestamp']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_wrong_timestamp(self):
"""
An invalid timestamp in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'timestamp'] = [b"notatimestamp"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR462")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_outdated_delta(self):
"""
If the delta between now and the timestamp in the request is larger than
C{timestamp_delta_limit}, the request is rejected.
"""
custOpts = {"timestamp_delta_limit": 1}
custOpts.update(resourceOptions)
session = MockPublisherSession(self)
resource = PublisherResource(custOpts, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'timestamp'] = [b"2011-10-14T16:59:51.123Z"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR464")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_invalid_nonce(self):
"""
An invalid nonce in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'nonce'] = [b"notanonce"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR462")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_nonce(self):
"""
A missing nonce in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'nonce']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_signature(self):
"""
A missing signature in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'signature']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_key(self):
"""
A missing key in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'key']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_no_seq(self):
"""
A missing sequence in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
del signedParams[b'seq']
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR461")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
@inlineCallbacks
def test_wrong_seq(self):
"""
        A malformed sequence number in a request should mean the request is rejected.
"""
session = MockPublisherSession(self)
resource = PublisherResource(resourceOptions, session)
signedParams = makeSignedArguments({}, "bazapp", "foobar", publishBody)
signedParams[b'seq'] = [b"notaseq"]
with LogCapturer() as l:
request = yield renderResource(
resource, b"/", method=b"POST",
headers={b"Content-Type": [b"application/json"]},
body=publishBody, params=signedParams)
self.assertEqual(request.code, 400)
errors = l.get_category("AR462")
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0]["code"], 400)
| agpl-3.0 | 3,433,306,572,119,676,000 | 35.082067 | 96 | 0.61486 | false |
Southpaw-TACTIC/TACTIC | src/tactic/startup/first_run_init.py | 1 | 4352 | ###########################################################
#
# Copyright (c) 2005-2008, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['FirstRunInit']
from pyasm.common import Common, Config, Environment, Common, TacticException, Container
import os, shutil
import sys
class FirstRunInit(object):
def execute(self):
self.copy_start()
# check to see if there is a server code
server_code = Config.get_value("install", "server")
#if not server_code:
# # generate one
# server_code = Common.generate_alphanum_key(3)
# Config.set_value("install", "server", server_code)
# Config.save_config()
def copy_start(self):
data_dir = Environment.get_data_dir(manual=True)
# check to see if the data folder already exists
print("\n")
print("Data Directory [%s]" % data_dir)
install_dir = Environment.get_install_dir()
# find criteria for initializing
initialize = False
if data_dir and not os.path.exists(data_dir):
initialize = True
if data_dir and not os.path.exists("%s/config" % data_dir):
initialize = True
if initialize:
# copy the template over. This should exist even if it is not used
print("... not found: initializing\n")
install_data_path = "%s/src/install/start" % (install_dir)
if os.path.exists(install_data_path):
dirnames = os.listdir(install_data_path)
for dirname in dirnames:
to_dir = "%s/%s" % (data_dir, dirname)
if os.path.exists(to_dir):
print("WARNING: path [%s] exists ... skipping copying" % to_dir)
continue
print("Copying to [%s]" % to_dir)
from_dir = "%s/%s" % (install_data_path, dirname)
shutil.copytree(from_dir, to_dir)
else:
shutil.copytree(install_data_path, data_dir)
# create the dist folder
to_dir = "%s/dist" % (data_dir)
if not os.path.exists(to_dir):
os.makedirs(to_dir)
# copy the appropriate config file
if os.name == 'nt':
filename = 'standalone_win32-conf.xml'
else:
filename = 'standalone_linux-conf.xml'
install_config_path = "%s/src/install/config/%s" % (install_dir,filename)
to_config_path = "%s/config/tactic-conf.xml" % data_dir
if not os.path.exists(to_config_path):
dirname = os.path.dirname(to_config_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
shutil.copy(install_config_path, to_config_path)
# some backwards compatibility issues
old_config_path = "%s/config/tactic_linux-conf.xml" % data_dir
if os.path.exists(old_config_path):
new_config_path = "%s/config/tactic-conf.xml" % data_dir
shutil.move(old_config_path, new_config_path)
config_path = Config.get_config_path()
config_exists = False
if os.path.exists(config_path):
config_exists = True
# insert the plugin path to run get_asset_dir()
plugin_dir = Environment.get_plugin_dir()
sys.path.insert(0, plugin_dir)
asset_dir = Environment.get_asset_dir()
print("Asset Directory [%s]" % asset_dir)
tmp_dir = Environment.get_tmp_dir()
print("Temp Directory [%s]" % tmp_dir)
        # check whether a config file already exists. If it does, don't do
        # anything further: this is likely a previous installation
if config_exists:
print("Config path [%s]" % config_path)
return
else:
# if there is no config, retrieve data_dir in non-manual mode
data_dir = Environment.get_data_dir()
f = open("%s/first_run" % data_dir, 'w')
f.write("")
f.close()
return
| epl-1.0 | 8,725,499,631,576,695,000 | 31.969697 | 88 | 0.55239 | false |
alfa-addon/addon | plugin.video.alfa/channels/help.py | 1 | 11830 | # -*- coding: utf-8 -*-
import os
from core.item import Item
from platformcode import config, logger, platformtools
from channelselector import get_thumb
if config.is_xbmc():
import xbmcgui
class TextBox(xbmcgui.WindowXMLDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.title = kwargs.get('title')
self.text = kwargs.get('text')
self.doModal()
def onInit(self):
try:
self.getControl(5).setText(self.text)
self.getControl(1).setLabel(self.title)
except:
pass
def onClick(self, control_id):
pass
def onFocus(self, control_id):
pass
def onAction(self, action):
# self.close()
if action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]:
self.close()
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
thumbnail=get_thumb("help.png"),
folder=False))
if config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Cómo reportar un error?",
thumbnail=get_thumb("help.png"),
folder=False, extra="report_error"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Se pueden activar/desactivar los canales?",
thumbnail=get_thumb("help.png"),
folder=False, extra="onoff_canales"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible la sincronización automática con Trakt?",
thumbnail=get_thumb("help.png"),
folder=False, extra="trakt_sync"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible mostrar todos los resultados juntos en el buscador global?",
thumbnail=get_thumb("help.png"),
folder=False, extra="buscador_juntos"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces tardan en aparecer.",
thumbnail=get_thumb("help.png"),
folder=False, extra="tiempo_enlaces"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - La búsqueda de contenido no se hace correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_busquedacont"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Algún canal no funciona correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="canal_fallo"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces Torrent no funcionan.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_torrent"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - No se actualiza correctamente la videoteca.",
thumbnail=get_thumb("help.png"),
folder=True, extra="prob_bib"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Enlaces de interés",
thumbnail=get_thumb("help.png"),
folder=False, extra=""))
return itemlist
def faq(item):
if item.extra == "onoff_canales":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto se puede hacer en 'Configuración'>'Activar/Desactivar canales'. "
"Puedes activar/desactivar los canales uno por uno o todos a la vez. ",
"¿Deseas gestionar ahora los canales?")
if respuesta == 1:
from channels import setting
setting.conf_tools(Item(extra='channels_onoff'))
elif item.extra == "trakt_sync":
respuesta = platformtools.dialog_yesno("Alfa",
"Actualmente se puede activar la sincronización (silenciosa) "
"tras marcar como visto un episodio (esto se hace automáticamente). "
"Esta opción se puede activar en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "tiempo_enlaces":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto puede mejorarse limitando el número máximo de "
"enlaces o mostrandolos en una ventana emergente. "
"Estas opciones se encuentran en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "prob_busquedacont":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede que no hayas escrito la ruta de la librería correctamente en "
"'Configuración'>'Preferencias'.\n"
"La ruta específicada debe ser exactamente la misma de la 'fuente' "
"introducida en 'Archivos' de la videoteca de Kodi.\n"
"AVANZADO: Esta ruta también se encuentra en 'sources.xml'.\n"
"También puedes estar experimentando problemas por estar "
"usando algun fork de Kodi y rutas con 'special://'. "
"SPMC, por ejemplo, tiene problemas con esto, y no parece tener solución, "
"ya que es un problema ajeno a Alfa que existe desde hace mucho.\n"
"Puedes intentar subsanar estos problemas en 'Configuración'>'Ajustes de "
"la videoteca', cambiando el ajuste 'Realizar búsqueda de contenido en' "
"de 'La carpeta de cada serie' a 'Toda la videoteca'."
"También puedes acudir a 'http://alfa-addon.com' en busca de ayuda.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "canal_fallo":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede ser que la página web del canal no funcione. "
"En caso de que funcione la página web puede que no seas el primero"
" en haberlo visto y que el canal este arreglado. "
"Puedes mirar en 'alfa-addon.com' o en el "
"repositorio de GitHub (github.com/alfa-addon/addon). "
"Si no encuentras el canal arreglado puedes reportar un "
"problema en el foro.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "prob_bib":
platformtools.dialog_ok("Alfa",
"Puede ser que hayas actualizado el plugin recientemente "
"y que las actualizaciones no se hayan aplicado del todo "
"bien. Puedes probar en 'Configuración'>'Otras herramientas', "
"comprobando los archivos *_data.json o "
"volviendo a añadir toda la videoteca.")
respuesta = platformtools.dialog_yesno("Alfa",
"¿Deseas acceder ahora a esa seccion?")
if respuesta == 1:
itemlist = []
from channels import setting
new_item = Item(channel="setting", action="submenu_tools", folder=True)
itemlist.extend(setting.submenu_tools(new_item))
return itemlist
elif item.extra == "prob_torrent":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puedes probar descargando el modulo 'libtorrent' de Kodi o "
"instalando algun addon como 'Quasar' o 'Torrenter', "
"los cuales apareceran entre las opciones de la ventana emergente "
"que aparece al pulsar sobre un enlace torrent. "
"'Torrenter' es más complejo pero también más completo "
"y siempre funciona.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "buscador_juntos":
respuesta = platformtools.dialog_yesno("Alfa",
"Si. La opcion de mostrar los resultados juntos "
"o divididos por canales se encuentra en "
"'setting'>'Ajustes del buscador global'>"
"'Otros ajustes'.",
"¿Deseas acceder a ahora dichos ajustes?")
if respuesta == 1:
from channels import search
search.settings("")
elif item.extra == "report_error":
from core import filetools
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:
log_name = "kodi.log"
ruta = filetools.translatePath("special://logpath") + log_name
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Para reportar un problema en 'http://alfa-addon.com' es necesario:\n"
" - Versión que usas de Alfa.\n"
" - Versión que usas de kodi, mediaserver, etc.\n"
" - Versión y nombre del sistema operativo que usas.\n"
" - Nombre del skin (en el caso que uses Kodi) y si se "
"te ha resuelto el problema al usar el skin por defecto.\n"
" - Descripción del problema y algún caso de prueba.\n"
" - Agregar el log en modo detallado, una vez hecho esto, "
"zipea el log y lo puedes adjuntar en un post.\n\n"
"Para activar el log en modo detallado, ingresar a:\n"
" - Configuración.\n"
" - Preferencias.\n"
" - En la pestaña General - Marcar la opción: Generar log detallado.\n\n"
"El archivo de log detallado se encuentra en la siguiente ruta: \n\n"
"%s" % ruta)
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
else:
platformtools.dialog_ok("Alfa",
"Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\n"
"Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com")
| gpl-3.0 | -6,673,242,564,879,324,000 | 50.598214 | 119 | 0.517484 | false |
kkamkou/gitmostwanted.com | tests/unit/lib/test_bigquery.py | 1 | 2352 | from unittest import TestCase
from gitmostwanted.lib.bigquery.result import ResultJob
class LibBigQueryResultTestCase(TestCase):
def setUp(self):
pass
def test_convert_incoming_obj(self):
result = ResultJob(self.response_example())
self.assertEqual(len(result), 2)
self.assertEqual(next(result), ['29028775', 'facebook/react-native', '225'])
self.assertEqual(next(result), ['29028776', 'facebook/react-native2', '226'])
self.assertRaises(StopIteration, next, result)
def test_convert_incoming_empty_obj(self):
result = ResultJob(self.response_example_empty())
self.assertEqual(len(result), 0)
self.assertRaises(StopIteration, next, result)
def response_example_empty(self):
data = self.response_example()
data['rows'] = []
data['totalRows'] = 0
return data
def response_example(self):
return {
'cacheHit': False,
'jobComplete': True,
'jobReference': {
'jobId': 'job_123-4567',
'projectId': 'my-project-1234567890'
},
'kind': 'bigquery#queryResponse',
'rows': [
{
'f': [
{'v': '29028775'},
{'v': 'facebook/react-native'},
{'v': '225'}
]
},
{
'f': [
{'v': '29028776'},
{'v': 'facebook/react-native2'},
{'v': '226'}
]
}
],
'schema': {
'fields': [
{
'mode': 'NULLABLE',
'name': 'repo_id',
'type': 'INTEGER'
},
{
'mode': 'NULLABLE',
'name': 'repo_name',
'type': 'STRING'
},
{
'mode': 'NULLABLE',
'name': 'cnt',
'type': 'INTEGER'
}
]
},
'totalBytesProcessed': '5568164',
'totalRows': '2'
}
| mit | -4,338,712,916,941,077,500 | 30.36 | 85 | 0.39966 | false |
jakobzhao/qq-xingyun | qqcrawler/report.py | 1 | 2335 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: [email protected]
# @website: http://yenching.org
# @organization: Harvard Kennedy School
# libraries
import socket
import smtplib
from pymongo import MongoClient
from qqcrawler.settings import EMAIL_PASSWORD
from log import *
# recipients are taken from settings['pis']
# example: brief_report(settings), where settings defines 'pis', 'project', 'address' and 'port'
# funcs
def brief_report(settings):
pis = settings['pis']
project = settings['project']
address = settings['address']
port = settings['port']
sender = '[email protected]'
username = '[email protected]'
t = datetime.datetime.now().strftime('%Y-%m-%d')
pi_str = ''
for pi in pis:
pi_str += (pi + ';')
now = datetime.datetime.now()
utc_now_1 = now - datetime.timedelta(days=1)
utc_now_2 = now - datetime.timedelta(days=2)
utc_now_5 = now - datetime.timedelta(days=5)
# For post information
client = MongoClient(address, port)
db = client[project]
total_posts = db.pages.find().count()
count_1 = db.pages.find({"timestamp": {"$gt": utc_now_1}}).count()
count_2 = db.pages.find({"timestamp": {"$gt": utc_now_2}}).count()
count_5 = db.pages.find({"timestamp": {"$gt": utc_now_5}}).count()
line_2 = "Total posts: %d" % total_posts
line_3 = "Within the past 24 hours: %d collected" % count_1
line_4 = "Within the past 2 days: %d collected" % count_2
line_5 = "Within the past 5 days: %d collected" % count_5
msg = '''From: Weibo Crawler Server <[email protected]>
To: ''' + pi_str[:-1] + '''
Subject: [''' + t + '''] Daily Briefing for ''' + project.capitalize() + ''' Project
MIME-Version: 1.0
Dear PI(s),
Here is a briefing about the progress of Weibo data harvest:
''' + line_2 + '''
''' + line_3 + '''
''' + line_4 + '''
''' + line_5 + '''
--
Sent from the Weibo Cralwer Server.'''
# The actual mail send
try:
server = smtplib.SMTP()
server.connect('smtp.gmail.com', '587')
server.ehlo()
server.starttls()
server.login(username, EMAIL_PASSWORD)
server.sendmail(sender, pis, msg)
server.quit()
except socket.gaierror, e:
print str(e) + "/n error raises when sending E-mails."
| lgpl-3.0 | -3,129,869,230,584,553,500 | 27.47561 | 84 | 0.604711 | false |
eustislab/horton | doc/conf.py | 1 | 9564 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#
# HORTON documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 17 11:14:50 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'breathe']
mathjax_path = "https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HORTON'
copyright = u'2011-2015, The HORTON Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def get_version_release():
# get a version string
import re, subprocess
try:
# The full version number, including alpha/beta/rc tags.
release = subprocess.check_output(['git', 'describe']).strip()
except (subprocess.CalledProcessError, OSError) as e:
        # fall back to the default release
release = '2.0.0-nogit'
    # Chop off at the first dash, if any, to get plain X.Y.Z (e.g. '2.0.0-12-gabc123' becomes '2.0.0')
if '-' in release:
version = release.split('-')[0]
else:
version = release
return version, release
# version = short X.Y.Z, release = full thing
version, release = get_version_release()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo_only': True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'horton.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HORTONdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'horton.tex', u'HORTON Documentation',
u'The HORTON Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'horton', u'HORTON Documentation',
[u'The HORTON Development Team'], 1)
]
# -- Custom HORTON-specific settings -------------------------------------------
breathe_projects = { "horton": "doxyxml" }
breathe_default_project = "horton"
pngmath_latex_preamble = r"\usepackage{color,amsmath}"
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma 1.6', '-D 120']
sys.path.append('../')
os.environ['HORTONDATA'] = '../data'
autoclass_content = "class"
autodoc_member_order = "groupwise"
autodoc_default_flags = ['members', 'undoc-members', 'inherited-members', 'show-inheritance']
def autodoc_skip_member(app, what, name, obj, skip, options):
if what=="class" and name=="__init__":
return False
if what=="class" and name=="__call__":
return False
if what=="class" and name=="__getitem__":
return False
if name.startswith("_"):
return True
return False
def setup(app):
from sphinx.ext.autodoc import cut_lines
app.connect("autodoc-skip-member", autodoc_skip_member)
app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
app.add_stylesheet("custom.css")
| gpl-3.0 | -3,263,852,963,252,831,000 | 32.795053 | 95 | 0.700544 | false |
chipaca/snapcraft | snapcraft/internal/project_loader/_extensions/kde_neon.py | 1 | 4701 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Import types and tell flake8 to ignore the "unused" List.
from collections import namedtuple
from typing import Any, Dict, Optional, Tuple
from ._extension import Extension
_ExtensionInfo = namedtuple("ExtensionInfo", "cmake_args content provider build_snaps")
_Info = dict(
core18=_ExtensionInfo(
cmake_args=None,
content="kde-frameworks-5-core18-all",
provider="kde-frameworks-5-core18",
build_snaps=["kde-frameworks-5-core18-sdk/latest/stable"],
),
core20=_ExtensionInfo(
cmake_args="-DCMAKE_FIND_ROOT_PATH=/snap/kde-frameworks-5-qt-5-15-core20-sdk/current",
content="kde-frameworks-5-qt-5-15-core20-all",
provider="kde-frameworks-5-qt-5-15-core20",
build_snaps=["kde-frameworks-5-qt-5-15-core20-sdk/latest/candidate"],
),
)
class ExtensionImpl(Extension):
"""The KDE Neon extension.
This extension makes it easy to assemble KDE based applications
using the Neon stack.
It configures each application with the following plugs:
\b
- Common Icon Themes.
- Common Sound Themes.
- The Qt5 and KDE Frameworks runtime libraries and utilities.
For easier desktop integration, it also configures each application
entry with these additional plugs:
\b
- desktop (https://snapcraft.io/docs/desktop-interface)
- desktop-legacy (https://snapcraft.io/docs/desktop-legacy-interface)
- opengl (https://snapcraft.io/docs/opengl-interface)
- wayland (https://snapcraft.io/docs/wayland-interface)
- x11 (https://snapcraft.io/docs/x11-interface)
"""
@staticmethod
def is_experimental(base: Optional[str]) -> bool:
# TODO: remove experimental once sdk is on stable
return base == "core20"
@staticmethod
def get_supported_bases() -> Tuple[str, ...]:
return ("core18", "core20")
@staticmethod
def get_supported_confinement() -> Tuple[str, ...]:
return ("strict", "devmode")
def __init__(self, *, extension_name: str, yaml_data: Dict[str, Any]) -> None:
super().__init__(extension_name=extension_name, yaml_data=yaml_data)
info = _Info[yaml_data["base"]]
self.root_snippet = {
"assumes": ["snapd2.43"], # for 'snapctl is-connected'
"plugs": {
"icon-themes": {
"interface": "content",
"target": "$SNAP/data-dir/icons",
"default-provider": "gtk-common-themes",
},
"sound-themes": {
"interface": "content",
"target": "$SNAP/data-dir/sounds",
"default-provider": "gtk-common-themes",
},
"kde-frameworks-5-plug": {
"content": info.content,
"interface": "content",
"default-provider": info.provider,
"target": "$SNAP/kf5",
},
},
"environment": {"SNAP_DESKTOP_RUNTIME": "$SNAP/kf5"},
"hooks": {
"configure": {
"plugs": ["desktop"],
"command-chain": ["snap/command-chain/hooks-configure-desktop"],
}
},
}
if info.cmake_args is not None:
self.part_snippet = {
"build-environment": [{"SNAPCRAFT_CMAKE_ARGS": info.cmake_args}]
}
self.app_snippet = {
"command-chain": ["snap/command-chain/desktop-launch"],
"plugs": ["desktop", "desktop-legacy", "opengl", "wayland", "x11"],
}
self.parts = {
"kde-neon-extension": {
"source": "$SNAPCRAFT_EXTENSIONS_DIR/desktop",
"source-subdir": "kde-neon",
"plugin": "make",
"make-parameters": ["PLATFORM_PLUG=kde-frameworks-5-plug"],
"build-packages": ["g++"],
"build-snaps": info.build_snaps,
}
}
| gpl-3.0 | -2,491,254,435,194,227,700 | 34.613636 | 94 | 0.581578 | false |
mitodl/open-discussions | notifications/api.py | 1 | 9247 | """Notifications API"""
import logging
from django.conf import settings
from django.db.models import Q
from django.contrib.auth.models import User
from channels.models import Subscription, ChannelGroupRole, Channel
from channels.api import get_admin_api
from channels.constants import ROLE_MODERATORS
from notifications.notifiers.exceptions import (
UnsupportedNotificationTypeError,
CancelNotificationError,
)
from notifications.models import (
EmailNotification,
PostEvent,
NotificationSettings,
NOTIFICATION_TYPE_FRONTPAGE,
NOTIFICATION_TYPE_COMMENTS,
NOTIFICATION_TYPE_MODERATOR,
FREQUENCY_IMMEDIATE,
FREQUENCY_DAILY,
FREQUENCY_WEEKLY,
FREQUENCY_NEVER,
)
from notifications.notifiers import comments, frontpage, moderator_posts
from notifications import tasks
from open_discussions.utils import chunks
log = logging.getLogger()
def ensure_notification_settings(user, skip_moderator_setting=False):
"""
Populates user with notification settings
Args:
user (User): user to create settings for
        skip_moderator_setting (boolean): Skip moderator notification creation
"""
existing_notification_types = NotificationSettings.objects.filter(
user=user
).values_list("notification_type", flat=True)
if NOTIFICATION_TYPE_FRONTPAGE not in existing_notification_types:
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_FRONTPAGE,
defaults={"trigger_frequency": FREQUENCY_DAILY},
)
if NOTIFICATION_TYPE_COMMENTS not in existing_notification_types:
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_COMMENTS,
defaults={"trigger_frequency": FREQUENCY_IMMEDIATE},
)
if not skip_moderator_setting:
for channel_group_role in ChannelGroupRole.objects.filter(
group__user=user, role=ROLE_MODERATORS
):
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel=channel_group_role.channel,
defaults={"trigger_frequency": FREQUENCY_IMMEDIATE},
)
def attempt_send_notification_batch(notification_settings_ids):
"""
Attempts to send notification for the given batch of ids
Args:
notification_settings_ids (list of int): list of NotificationSettings.ids
"""
notification_settings = NotificationSettings.objects.filter(
id__in=notification_settings_ids
)
for notification_setting in notification_settings:
try:
notifier = frontpage.FrontpageDigestNotifier(notification_setting)
notifier.attempt_notify()
except: # pylint: disable=bare-except
log.exception(
"Error attempting notification for user %s", notification_setting.user
)
def get_daily_frontpage_settings_ids():
"""Returns daily frontpage digest NotificationSettings"""
return (
NotificationSettings.frontpage_settings()
.filter(trigger_frequency=FREQUENCY_DAILY)
.filter(user__is_active=True)
.values_list("id", flat=True)
.order_by("id")
.iterator()
)
def get_weekly_frontpage_settings_ids():
"""Returns weekly frontpage digest NotificationSettings"""
return (
NotificationSettings.frontpage_settings()
.filter(trigger_frequency=FREQUENCY_WEEKLY)
.filter(user__is_active=True)
.values_list("id", flat=True)
.order_by("id")
.iterator()
)
def _get_notifier_for_notification(notification):
"""
Get the notifier for the notification's type
Args:
notification (NotificationBase): the notification to get a notifier for
Returns:
Notifier: instance of the notifier to use
"""
if notification.notification_type == NOTIFICATION_TYPE_MODERATOR:
channel_api = get_admin_api()
event = PostEvent.objects.get(email_notification=notification)
channel_name = channel_api.get_post(event.post_id).subreddit.display_name
notification_settings = NotificationSettings.objects.get(
user=notification.user,
notification_type=notification.notification_type,
channel__name=channel_name,
)
else:
notification_settings = NotificationSettings.objects.get(
user=notification.user, notification_type=notification.notification_type
)
if notification.notification_type == NOTIFICATION_TYPE_FRONTPAGE:
return frontpage.FrontpageDigestNotifier(notification_settings)
elif notification.notification_type == NOTIFICATION_TYPE_COMMENTS:
return comments.CommentNotifier(notification_settings)
elif notification.notification_type == NOTIFICATION_TYPE_MODERATOR:
return moderator_posts.ModeratorPostsNotifier(notification_settings)
else:
raise UnsupportedNotificationTypeError(
"Notification type '{}' is unsupported".format(
notification.notification_type
)
)
def send_unsent_email_notifications():
"""
Send all notifications that haven't been sent yet
"""
for notification_ids in chunks(
EmailNotification.objects.filter(state=EmailNotification.STATE_PENDING)
.exclude(notification_type=NOTIFICATION_TYPE_FRONTPAGE)
.values_list("id", flat=True),
chunk_size=settings.NOTIFICATION_SEND_CHUNK_SIZE,
):
EmailNotification.objects.filter(id__in=notification_ids).update(
state=EmailNotification.STATE_SENDING
)
tasks.send_email_notification_batch.delay(notification_ids)
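    # Frontpage digests are queued separately so they are routed to the
    # digest-specific task below instead of the generic sender above.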
for notification_ids in chunks(
EmailNotification.objects.filter(
state=EmailNotification.STATE_PENDING,
notification_type=NOTIFICATION_TYPE_FRONTPAGE,
).values_list("id", flat=True),
chunk_size=settings.NOTIFICATION_SEND_CHUNK_SIZE,
):
EmailNotification.objects.filter(id__in=notification_ids).update(
state=EmailNotification.STATE_SENDING
)
tasks.send_frontpage_email_notification_batch.delay(notification_ids)
def send_email_notification_batch(notification_ids):
"""
Sends a batch of notifications
Args:
notification_ids (list of int): notification ids to send
"""
for notification in EmailNotification.objects.filter(id__in=notification_ids):
try:
notifier = _get_notifier_for_notification(notification)
notifier.send_notification(notification)
except CancelNotificationError:
log.debug("EmailNotification canceled: %s", notification.id)
notification.state = EmailNotification.STATE_CANCELED
notification.save()
except: # pylint: disable=bare-except
log.exception("Error sending notification %s", notification)
def send_comment_notifications(post_id, comment_id, new_comment_id):
"""
Sends notifications for a reply to a given post notification
Args:
post_id (str): base36 post id
comment_id (str): base36 comment id
new_comment_id (str): base36 comment id of the new comment
"""
for subscription in (
Subscription.objects.filter(post_id=post_id)
.filter(Q(comment_id=comment_id) | Q(comment_id=None))
.distinct("user")
.iterator()
):
try:
notification_settings = NotificationSettings.objects.get(
user_id=subscription.user_id,
notification_type=NOTIFICATION_TYPE_COMMENTS,
)
except NotificationSettings.DoesNotExist:
log.exception(
"NotificationSettings didn't exist for subscription %s", subscription.id
)
continue
notifier = comments.CommentNotifier(notification_settings)
notifier.create_comment_event(subscription, new_comment_id)
def send_moderator_notifications(post_id, channel_name):
"""
Sends post notifications to channel moderators
Args:
post_id (str): base36 post id
channel_name (str): channel_name
"""
channel_api = get_admin_api()
for moderator in channel_api.list_moderators(channel_name):
self_user = User.objects.get(username=moderator.name)
try:
notification_setting = NotificationSettings.objects.get(
user=self_user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel__name=channel_name,
)
except NotificationSettings.DoesNotExist:
channel = Channel.objects.get(name=channel_name)
notification_setting = NotificationSettings.objects.create(
user=self_user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel=channel,
trigger_frequency=FREQUENCY_NEVER,
)
notifier = moderator_posts.ModeratorPostsNotifier(notification_setting)
notifier.create_moderator_post_event(self_user, post_id)
| bsd-3-clause | 3,742,379,848,601,232,400 | 34.293893 | 88 | 0.670704 | false |
datafiniti/Diamond | src/collectors/postgres/postgres.py | 1 | 15406 | # coding=utf-8
"""
Collect metrics from postgresql
#### Dependencies
* psycopg2
"""
import diamond.collector
from diamond.collector import str_to_bool
try:
import psycopg2
import psycopg2.extras
psycopg2 # workaround for pyflakes issue #13
except ImportError:
psycopg2 = None
class PostgresqlCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(PostgresqlCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname',
'user': 'Username',
'password': 'Password',
'port': 'Port number',
'underscore': 'Convert _ to .',
'extended': 'Enable collection of extended database stats.',
'metrics': 'List of enabled metrics to collect'
})
return config_help
def get_default_config(self):
"""
Return default config.
"""
config = super(PostgresqlCollector, self).get_default_config()
config.update({
'path': 'postgres',
'host': 'localhost',
'user': 'postgres',
'password': 'postgres',
'port': 5432,
'underscore': False,
'extended': False,
'method': 'Threaded',
'metrics': []
})
return config
def collect(self):
if psycopg2 is None:
self.log.error('Unable to import module psycopg2')
return {}
# Create database-specific connections
self.connections = {}
for db in self._get_db_names():
self.connections[db] = self._connect(database=db)
if self.config['metrics']:
metrics = self.config['metrics']
elif str_to_bool(self.config['extended']):
metrics = registry['extended']
else:
metrics = registry['basic']
# Iterate every QueryStats class
for metric_name in set(metrics):
if metric_name not in metrics_registry:
continue
klass = metrics_registry[metric_name]
stat = klass(self.connections, underscore=self.config['underscore'])
stat.fetch()
for metric, value in stat:
if value is not None:
self.publish(metric, value)
# Cleanup
[conn.close() for conn in self.connections.itervalues()]
def _get_db_names(self):
query = """
SELECT datname FROM pg_database
WHERE datallowconn AND NOT datistemplate
AND NOT datname='postgres' ORDER BY 1
"""
conn = self._connect()
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(query)
datnames = [d['datname'] for d in cursor.fetchall()]
conn.close()
        # The query above already excludes the `postgres` database; fall back
        # to it only if no other database is available (required for querying
        # pg_stat_database)
if not datnames:
datnames = ['postgres']
return datnames
def _connect(self, database=None):
conn_args = {
'host': self.config['host'],
'user': self.config['user'],
'password': self.config['password'],
'port': self.config['port']
}
if database:
conn_args['database'] = database
else:
conn_args['database'] = 'postgres'
conn = psycopg2.connect(**conn_args)
# Avoid using transactions, set isolation level to autocommit
conn.set_isolation_level(0)
return conn
class QueryStats(object):
def __init__(self, conns, parameters=None, underscore=False):
self.connections = conns
self.underscore = underscore
self.parameters = parameters
def _translate_datname(self, db):
if self.underscore:
db = db.replace("_", ".")
return db
def fetch(self):
self.data = list()
for db, conn in self.connections.iteritems():
cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute(self.query, self.parameters)
for row in cursor.fetchall():
# If row is length 2, assume col1, col2 forms key: value
if len(row) == 2:
self.data.append({
'datname': self._translate_datname(db),
'metric': row[0],
'value': row[1],
})
# If row > length 2, assume each column name maps to
# key => value
else:
for key, value in row.iteritems():
if key in ('datname', 'schemaname', 'relname',
'indexrelname',):
continue
self.data.append({
'datname': self._translate_datname(row.get(
'datname', db)),
'schemaname': row.get('schemaname', None),
'relname': row.get('relname', None),
'indexrelname': row.get('indexrelname', None),
'metric': key,
'value': value,
})
# Setting multi_db to True will run this query on all known
# databases. This is bad for queries that hit views like
# pg_database, which are shared across databases.
#
# If multi_db is False, bail early after the first query
# iteration. Otherwise, continue to remaining databases.
if not self.multi_db:
break
def __iter__(self):
for data_point in self.data:
yield (self.path % data_point, data_point['value'])
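# Each QueryStats subclass below defines a metric `path` template (rendered with
# the row's datname/schemaname/relname/etc.), a SQL `query`, and a `multi_db`
# flag saying whether the query is run against every database or only once.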
class DatabaseStats(QueryStats):
"""
Database-level summary stats
"""
path = "database.%(datname)s.%(metric)s"
multi_db = False
query = """
SELECT pg_stat_database.datname as datname,
pg_stat_database.numbackends as numbackends,
pg_stat_database.xact_commit as xact_commit,
pg_stat_database.xact_rollback as xact_rollback,
pg_stat_database.blks_read as blks_read,
pg_stat_database.blks_hit as blks_hit,
pg_stat_database.tup_returned as tup_returned,
pg_stat_database.tup_fetched as tup_fetched,
pg_stat_database.tup_inserted as tup_inserted,
pg_stat_database.tup_updated as tup_updated,
pg_stat_database.tup_deleted as tup_deleted,
pg_database_size(pg_database.datname) AS size
FROM pg_database
JOIN pg_stat_database
ON pg_database.datname = pg_stat_database.datname
WHERE pg_stat_database.datname
NOT IN ('template0','template1','postgres')
"""
class UserTableStats(QueryStats):
path = "%(datname)s.tables.%(schemaname)s.%(relname)s.%(metric)s"
multi_db = True
query = """
SELECT relname,
schemaname,
seq_scan,
seq_tup_read,
idx_scan,
idx_tup_fetch,
n_tup_ins,
n_tup_upd,
n_tup_del,
n_tup_hot_upd,
n_live_tup,
n_dead_tup
FROM pg_stat_user_tables
"""
class UserIndexStats(QueryStats):
path = "%(datname)s.indexes.%(schemaname)s.%(relname)s." \
"%(indexrelname)s.%(metric)s"
multi_db = True
query = """
SELECT relname,
schemaname,
indexrelname,
idx_scan,
idx_tup_read,
idx_tup_fetch
FROM pg_stat_user_indexes
"""
class UserTableIOStats(QueryStats):
path = "%(datname)s.tables.%(schemaname)s.%(relname)s.%(metric)s"
multi_db = True
query = """
SELECT relname,
schemaname,
heap_blks_read,
heap_blks_hit,
idx_blks_read,
idx_blks_hit
FROM pg_statio_user_tables
"""
class UserIndexIOStats(QueryStats):
path = "%(datname)s.indexes.%(schemaname)s.%(relname)s." \
"%(indexrelname)s.%(metric)s"
multi_db = True
query = """
SELECT relname,
schemaname,
indexrelname,
idx_blks_read,
idx_blks_hit
FROM pg_statio_user_indexes
"""
class ConnectionStateStats(QueryStats):
path = "%(datname)s.connections.%(metric)s"
multi_db = True
query = """
SELECT tmp.state AS key,COALESCE(count,0) FROM
(VALUES ('active'),
('waiting'),
('idle'),
('idletransaction'),
('unknown')
) AS tmp(state)
LEFT JOIN
(SELECT CASE WHEN waiting THEN 'waiting'
WHEN current_query='<IDLE>' THEN 'idle'
WHEN current_query='<IDLE> in transaction'
THEN 'idletransaction'
WHEN current_query='<insufficient privilege>'
THEN 'unknown'
ELSE 'active' END AS state,
count(*) AS count
FROM pg_stat_activity
WHERE procpid != pg_backend_pid()
GROUP BY CASE WHEN waiting THEN 'waiting'
WHEN current_query='<IDLE>' THEN 'idle'
WHEN current_query='<IDLE> in transaction'
THEN 'idletransaction'
WHEN current_query='<insufficient privilege>'
THEN 'unknown' ELSE 'active' END
) AS tmp2
ON tmp.state=tmp2.state ORDER BY 1
"""
class LockStats(QueryStats):
path = "%(datname)s.locks.%(metric)s"
multi_db = False
query = """
SELECT lower(mode) AS key,
count(*) AS value
FROM pg_locks
WHERE database IS NOT NULL
GROUP BY mode ORDER BY 1
"""
class RelationSizeStats(QueryStats):
path = "%(datname)s.sizes.%(schemaname)s.%(relname)s.%(metric)s"
multi_db = True
query = """
SELECT pg_class.relname,
pg_namespace.nspname as schemaname,
pg_relation_size(pg_class.oid) as relsize
FROM pg_class
INNER JOIN
pg_namespace
ON pg_namespace.oid = pg_class.relnamespace
WHERE reltype != 0
AND relkind != 'S'
AND nspname NOT IN ('pg_catalog', 'information_schema')
"""
class BackgroundWriterStats(QueryStats):
path = "bgwriter.%(metric)s"
multi_db = False
query = """
SELECT checkpoints_timed,
checkpoints_req,
buffers_checkpoint,
buffers_clean,
maxwritten_clean,
buffers_backend,
buffers_alloc
FROM pg_stat_bgwriter
"""
class WalSegmentStats(QueryStats):
path = "wals.%(metric)s"
multi_db = False
query = """
SELECT count(*) AS segments
FROM pg_ls_dir('pg_xlog') t(fn)
WHERE fn ~ '^[0-9A-Z]{24}\$'
"""
class TransactionCount(QueryStats):
path = "transactions.%(metric)s"
multi_db = False
query = """
SELECT 'commit' AS type,
sum(pg_stat_get_db_xact_commit(oid))
FROM pg_database
UNION ALL
SELECT 'rollback',
sum(pg_stat_get_db_xact_rollback(oid))
FROM pg_database
"""
class IdleInTransactions(QueryStats):
path = "%(datname)s.longest_running.%(metric)s"
multi_db = True
query = """
SELECT 'idle_in_transaction',
max(COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0))
AS idle_in_transaction
FROM pg_stat_activity
WHERE current_query = '<IDLE> in transaction'
GROUP BY 1
"""
class LongestRunningQueries(QueryStats):
path = "%(datname)s.longest_running.%(metric)s"
multi_db = True
query = """
SELECT 'query',
COALESCE(max(extract(epoch FROM CURRENT_TIMESTAMP-query_start)),0)
FROM pg_stat_activity
WHERE current_query NOT LIKE '<IDLE%'
UNION ALL
SELECT 'transaction',
COALESCE(max(extract(epoch FROM CURRENT_TIMESTAMP-xact_start)),0)
FROM pg_stat_activity
WHERE 1=1
"""
class UserConnectionCount(QueryStats):
path = "%(datname)s.user_connections.%(metric)s"
multi_db = True
query = """
SELECT usename,
count(*) as count
FROM pg_stat_activity
WHERE procpid != pg_backend_pid()
GROUP BY usename
ORDER BY 1
"""
class DatabaseConnectionCount(QueryStats):
path = "database.%(metric)s.connections"
multi_db = False
query = """
SELECT datname,
count(datname) as connections
FROM pg_stat_activity
GROUP BY pg_stat_activity.datname
"""
class TableScanStats(QueryStats):
path = "%(datname)s.scans.%(metric)s"
multi_db = True
query = """
SELECT 'relname' AS relname,
COALESCE(sum(seq_scan),0) AS sequential,
COALESCE(sum(idx_scan),0) AS index
FROM pg_stat_user_tables
"""
class TupleAccessStats(QueryStats):
path = "%(datname)s.tuples.%(metric)s"
multi_db = True
query = """
SELECT COALESCE(sum(seq_tup_read),0) AS seqread,
COALESCE(sum(idx_tup_fetch),0) AS idxfetch,
COALESCE(sum(n_tup_ins),0) AS inserted,
COALESCE(sum(n_tup_upd),0) AS updated,
COALESCE(sum(n_tup_del),0) AS deleted,
COALESCE(sum(n_tup_hot_upd),0) AS hotupdated
FROM pg_stat_user_tables
"""
metrics_registry = {
'DatabaseStats': DatabaseStats,
'DatabaseConnectionCount': DatabaseConnectionCount,
'UserTableStats': UserTableStats,
'UserIndexStats': UserIndexStats,
'UserTableIOStats': UserTableIOStats,
'UserIndexIOStats': UserIndexIOStats,
'ConnectionStateStats': ConnectionStateStats,
'LockStats': LockStats,
'RelationSizeStats': RelationSizeStats,
'BackgroundWriterStats': BackgroundWriterStats,
'WalSegmentStats': WalSegmentStats,
'TransactionCount': TransactionCount,
'IdleInTransactions': IdleInTransactions,
'LongestRunningQueries': LongestRunningQueries,
'UserConnectionCount': UserConnectionCount,
'TableScanStats': TableScanStats,
'TupleAccessStats': TupleAccessStats,
}
registry = {
'basic': (
'DatabaseStats',
'DatabaseConnectionCount',
),
'extended': (
'DatabaseStats',
'DatabaseConnectionCount',
'UserTableStats',
'UserIndexStats',
'UserTableIOStats',
'UserIndexIOStats',
'ConnectionStateStats',
'LockStats',
'RelationSizeStats',
'BackgroundWriterStats',
'WalSegmentStats',
'TransactionCount',
'IdleInTransactions',
'LongestRunningQueries',
'UserConnectionCount',
'TableScanStats',
'TupleAccessStats',
),
}
| mit | 6,480,879,459,344,446,000 | 29.997988 | 80 | 0.541867 | false |
kosqx/srcnip | tests/tests.py | 1 | 10084 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import random
import unittest
from srcnip.languages import languages
from srcnip.storage import Snippet, MemoryStorage, parse_timediff
from srcnip.parser import parse, simplify, parse_tags, ParseError, LexerError, SyntaxError
class LanguagesTestCase(unittest.TestCase):
def testGetItemPython(self):
py = languages['py']
self.assertEquals(py.name, 'Python')
self.assertEquals(py.code, 'python')
self.assertTrue('python' in py.codes)
self.assertTrue('py' in py.codes)
self.assertTrue('.py' in py.codes)
self.assertTrue('.pyw' in py.codes)
def testGetItemUnexisting(self):
def do_get():
return languages['unexisting']
self.assertRaises(KeyError, do_get)
def testContains(self):
self.assertTrue('Python' in languages)
self.assertTrue('python' in languages)
self.assertTrue('py' in languages)
self.assertTrue('.py' in languages)
self.assertFalse('Foo' in languages)
self.assertFalse(None in languages)
def testNames(self):
names = languages.get_names()
self.assertTrue('Python' in names)
self.assertTrue('JavaScript' in names)
self.assertFalse('python' in names)
self.assertFalse('javascript' in names)
def testNamesSort(self):
names = languages.get_names()
self.assertEquals(names, sorted(names))
class SnippetTestCase(unittest.TestCase):
def testAttr(self):
sn = Snippet('a = "żółw"', 'py var', 'py')
self.assertEquals(sn.id, None)
self.assertEquals(sn.code, u'a = "żółw"')
self.assertEquals(sn.tags, set([u'py', u'var']))
self.assertEquals(sn.lang, u'python')
def testTags(self):
self.assertEquals(Snippet('', '', 'py').tags, set())
self.assertEquals(Snippet('', u'', 'py').tags, set())
self.assertEquals(Snippet('', (), 'py').tags, set())
self.assertEquals(Snippet('', [], 'py').tags, set())
self.assertEquals(Snippet('', set(), 'py').tags, set())
self.assertEquals(Snippet('', 'py var', 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', u'py var', 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', ['py', 'var'], 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', [u'py', u'var'], 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', ('py', 'var'), 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', (u'py', u'var'), 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', set(['py', 'var']), 'py').tags, set([u'py', u'var']))
self.assertEquals(Snippet('', set([u'py', u'var']), 'py').tags, set([u'py', u'var']))
def testLang(self):
self.assertEquals(Snippet('', '', 'Python').lang, 'python')
self.assertEquals(Snippet('', '', 'python').lang, 'python')
self.assertEquals(Snippet('', '', 'py' ).lang, 'python')
self.assertEquals(Snippet('', '', '.py' ).lang, 'python')
self.assertEquals(Snippet('', '', '' ).lang, None)
self.assertEquals(Snippet('', '', None ).lang, None)
class ParserTestCase(unittest.TestCase):
    # TODO: more tests: exceptions, more complex expressions
def testAtom(self):
self.assertEquals(parse('*'), ('all', ))
self.assertEquals(parse(' '), ('none', ))
self.assertEquals(parse('foo'), ('tag', 'foo'))
self.assertEquals(parse('*oo'), ('ltag', 'oo'))
self.assertEquals(parse('fo*'), ('rtag', 'fo'))
self.assertEquals(parse('*o*'), ('btag', 'o'))
self.assertEquals(parse('"foo"'), ('text', 'foo'))
self.assertEquals(parse('"foo\\"bar\\"baz"'), ('text', 'foo"bar"baz'))
self.assertEquals(parse('/foo/'), ('regexp', 'foo'))
self.assertEquals(parse('/foo\\/bar/'), ('regexp', 'foo/bar'))
self.assertEquals(parse('.foo'), ('ext', '.foo'))
self.assertEquals(parse('foo:bar'), ('foo', 'bar'))
def testSpace(self):
self.assertEquals(parse('foo'), ('tag', 'foo'))
self.assertEquals(parse('\t \tfoo'), ('tag', 'foo'))
self.assertEquals(parse('foo \t'), ('tag', 'foo'))
self.assertEquals(parse(' \tfoo\t '), ('tag', 'foo'))
def testNot(self):
self.assertEquals(parse('!foo'), ('not', ('tag', 'foo')))
self.assertEquals(parse('NOT foo'), ('not', ('tag', 'foo')))
self.assertEquals(parse('!NOT foo'), ('not', ('not', ('tag', 'foo'))))
def testAnd(self):
self.assertEquals(parse('foo.bar'), ('and', ('tag', 'foo'), ('ext', '.bar')))
self.assertEquals(parse('foo bar'), ('and', ('tag', 'foo'), ('tag', 'bar')))
self.assertEquals(parse('foo bar baz'), ('and', ('tag', 'foo'), ('tag', 'bar'), ('tag', 'baz')))
self.assertEquals(parse('a b c d'), ('and', ('tag', 'a'), ('tag', 'b'), ('tag', 'c'), ('tag', 'd')))
self.assertEquals(parse('a AND b & c && d'), ('and', ('tag', 'a'), ('tag', 'b'), ('tag', 'c'), ('tag', 'd')))
self.assertEquals(parse('a OR b | c || d'), ('or', ('tag', 'a'), ('tag', 'b'), ('tag', 'c'), ('tag', 'd')))
def testOr(self):
self.assertEquals(parse('a b OR c d'), ('or', ('and', ('tag', 'a'), ('tag', 'b')), ('and', ('tag', 'c'), ('tag', 'd'))))
self.assertEquals(parse('a b | c d'), ('or', ('and', ('tag', 'a'), ('tag', 'b')), ('and', ('tag', 'c'), ('tag', 'd'))))
self.assertEquals(parse('a b || c d'), ('or', ('and', ('tag', 'a'), ('tag', 'b')), ('and', ('tag', 'c'), ('tag', 'd'))))
self.assertEquals(parse('a b ||| c d'), ('or', ('and', ('tag', 'a'), ('tag', 'b')), ('and', ('tag', 'c'), ('tag', 'd'))))
def testParens(self):
self.assertEquals(parse('(a) (b) (c) (d)'), ('and', ('tag', 'a'), ('tag', 'b'), ('tag', 'c'), ('tag', 'd')))
self.assertEquals(parse('(a b c d)'), ('and', ('tag', 'a'), ('tag', 'b'), ('tag', 'c'), ('tag', 'd')))
self.assertEquals(parse('(a b) (c d)'), ('and', ('and', ('tag', 'a'), ('tag', 'b')), ('and', ('tag', 'c'), ('tag', 'd'))))
self.assertEquals(parse('(a b | c) d'), ('and', ('or', ('and', ('tag', 'a'), ('tag', 'b')), ('tag', 'c')), ('tag', 'd')))
self.assertEquals(parse('a (b | c) d'), ('and', ('tag', 'a'), ('or', ('tag', 'b'), ('tag', 'c')), ('tag', 'd')))
self.assertEquals(parse('a (b | c d)'), ('and', ('tag', 'a'), ('or', ('tag', 'b'), ('and', ('tag', 'c'), ('tag', 'd')))))
def testParseTags(self):
self.assertEquals(parse_tags('a b c'), (set(['a', 'b', 'c']), None))
self.assertEquals(parse_tags('foo bar'), (set(['foo', 'bar']), None))
self.assertEquals(parse_tags('foo, bar; baz'), (set(['foo', 'bar', 'baz']), None))
self.assertEquals(parse_tags('foo#bar$baz'), (set(['foo', 'bar', 'baz']), None))
self.assertEquals(parse_tags('foo bar .baz'), (set(['foo', 'bar']), '.baz'))
self.assertEquals(parse_tags('foo bar lang:.baz'), (set(['foo', 'bar']), '.baz'))
self.assertEquals(parse_tags('foo bar lang:baz'), (set(['foo', 'bar']), 'baz'))
self.assertEquals(parse_tags('foo .bar .baz'), (set(['foo']), '.baz'))
self.assertEquals(parse_tags('foo.bar.baz'), (set(['foo']), '.baz'))
self.assertEquals(parse_tags('foo .bar baz'), (set(['foo', 'baz']), '.bar'))
def random_snippet():
atags = 'foo bar baz code block function start setup'.split()
langs = 'c cpp java python js ruby html'.split()
words = 'self this assert ( ) [ ] . -> # + - * / ^ && || < <= >= > if for while'.split() + ['\n', '\t', ' ']
lang = random.choice(langs)
tags = [random.choice(atags) for i in xrange(random.randint(0, 4))]
    code = ' '.join([random.choice(words) for i in xrange(random.randint(10, 100))])
return Snippet(code, tags, lang)
class UtilsTestCase(unittest.TestCase):
def testParseTimediff(self):
self.assertEquals(parse_timediff('123s'), 123.0)
self.assertEquals(parse_timediff('12m'), 720.0)
self.assertEquals(parse_timediff('1h'), 3600.0)
self.assertEquals(parse_timediff('12.3s'), 12.3)
self.assertEquals(parse_timediff('1.2m'), 72.0)
self.assertEquals(parse_timediff('.1h'), 360.0)
self.assertEquals(parse_timediff('0'), 0)
self.assertEquals(parse_timediff('1'), 60 * 60 * 24.0)
self.assertEquals(parse_timediff('s1'), None)
self.assertEquals(parse_timediff('1x'), None)
self.assertEquals(parse_timediff(''), None)
self.assertEquals(parse_timediff('foo'), None)
class StorageTestCase(unittest.TestCase):
def setUp(self):
self.storage = MemoryStorage()
def testAttr(self):
for i in xrange(100):
random_snippet()
#self.storage.save(random_snippet())
def testAttr2(self):
pass
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 5,710,369,025,244,942,000 | 48.64532 | 145 | 0.483528 | false |
natethedrummer/Bail | release/ModelDiagnostics.py | 1 | 3387 | # import packages
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.feature_selection import chi2
from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_score, f1_score, mean_squared_error, accuracy_score
# report coefficients
def coef(model, X, X_train, y_train):
df_coef = pd.DataFrame(list(zip(X.columns, np.transpose(model.coef_))))
score, pvalues = chi2(X_train, y_train)
df_coef['p-value'] = pd.DataFrame(list(zip(np.transpose(pvalues))))
df_coef = df_coef.rename(columns = {0:'feature', 1:'coefficient'})
df_coef['coefficient'] = df_coef['coefficient'].str[0]
# intercept
df_intercept = pd.DataFrame(data=model.intercept_,
index=[0],
columns=['coefficient'])
df_intercept['feature'] = 'Intercept'
df_intercept = df_intercept[['feature', 'coefficient']]
df_coef.update(df_intercept)
df_coef['intercept'] = df_coef.iloc[0,1]
df_coef = df_coef[df_coef['feature'] != 'Intercept']
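    # For each feature, intercept + coefficient is the model's log-odds when that
    # feature equals 1 and all others are 0; exp() turns it into odds and
    # odds / (1 + odds) into a probability, as computed below.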
df_coef['log_odds'] = df_coef['intercept'] + df_coef['coefficient']
df_coef['odds'] = np.exp(df_coef['log_odds'])
df_coef['probability'] = df_coef['odds'] / (1 + df_coef['odds'])
df_coef.sort_values('probability', ascending=False, inplace=True)
return df_coef
# report predictions
def pred(model, X, y, df_offenses):
df_pred = X
df_pred['predicted'] = model.predict(X)
df_pred['actual'] = y
df_pred['spn'] = df_offenses['SPN']
return df_pred
# report accuracy
def accuracy(model, X_test, y_test):
accuracy_model = model.score(X_test, y_test)
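    # baseline: accuracy obtained by predicting the negative class for every observation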
accuracy_baseline = 1-y_test.mean()
accuracy_change = accuracy_model - accuracy_baseline
df_accuracy = pd.DataFrame({'Baseline Accuracy': [accuracy_baseline],
'Model Accuracy': [accuracy_model],
'Change in Accuracy': [accuracy_change]})
df_accuracy['Baseline Accuracy'] = round(df_accuracy['Baseline Accuracy'],2)
df_accuracy['Model Accuracy'] = round(df_accuracy['Model Accuracy'],2)
df_accuracy['Change in Accuracy'] = round(df_accuracy['Change in Accuracy'],2)
# ROC
y_true = y_test
y_pred = model.predict(X_test)
df_accuracy['roc_auc_score'] = round(
roc_auc_score(y_true, y_pred)
,2)
fpr, tpr, threshold = roc_curve(y_true, y_pred)
roc_auc = auc(fpr, tpr)
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('plot_roc.png')
# precision score
df_accuracy['precision_score'] = round(
precision_score(y_true, y_pred)
,2)
# f1 score
df_accuracy['f1_score'] = round(
f1_score(y_true, y_pred)
,2)
# mean squared error
df_accuracy['mean_squared_error'] = round(
mean_squared_error(y_true, y_pred)
,2)
# accuracy score
df_accuracy['accuracy_score'] = round(
accuracy_score(y_true, y_pred)
,2)
return df_accuracy
| mit | -5,627,800,905,781,793,000 | 29.241071 | 120 | 0.598169 | false |
jaredscarr/django-imager | imagersite/imagersite/settings.py | 1 | 4051 | """
Django settings for imagersite project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# from configurations import Configuration
# class Dev(Configuration):
# DEBUG = True
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'not the secret')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DJANGO_DEBUG')
THUMBNAIL_DEBUG = os.environ.get('DJANGO_DEBUG')
ALLOWED_HOSTS = [
'.us-west-2.compute.amazonaws.com',
'localhost',
]
# Application definition
INSTALLED_APPS = [
'sorl.thumbnail',
'imager_images',
'imager_profile',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'imagersite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'imagersite', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'imagersite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default=os.environ.get('DATABASE_URL')
)
}
# caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'thumbnails',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# security
# CSRF_COOKIE_SECURE = True
# SESSION_COOKIE_SECURE = True
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'imagersite', 'static'), ]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# activation
ACCOUNT_ACTIVATION_DAYS = 7
# console backend
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| mit | 2,117,890,535,857,599,700 | 24.639241 | 91 | 0.691681 | false |
xswxm/nrf24-injection | Experiments/Scripts/ping.py | 1 | 1628 | #!/usr/bin/env python2
'''
Author: xswxm
Blog: xswxm.com
This script measures the number of successful pings per second.
e.g.: sudo python ping.py -l -a 61:8E:9C:CD:03 -f 74 -t 0 -r 0
'''
import sys, time, threading
from lib import common
common.init_args('./ping.py')
common.parser.add_argument('-a', '--address', type=str, help='Address to sniff, following as it changes channels', required=True)
common.parser.add_argument('-f', '--channel', type=int, help='RF channel', default=0)
common.parse_and_init()
channel = common.args.channel
# Total number of payloads sent
count = 0
# Parse the prefix address
address = common.args.address.replace(':', '').decode('hex')[::-1][:5]
# Put the radio in sniffer mode (ESB w/o auto ACKs)
common.radio.enter_sniffer_mode(address)
# Set channel
common.radio.set_channel(channel)
stop_event = threading.Event()
stop_event.set()
# Update per milliseconds
def display():
global count, stop_event
# To record the number of payloads sent for every 100 milliseconds
pings = [0]*10
# Highest rate
max_rate = 0
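    # 'pings' is a sliding window of the last 10 counts (one sample per 100 ms), so newest - oldest ~= packets per second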
while stop_event.isSet():
pings = pings[1:] + [count]
rate = pings[-1] - pings[0]
if max_rate < rate: max_rate = rate
msg = 'Maximum Rate: {0:>4}pks/s Current Rate: {1:>4}pks/s'.format(max_rate, rate)
sys.stdout.write('\r'+msg)
sys.stdout.flush()
time.sleep(0.1)
if __name__ == "__main__":
t = threading.Thread(target=display,args=())
t.start()
try:
while True:
if common.radio.transmit_payload(common.ping_payload, common.ack_timeout, common.retries):
count += 1
except KeyboardInterrupt:
stop_event.clear() | gpl-3.0 | 6,024,926,676,848,217,000 | 28.089286 | 129 | 0.677518 | false |
REGOVAR/Regovar | regovar/core/managers/pipeline_manager.py | 1 | 12599 | #!env/python3
# coding: utf-8
try:
import ipdb
except ImportError:
pass
import os
import shutil
import json
import zipfile
import datetime
import time
import uuid
import subprocess
import requests
from config import *
from core.framework.common import *
from core.framework.postgresql import execute
from core.model import *
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PIPELINE MANAGER
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class PipelineManager:
def __init__(self):
pass
def list(self):
"""
        List all pipelines with a minimum of data
"""
sql = "SELECT id, name, type, status, description, version, image_file_id, starred, installation_date, manifest, documents FROM pipeline ORDER BY id"
result = []
for res in execute(sql):
result.append({
"id": res.id,
"name": res.name,
"description": res.description,
"type": res.type,
"status": res.status,
"version": res.version,
"image_file_id": res.image_file_id,
"starred": res.starred,
"installation_date": res.installation_date.isoformat(),
"manifest": res.manifest,
"documents": res.documents
})
return result
def get(self, fields=None, query=None, order=None, offset=None, limit=None, depth=0):
"""
        Generic method to get pipelines according to the provided filtering options
"""
if not isinstance(fields, dict):
fields = None
if query is None:
query = {}
if order is None:
order = "name, installation_date desc"
if offset is None:
offset = 0
if limit is None:
limit = RANGE_MAX
pipes = Session().query(Pipeline).filter_by(**query).order_by(order).limit(limit).offset(offset).all()
for p in pipes: p.init(depth)
return pipes
def install_init (self, name, metadata={}):
pipe = Pipeline.new()
pipe.name = name
pipe.status = "initializing"
pipe.save()
if metadata and len(metadata) > 0:
pipe.load(metadata)
log('core.PipeManager.register : New pipe registered with the id {}'.format(pipe.id))
return pipe
def install_init_image_upload(self, filepath, file_size, pipe_metadata={}):
"""
Initialise a pipeline installation.
        To use if the image has to be uploaded to the server.
Create an entry for the pipeline and the file (image that will be uploaded) in the database.
Return the Pipeline and the File objects created
This method shall be used to init a resumable upload of a pipeline
(the pipeline/image are not yet installed and available, but we need to manipulate them)
"""
from core.core import core
pfile = core.files.upload_init(filepath, file_size)
pipe = self.install_init(filepath, pipe_metadata)
pipe.image_file_id = pfile.id
pipe.save()
return pipe, pfile
async def install_init_image_url(self, url, pipe_metadata={}):
"""
Initialise a pipeline installation.
        To use if the image has to be retrieved via a URL.
Create an entry for the pipeline and the file (image) in the database.
        Async method: the download starts immediately, followed by the installation when it is done
Return the Pipeline object ready to be used
"""
raise NotImplementedError("TODO")
def install_init_image_local(self, filepath, move=False, pipe_metadata={}):
"""
Initialise a pipeline installation.
        To use if the image has to be retrieved from the local server.
Create an entry for the pipeline and the file (image) in the database.
Copy the local file into dedicated directory and start the installation of the Pipeline
Return the Pipeline object ready to be used
"""
from core.core import core
pfile = core.files.from_local(filepath, move)
pipe = self.install_init(os.path.basename(filepath), pipe_metadata)
        # FIXME: Sometimes getting sqlalchemy error 'is not bound to a Session'
        # why does it occur here ... and why only sometimes :/
check_session(pfile)
check_session(pipe)
pipe.image_file_id = pfile.id
pipe.save()
return pipe
def install_init_image(self, file_id, pipe_metadata={}):
"""
Initialise a pipeline installation.
        To use if the image has already been uploaded to the local server via the regovar file API.
Create an entry for the pipeline in the database.
Return the Pipeline object ready to be used
"""
from core.core import core
pfile = File.from_id(file_id)
if pfile:
pipe = self.install_init(os.path.basename(pfile.path), pipe_metadata)
pipe.image_file_id = file_id
pipe.save()
return pipe
return None
def check_manifest(self, manifest):
"""
Check that manifest (json) is valid and return the full version completed
with default values if needed
"""
missing = ""
for k in ["name", "version"]:
if k not in manifest.keys():
missing += k + ", "
if missing != "":
missing = missing[:-2]
raise RegovarException("FAILLED Checking validity of manifest (missing : {})".format(missing))
# 2- Default value for optional fields in mandatory file
default = {
"description": "",
"type": "job",
"contacts": [],
"regovar_db_access": False,
"inputs": "/pipeline/inputs",
"outputs": "/pipeline/outputs",
"databases": "/pipeline/databases",
"logs": "/pipeline/logs"
}
for k in default.keys():
if k not in manifest.keys():
manifest[k] = default[k]
# 3- check type
if manifest["type"] not in ["job", "importer", "exporter", "reporter"]:
raise RegovarException("FAILLED Checking validity of manifest (type '{}' not supported)".format(manifest["type"]))
log('Validity of manifest checked')
return manifest
def install(self, pipeline_id, asynch=True):
"""
Start the installation of the pipeline. (done in another thread)
The initialization shall be done (image ready to be used)
"""
from core.core import core
pipeline = Pipeline.from_id(pipeline_id, 1)
if not pipeline :
raise RegovarException("Pipeline not found (id={}).".format(pipeline_id))
if pipeline.status != "initializing":
raise RegovarException("Pipeline status ({}) is not \"initializing\". Cannot perform another installation.".format(pipeline.status))
if pipeline.image_file and pipeline.image_file.status not in ["uploaded", "checked"]:
raise RegovarException("Wrong pipeline image (status={}).".format(pipeline.image_file.status))
if not pipeline.image_file or pipeline.image_file.status in ["uploaded", "checked"]:
if asynch:
run_async(self.__install, pipeline)
else:
pipeline = self.__install(pipeline)
return pipeline
def __install(self, pipeline):
from core.core import core
# Dezip pirus package in the pirus pipeline directory
root_path = os.path.join(PIPELINES_DIR, str(pipeline.id))
log('Installation of the pipeline package : ' + root_path)
os.makedirs(root_path)
os.chmod(pipeline.image_file.path, 0o777)
# TODO: Check zip integrity and security before extracting it
# see python zipfile official doc
with zipfile.ZipFile(pipeline.image_file.path,"r") as zip_ref:
zip_ref.extractall(root_path)
# check package tree
# find root folder
files = [i.filename for i in zip_ref.infolist()]
for f in files:
if f.endswith("manifest.json"): break
zip_root = os.path.dirname(f)
# remove intermediate folder
if zip_root != "":
zip_root = os.path.join(root_path, zip_root)
for filename in os.listdir(zip_root):
shutil.move(os.path.join(zip_root, filename), os.path.join(root_path, filename))
os.rmdir(zip_root)
# Load manifest
try:
log(os.path.join(root_path, "manifest.json"))
with open(os.path.join(root_path, "manifest.json"), "r") as f:
data = f.read()
log(data)
# Fix common parsing problem regarding json syntaxe
data = data.replace("False", "false")
data = data.replace("True", "true")
manifest = json.loads(data)
manifest = self.check_manifest(manifest)
pipeline.developpers = manifest.pop("contacts")
pipeline.manifest = manifest
# list documents available
pipeline.documents = {
"about": os.path.join(root_path, "doc/about.html"),
"help": os.path.join(root_path, "doc/help.html"),
"icon": os.path.join(root_path, "doc/icon.png"),
"icon2": os.path.join(root_path, "doc/icon.jpg"),
"form": os.path.join(root_path, "form.json"),
"license":os.path.join(root_path, "LICENSE"),
"readme": os.path.join(root_path, "README")
}
for k in pipeline.documents.keys():
if not os.path.exists(pipeline.documents[k]):
pipeline.documents[k] = None
p = pipeline.documents.pop("icon2")
if not pipeline.documents["icon"]:
pipeline.documents["icon"] = p
pipeline.load(manifest)
pipeline.save()
except Exception as ex:
pipeline.status = "error"
pipeline.save()
raise RegovarException("Unable to open and read manifest.json. The pipeline package is wrong or corrupt.", exception=ex)
# Update and save pipeline status
pipeline.type = manifest["type"]
pipeline.installation_date = datetime.datetime.now()
pipeline.status = "installing"
pipeline.save()
# Install pipeline
result = core.container_manager.install_pipeline(pipeline)
return result
def delete(self, pipeline_id, asynch=True):
"""
Start the uninstallation of the pipeline. (done in another thread)
Remove image file if exists.
"""
from core.core import core
result = None
pipeline = Pipeline.from_id(pipeline_id, 1)
if pipeline:
result = pipeline.to_json()
# Clean container
try:
if asynch:
run_async(self.__delete, pipeline)
else:
self.__delete(pipeline)
except Exception as ex:
war("core.PipelineManager.delete : Container manager failed to delete the container with id {}.".format(pipeline.id))
try:
# Clean filesystem
shutil.rmtree(pipeline.path, True)
# Clean DB
core.files.delete(pipeline.image_file_id)
Pipeline.delete(pipeline.id)
except Exception as ex:
raise RegovarException("core.PipelineManager.delete : Unable to delete the pipeline's pirus data for the pipeline {}.".format(pipeline.id), ex)
return result
def __delete(self, pipeline):
from core.core import core
try:
core.container_manager.uninstall_pipeline(pipeline)
except Exception as ex:
raise RegovarException("Error occured during uninstallation of the pipeline. Uninstallation aborded.", ex)
| agpl-3.0 | -2,622,789,942,192,497,000 | 35.625 | 159 | 0.560124 | false |
asanfilippo7/osf.io | website/addons/wiki/model.py | 1 | 8106 | # -*- coding: utf-8 -*-
import datetime
import functools
import logging
from bleach import linkify
from bleach.callbacks import nofollow
from website.models import NodeLog
import markdown
from markdown.extensions import codehilite, fenced_code, wikilinks
from modularodm import fields
from framework.forms.utils import sanitize
from framework.guid.model import GuidStoredObject
from framework.mongo import utils as mongo_utils
from website import settings
from website.addons.base import AddonNodeSettingsBase
from website.addons.wiki import utils as wiki_utils
from website.addons.wiki.settings import WIKI_CHANGE_DATE
from website.project.commentable import Commentable
from website.project.signals import write_permissions_revoked
from website.exceptions import NodeStateError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
)
logger = logging.getLogger(__name__)
class AddonWikiNodeSettings(AddonNodeSettingsBase):
complete = True
has_auth = True
is_publicly_editable = fields.BooleanField(default=False, index=True)
def set_editing(self, permissions, auth=None, log=False):
"""Set the editing permissions for this node.
:param auth: All the auth information including user, API key
:param bool permissions: True = publicly editable
:param bool save: Whether to save the privacy change
:param bool log: Whether to add a NodeLog for the privacy change
if true the node object is also saved
"""
node = self.owner
if permissions and not self.is_publicly_editable:
if node.is_public:
self.is_publicly_editable = True
else:
raise NodeStateError('Private components cannot be made publicly editable.')
elif not permissions and self.is_publicly_editable:
self.is_publicly_editable = False
else:
raise NodeStateError('Desired permission change is the same as current setting.')
if log:
node.add_log(
action=(NodeLog.MADE_WIKI_PUBLIC
if self.is_publicly_editable
else NodeLog.MADE_WIKI_PRIVATE),
params={
'project': node.parent_id,
'node': node._primary_key,
},
auth=auth,
save=False,
)
node.save()
self.save()
def after_register(self, node, registration, user, save=True):
"""Copy wiki settings to registrations."""
clone = self.clone()
clone.owner = registration
if save:
clone.save()
return clone, None
def after_set_privacy(self, node, permissions):
"""
:param Node node:
:param str permissions:
:return str: Alert message
"""
if permissions == 'private':
if self.is_publicly_editable:
self.set_editing(permissions=False, log=False)
return (
'The wiki of {name} is now only editable by write contributors.'.format(
name=node.title,
)
)
def to_json(self, user):
return {}
@write_permissions_revoked.connect
def subscribe_on_write_permissions_revoked(node):
# Migrate every page on the node
for wiki_name in node.wiki_private_uuids:
wiki_utils.migrate_uuid(node, wiki_name)
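# Used by the WikiLinkExtension in render_content() to turn wiki links into project wiki URLs.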
def build_wiki_url(node, label, base, end):
return '/{pid}/wiki/{wname}/'.format(pid=node._id, wname=label)
def validate_page_name(value):
value = (value or '').strip()
if not value:
raise NameEmptyError('Page name cannot be blank.')
if value.find('/') != -1:
raise NameInvalidError('Page name cannot contain forward slashes.')
if len(value) > 100:
raise NameMaximumLengthError('Page name cannot be greater than 100 characters.')
return True
def render_content(content, node):
html_output = markdown.markdown(
content,
extensions=[
wikilinks.WikiLinkExtension(
configs=[
('base_url', ''),
('end_url', ''),
('build_url', functools.partial(build_wiki_url, node))
]
),
fenced_code.FencedCodeExtension(),
codehilite.CodeHiliteExtension(
[('css_class', 'highlight')]
)
]
)
    # linkify gets called after sanitize, because we're adding rel="nofollow"
# to <a> elements - but don't want to allow them for other elements.
sanitized_content = sanitize(html_output, **settings.WIKI_WHITELIST)
return sanitized_content
class NodeWikiPage(GuidStoredObject, Commentable):
_id = fields.StringField(primary=True)
page_name = fields.StringField(validate=validate_page_name)
version = fields.IntegerField()
date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
is_current = fields.BooleanField()
content = fields.StringField(default='')
user = fields.ForeignField('user')
node = fields.ForeignField('node')
@property
def deep_url(self):
return '{}wiki/{}/'.format(self.node.deep_url, self.page_name)
@property
def url(self):
return '{}wiki/{}/'.format(self.node.url, self.page_name)
@property
def rendered_before_update(self):
return self.date < WIKI_CHANGE_DATE
# For Comment API compatibility
@property
def target_type(self):
"""The object "type" used in the OSF v2 API."""
return 'wiki'
@property
def root_target_page(self):
"""The comment page type associated with NodeWikiPages."""
return 'wiki'
@property
def is_deleted(self):
key = mongo_utils.to_mongo_key(self.page_name)
return key not in self.node.wiki_pages_current
def belongs_to_node(self, node_id):
"""Check whether the wiki is attached to the specified node."""
return self.node._id == node_id
def get_extra_log_params(self, comment):
return {'wiki': {'name': self.page_name, 'url': comment.get_comment_page_url()}}
# used by django and DRF - use v1 url since there are no v2 wiki routes
def get_absolute_url(self):
return '{}wiki/{}/'.format(self.node.absolute_url, self.page_name)
def html(self, node):
"""The cleaned HTML of the page"""
sanitized_content = render_content(self.content, node=node)
try:
return linkify(
sanitized_content,
[nofollow, ],
)
except TypeError:
logger.warning('Returning unlinkified content.')
return sanitized_content
def raw_text(self, node):
""" The raw text of the page, suitable for using in a test search"""
return sanitize(self.html(node), tags=[], strip=True)
def get_draft(self, node):
"""
Return most recently edited version of wiki, whether that is the
last saved version or the most recent sharejs draft.
"""
db = wiki_utils.share_db()
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, self.page_name)
doc_item = db['docs'].find_one({'_id': sharejs_uuid})
if doc_item:
sharejs_version = doc_item['_v']
sharejs_timestamp = doc_item['_m']['mtime']
sharejs_timestamp /= 1000 # Convert to appropriate units
sharejs_date = datetime.datetime.utcfromtimestamp(sharejs_timestamp)
if sharejs_version > 1 and sharejs_date > self.date:
return doc_item['_data']
return self.content
def save(self, *args, **kwargs):
rv = super(NodeWikiPage, self).save(*args, **kwargs)
if self.node:
self.node.update_search()
return rv
def rename(self, new_name, save=True):
self.page_name = new_name
if save:
self.save()
def to_json(self):
return {}
| apache-2.0 | 2,979,544,810,616,633,300 | 30.297297 | 93 | 0.613003 | false |
FedoraScientific/salome-kernel | bin/appliskel/salome_tester/salome_instance.py | 1 | 3184 | # Copyright (C) 2015-2016 CEA/DEN, EDF R&D, OPEN CASCADE
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
import sys
import os
# Example of args:
# args=["--gui", "--show-desktop=1", "--splash=0"]
# args=["--terminal","--modules=MED,PARAVIS,GUI"]
class SalomeInstance(object):
def __init__(self):
self.port = None
#
def get_port(self):
return self.port
#
@staticmethod
def start(shutdown_servers=False, with_gui=False, args=[]):
import tempfile
log = tempfile.NamedTemporaryFile(suffix='_nsport.log', delete=False)
log.close()
instance_args = [
"--ns-port-log=%s"%log.name,
"--shutdown-servers=%d"%shutdown_servers
] + args
salome_instance = SalomeInstance()
salome_instance.__run(args=instance_args, with_gui=with_gui)
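    # runSalome writes the naming service port into the temporary log file; read it back here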
with open(log.name) as f:
salome_instance.port = int(f.readline())
os.remove(log.name)
return salome_instance
#
def __run(self, args=None, with_gui=False):
if args is None:
args = []
sys.argv = ['runSalome'] + args
if with_gui:
# :WARNING: NOT TESTED YET
sys.argv += ["--gui"]
sys.argv += ["--show-desktop=1"]
sys.argv += ["--splash=0"]
#sys.argv += ["--standalone=study"]
#sys.argv += ["--embedded=SalomeAppEngine,cppContainer,registry,moduleCatalog"]
else:
sys.argv += ["--terminal"]
#sys.argv += ["--shutdown-servers=1"]
#sys.argv += ["--modules=MED,PARAVIS,GUI"]
pass
import setenv
setenv.main(True)
import runSalome
runSalome.runSalome()
if not with_gui:
import salome
salome.salome_init()
session_server = salome.naming_service.Resolve('/Kernel/Session')
if session_server:
session_server.emitMessage("connect_to_study")
session_server.emitMessage("activate_viewer/ParaView")
pass
#
def stop(self):
from multiprocessing import Process
from killSalomeWithPort import killMyPort
import tempfile
with tempfile.NamedTemporaryFile():
p = Process(target = killMyPort, args=(self.port,))
p.start()
p.join()
pass
#
#
if __name__ == "__main__":
print "##### Start instance..."
salome_instance = SalomeInstance.start()
port = salome_instance.get_port()
print "##### ...instance started on port %s"%port
print "##### Terminate instance running on port %s"%port
salome_instance.stop()
#
| lgpl-2.1 | -4,801,765,214,694,821,000 | 27.428571 | 85 | 0.652952 | false |
dtroyer/python-openstacksdk | openstack/network/v2/router.py | 1 | 5820 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network import network_service
from openstack.network.v2 import tag
from openstack import resource
from openstack import utils
class Router(resource.Resource, tag.TagMixin):
resource_key = 'router'
resources_key = 'routers'
base_path = '/routers'
service = network_service.NetworkService()
# capabilities
allow_create = True
allow_get = True
allow_update = True
allow_delete = True
allow_list = True
# NOTE: We don't support query on datetime, list or dict fields
_query_mapping = resource.QueryParameters(
'description', 'flavor_id', 'name', 'status',
is_admin_state_up='admin_state_up',
is_distributed='distributed',
is_ha='ha',
project_id='tenant_id',
**tag.TagMixin._tag_query_parameters
)
# Properties
#: Availability zone hints to use when scheduling the router.
#: *Type: list of availability zone names*
availability_zone_hints = resource.Body('availability_zone_hints',
type=list)
#: Availability zones for the router.
#: *Type: list of availability zone names*
availability_zones = resource.Body('availability_zones', type=list)
#: Timestamp when the router was created.
created_at = resource.Body('created_at')
#: The router description.
description = resource.Body('description')
#: The ``network_id``, for the external gateway. *Type: dict*
external_gateway_info = resource.Body('external_gateway_info', type=dict)
#: The ID of the flavor.
flavor_id = resource.Body('flavor_id')
#: The administrative state of the router, which is up ``True``
#: or down ``False``. *Type: bool*
is_admin_state_up = resource.Body('admin_state_up', type=bool)
#: The distributed state of the router, which is distributed ``True``
#: or not ``False``. *Type: bool* *Default: False*
is_distributed = resource.Body('distributed', type=bool, default=False)
#: The highly-available state of the router, which is highly available
#: ``True`` or not ``False``. *Type: bool* *Default: False*
is_ha = resource.Body('ha', type=bool, default=False)
#: The router name.
name = resource.Body('name')
#: The ID of the project this router is associated with.
project_id = resource.Body('tenant_id')
#: Revision number of the router. *Type: int*
revision_number = resource.Body('revision', type=int)
#: The extra routes configuration for the router.
routes = resource.Body('routes', type=list)
#: The router status.
status = resource.Body('status')
    #: Timestamp when the router was last updated.
updated_at = resource.Body('updated_at')
def add_interface(self, session, **body):
"""Add an internal interface to a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id, 'add_router_interface')
resp = session.put(url, json=body)
return resp.json()
def remove_interface(self, session, **body):
"""Remove an internal interface from a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id, 'remove_router_interface')
resp = session.put(url, json=body)
return resp.json()
def add_gateway(self, session, **body):
"""Add an external gateway to a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id,
'add_gateway_router')
resp = session.put(url, json=body)
return resp.json()
def remove_gateway(self, session, **body):
"""Remove an external gateway from a logical router.
:param session: The session to communicate through.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param dict body: The body requested to be updated on the router
:returns: The body of the response as a dictionary.
"""
url = utils.urljoin(self.base_path, self.id,
'remove_gateway_router')
resp = session.put(url, json=body)
return resp.json()
class L3AgentRouter(Router):
resource_key = 'router'
resources_key = 'routers'
base_path = '/agents/%(agent_id)s/l3-routers'
resource_name = 'l3-router'
service = network_service.NetworkService()
# capabilities
allow_create = False
allow_retrieve = True
allow_update = False
allow_delete = False
allow_list = True
# NOTE: No query parameter is supported
| apache-2.0 | -3,754,145,010,826,601,500 | 38.060403 | 79 | 0.656873 | false |
dparks1134/GenomeTreeTk | scripts/checkm_compare.py | 1 | 4871 | #!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__prog_name__ = 'checkm_compare.py'
__prog_desc__ = 'compare CheckM estimates'
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2018'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '0.0.1'
__maintainer__ = 'Donovan Parks'
__email__ = '[email protected]'
__status__ = 'Development'
import os
import sys
import argparse
import tempfile
import ntpath
import shutil
class Compare(object):
"""Compare CheckM estimates."""
def __init__(self):
"""Initialization."""
pass
def run(self, qc_failed_file, checkm_qa_files, output_file):
"""compare CheckM estimates."""
orig_estimates = {}
with open(qc_failed_file) as f:
header = f.readline().strip().split('\t')
acc_index = header.index('Accession')
comp_index = header.index('Completeness (%)')
cont_index = header.index('Contamination (%)')
for line in f:
line_split = line.strip().split('\t')
gid = line_split[acc_index]
comp = float(line_split[comp_index])
cont = float(line_split[cont_index])
orig_estimates[gid] = (comp, cont)
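        # parse the alternative CheckM estimates, keyed by genome accession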
new_estimates = {}
with open(checkm_qa_files) as f:
header = f.readline().strip().split('\t')
comp_index = header.index('Completeness')
cont_index = header.index('Contamination')
for line in f:
line_split = line.strip().split('\t')
gid = line_split[0].replace('_ncbi_proteins', '')
comp = float(line_split[comp_index])
cont = float(line_split[cont_index])
new_estimates[gid] = (comp, cont)
fout = open(output_file, 'w')
fout.write('Accession\tOriginal completeness\tNew completeness\tOriginal contamination\tNew contamination\n')
for gid in new_estimates:
orig_comp, orig_cont = orig_estimates[gid]
new_comp, new_cont = new_estimates[gid]
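            # quality = completeness - 5*contamination; report genomes failing the 50 threshold with the original estimates but passing with the new ones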
orig_quality = orig_comp - 5*orig_cont
if orig_quality >= 50:
continue
new_quality = new_comp - 5*new_cont
if new_quality < 50:
continue
if (new_comp - orig_comp > 5
or new_cont - orig_cont < -1):
print(gid, orig_comp, new_comp, orig_cont, new_cont)
fout.write('%s\t%.2f\t%.2f\t%.2f\t%.2f\n' % (gid, orig_comp, new_comp, orig_cont, new_cont))
fout.close()
if __name__ == '__main__':
print(__prog_name__ + ' v' + __version__ + ': ' + __prog_desc__)
print(' by ' + __author__ + ' (' + __email__ + ')' + '\n')
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('qc_failed_file', help='file indicating genomes that failed QC')
parser.add_argument('checkm_qa_files', help='file with alternative CheckM estimates')
    parser.add_argument('output_file', help='output file')
args = parser.parse_args()
try:
p = Compare()
p.run(args.qc_failed_file, args.checkm_qa_files, args.output_file)
except SystemExit:
print("\nControlled exit resulting from an unrecoverable error or warning.")
except:
print("\nUnexpected error:", sys.exc_info()[0])
raise
| gpl-3.0 | -6,658,379,957,902,644,000 | 39.256198 | 117 | 0.49497 | false |
pablocscode/TFG-CAEBAT | simulacion_basica_v1.py | 1 | 2696 | # -*- coding: utf-8 -*-
'''
Created by Pablo Castro
28/03/17
Goal:
Automate the whole simulation process from the Linux terminal.
Works with CAEBAT 1.0
How to use it:
1- Place the script in the 'examples' folder.
2- Create a folder named 'Mis simulaciones' on the desktop.
3- Run it normally from the terminal: 'python simulacion_basica.py'
4- The program asks for the exact name of the folder to simulate.
Actions:
-Run the simulation of the case we choose
-Copy the folder with the simulation results into another folder
located on the desktop, named after the simulated case and the simulation date.
-After doing this, delete the folders generated by the simulation
in the example folder.
'''
import os
import shutil
from datetime import datetime
def copiar_simulacion(Nombre_simulacion):
    # Get the date when the folder was created
fecha = os.stat(Nombre_simulacion).st_mtime
    # Convert it to a readable format and use it to name the new folder
nombre_carpeta_copia = Nombre_simulacion + ' ' + str(datetime.fromtimestamp(fecha))
shutil.copytree(Nombre_simulacion,nombre_carpeta_copia)
shutil.move(nombre_carpeta_copia,'/home/batsim/Desktop/Mis simulaciones/')
def eliminar_carpetas(Nombre_simulacion):
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/simulation_log')
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/simulation_results')
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/simulation_setup')
shutil.rmtree('/home/batsim/caebat/vibe/examples/'+Nombre_simulacion+'/work')
# Select the simulation folder from the terminal
print('Introduce el nombre de la carpeta que quieres simular:')
nombre = raw_input()
# Select the .conf file we are going to simulate
if nombre == 'case2':
modelo = 'thermal_electrical_chartran_cell_twoway.conf'
elif nombre == 'case3':
modelo = 'thermal_electrical_chartran_battery_twoway.conf'
elif nombre == 'case6':
modelo = 'thermal_electrical_chartran_farasis.conf'
elif nombre == 'case7':
modelo = 'thermal_electrical_chartran_module_4P.conf'
else:
print('Error al introducir el nombre de la carpeta')
quit()
# Change the path to the selected folder
os.chdir('/home/batsim/caebat/vibe/examples/'+nombre)
# Run the simulation
os.system('/home/batsim/caebat/oas/install/bin/ips.py --simulation='+modelo+' --log=temp.log --platform=../config/batsim.conf -a')
os.chdir('/home/batsim/caebat/vibe/examples')
copiar_simulacion(nombre)
eliminar_carpetas(nombre)
print('Fin de la simulación')
| gpl-3.0 | -8,891,207,074,465,465,000 | 37.057971 | 130 | 0.751763 | false |
indashnet/InDashNet.Open.UN2000 | android/external/llvm/utils/lit/lit/ShUtil.py | 1 | 12179 | from __future__ import absolute_import
import itertools
import lit.Util
from lit.ShCommands import Command, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
self.data = data
self.pos = 0
self.end = len(data)
self.win32Escapes = win32Escapes
def eat(self):
c = self.data[self.pos]
self.pos += 1
return c
def look(self):
return self.data[self.pos]
def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False
def lex_arg_fast(self, c):
# Get the leading whitespace free section.
chunk = self.data[self.pos - 1:].split(None, 1)[0]
# If it has special characters, the fast path failed.
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
';' in chunk or '\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
return chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&;":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
# we don't have to track whitespace tokens.
# If the parse string isn't an integer, do the usual thing.
if not str.isdigit():
break
# Otherwise, lex the operator and convert to a redirection
# token.
num = int(str)
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
elif c == '"':
self.eat()
str += self.lex_arg_quoted('"')
elif c == "'":
self.eat()
str += self.lex_arg_quoted("'")
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
lit.Util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
else:
str += self.eat()
return str
def lex_arg_quoted(self, delim):
str = ''
while self.pos != self.end:
c = self.eat()
if c == delim:
return str
elif c == '\\' and delim == '"':
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
lit.Util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
c = self.eat()
if c == '"': #
str += '"'
elif c == '\\':
str += '\\'
else:
str += '\\' + c
else:
str += c
lit.Util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
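        # Debug helper: run both the fast and slow lexing paths and check that they agree.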
pos = self.pos
res = self.lex_arg_fast(c)
end = self.pos
self.pos = pos
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
raise ValueError("Fast path failure: %r != %r" % (
res, reference))
if self.pos != end:
raise ValueError("Fast path failure: %r != %r" % (
self.pos, end))
return reference
def lex_arg(self, c):
return self.lex_arg_fast(c) or self.lex_arg_slow(c)
def lex_one_token(self):
"""
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
if c == ';':
return (c,)
if c == '|':
if self.maybe_eat('|'):
return ('||',)
return (c,)
if c == '&':
if self.maybe_eat('&'):
return ('&&',)
if self.maybe_eat('>'):
return ('&>',)
return (c,)
if c == '>':
if self.maybe_eat('&'):
return ('>&',)
if self.maybe_eat('>'):
return ('>>',)
return (c,)
if c == '<':
if self.maybe_eat('&'):
return ('<&',)
if self.maybe_eat('>'):
return ('<<',)
return (c,)
return self.lex_arg(c)
def lex(self):
while self.pos != self.end:
if self.look().isspace():
self.eat()
else:
yield self.lex_one_token()
###
class ShParser:
def __init__(self, data, win32Escapes = False, pipefail = False):
self.data = data
self.pipefail = pipefail
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
for item in self.tokens:
return item
return None
def look(self):
token = self.lex()
if token is not None:
self.tokens = itertools.chain([token], self.tokens)
return token
def parse_command(self):
tok = self.lex()
if not tok:
raise ValueError("empty command!")
if isinstance(tok, tuple):
raise ValueError("syntax error near unexpected token %r" % tok[0])
args = [tok]
redirects = []
while 1:
tok = self.look()
# EOF?
if tok is None:
break
# If this is an argument, just add it to the current command.
if isinstance(tok, str):
args.append(self.lex())
continue
# Otherwise see if it is a terminator.
assert isinstance(tok, tuple)
if tok[0] in ('|',';','&','||','&&'):
break
# Otherwise it must be a redirection.
op = self.lex()
arg = self.lex()
if not arg:
raise ValueError("syntax error near token %r" % op[0])
redirects.append((op, arg))
return Command(args, redirects)
def parse_pipeline(self):
negate = False
commands = [self.parse_command()]
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
return Pipeline(commands, negate, self.pipefail)
def parse(self):
lhs = self.parse_pipeline()
while self.look():
operator = self.lex()
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
raise ValueError(
"missing argument to operator %r" % operator[0])
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
return lhs
###
import unittest
class TestShLexer(unittest.TestCase):
def lex(self, str, *args, **kwargs):
return list(ShLexer(str, *args, **kwargs).lex())
def test_basic(self):
self.assertEqual(self.lex('a|b>c&d<e;f'),
['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
('<',), 'e', (';',), 'f'])
def test_redirection_tokens(self):
self.assertEqual(self.lex('a2>c'),
['a2', ('>',), 'c'])
self.assertEqual(self.lex('a 2>c'),
['a', ('>',2), 'c'])
def test_quoting(self):
self.assertEqual(self.lex(""" 'a' """),
['a'])
self.assertEqual(self.lex(""" "hello\\"world" """),
['hello"world'])
self.assertEqual(self.lex(""" "hello\\'world" """),
["hello\\'world"])
self.assertEqual(self.lex(""" "hello\\\\world" """),
["hello\\world"])
self.assertEqual(self.lex(""" he"llo wo"rld """),
["hello world"])
self.assertEqual(self.lex(""" a\\ b a\\\\b """),
["a b", "a\\b"])
self.assertEqual(self.lex(""" "" "" """),
["", ""])
self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
['a\\', 'b'])
class TestShParse(unittest.TestCase):
def parse(self, str):
return ShParser(str).parse()
def test_basic(self):
self.assertEqual(self.parse('echo hello'),
Pipeline([Command(['echo', 'hello'], [])], False))
self.assertEqual(self.parse('echo ""'),
Pipeline([Command(['echo', ''], [])], False))
self.assertEqual(self.parse("""echo -DFOO='a'"""),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
self.assertEqual(self.parse('echo -DFOO="a"'),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
def test_redirection(self):
self.assertEqual(self.parse('echo hello > c'),
Pipeline([Command(['echo', 'hello'],
[((('>'),), 'c')])], False))
self.assertEqual(self.parse('echo hello > c >> d'),
Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
(('>>',), 'd')])], False))
self.assertEqual(self.parse('a 2>&1'),
Pipeline([Command(['a'], [(('>&',2), '1')])], False))
def test_pipeline(self):
self.assertEqual(self.parse('a | b'),
Pipeline([Command(['a'], []),
Command(['b'], [])],
False))
self.assertEqual(self.parse('a | b | c'),
Pipeline([Command(['a'], []),
Command(['b'], []),
Command(['c'], [])],
False))
def test_list(self):
self.assertEqual(self.parse('a ; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a & b'),
Seq(Pipeline([Command(['a'], [])], False),
'&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b'),
Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a || b'),
Seq(Pipeline([Command(['a'], [])], False),
'||',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b || c'),
Seq(Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)),
'||',
Pipeline([Command(['c'], [])], False)))
self.assertEqual(self.parse('a; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -4,915,834,333,598,974,000 | 33.307042 | 79 | 0.416126 | false |