repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
rsignell-usgs/notebook | pyugrid/pyugrid/test_examples.py | 1 | 2559 | #!/usr/bin/env python
"""
Some example UGRIDs to test with, etc.
"""
from __future__ import (absolute_import, division, print_function)
from pyugrid import ugrid
def two_triangles():
"""
returns about the simplest triangle grid possible
4 nodes, two triangles, five edges
"""
nodes = [(0.1, 0.1),
(2.1, 0.1),
(1.1, 2.1),
(3.1, 2.1)]
faces = [(0, 1, 2),
(1, 3, 2), ]
edges = [(0, 1),
(1, 3),
(3, 2),
(2, 0),
(1, 2)]
return ugrid.UGrid(nodes, faces, edges)
def twenty_one_triangles():
"""
returns a basic triangle grid with 21 triangles, a hole and a "tail"
"""
nodes = [(5, 1),
(10, 1),
(3, 3),
(7, 3),
(9, 4),
(12, 4),
(5, 5),
(3, 7),
(5, 7),
(7, 7),
(9, 7),
(11, 7),
(5, 9),
(8, 9),
(11, 9),
(9, 11),
(11, 11),
(7, 13),
(9, 13),
(7, 15), ]
faces = [(0, 1, 3),
(0, 6, 2),
(0, 3, 6),
(1, 4, 3),
(1, 5, 4),
(2, 6, 7),
(6, 8, 7),
(7, 8, 12),
(6, 9, 8),
(8, 9, 12),
(9, 13, 12),
(4, 5, 11),
(4, 11, 10),
(9, 10, 13),
(10, 11, 14),
(10, 14, 13),
(13, 14, 15),
(14, 16, 15),
(15, 16, 18),
(15, 18, 17),
(17, 18, 19), ]
# We may want to use this later to define just the outer boundary.
boundaries = [(0, 1),
(1, 5),
(5, 11),
(11, 14),
(14, 16),
(16, 18),
(18, 19),
(19, 17),
(17, 15),
(15, 13),
(13, 12),
(12, 7),
(7, 2),
(2, 0),
(3, 4),
(4, 10),
(10, 9),
(9, 6),
(6, 3), ]
grid = ugrid.UGrid(nodes, faces, boundaries=boundaries)
grid.build_edges()
return grid
if __name__ == "__main__":
grid = twenty_one_triangles()
print(grid.edges)
print(len(grid.edges))
grid.build_edges()
print(grid.edges)
print(len(grid.edges))
| mit | 651,994,480,699,171,300 | 21.447368 | 72 | 0.313794 | false |
harpesichord/Door-Access | door/MFRC522.py | 1 | 6647 | #-------------------------------------------------------------------------------
# Name: MFRC522.py
# Purpose: Mifare MFRC-522 module 13.56 MHz card/tag reader
#
# Author: Mario Gomez, Jakub Dvorak (additional changes for this project)
# More: http://fuenteabierta.teubi.co/2013/07/utilizando-el-lector-nfc-rc522-en-la.html
# Updated: 26.10.2013
#-------------------------------------------------------------------------------
import RPi.GPIO as GPIO
import spi
import signal
class MFRC522:
NRSTPD = 22
MAX_LEN = 16
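# MFRC522 (PCD) command codes, written to CommandReg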
PCD_IDLE = 0x00
PCD_AUTHENT = 0x0E
PCD_RECEIVE = 0x08
PCD_TRANSMIT = 0x04
PCD_TRANSCEIVE = 0x0C
PCD_RESETPHASE = 0x0F
PCD_CALCCRC = 0x03
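# Commands sent to the PICC (card/tag) over the RF interface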
PICC_REQIDL = 0x26
PICC_REQALL = 0x52
PICC_ANTICOLL = 0x93
PICC_SElECTTAG = 0x93
PICC_AUTHENT1A = 0x60
PICC_AUTHENT1B = 0x61
PICC_READ = 0x30
PICC_WRITE = 0xA0
PICC_DECREMENT = 0xC0
PICC_INCREMENT = 0xC1
PICC_RESTORE = 0xC2
PICC_TRANSFER = 0xB0
PICC_HALT = 0x50
MI_OK = 0
MI_NOTAGERR = 1
MI_ERR = 2
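# MFRC522 register addresses (register map from the MFRC522 datasheet)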
Reserved00 = 0x00
CommandReg = 0x01
CommIEnReg = 0x02
DivlEnReg = 0x03
CommIrqReg = 0x04
DivIrqReg = 0x05
ErrorReg = 0x06
Status1Reg = 0x07
Status2Reg = 0x08
FIFODataReg = 0x09
FIFOLevelReg = 0x0A
WaterLevelReg = 0x0B
ControlReg = 0x0C
BitFramingReg = 0x0D
CollReg = 0x0E
Reserved01 = 0x0F
Reserved10 = 0x10
ModeReg = 0x11
TxModeReg = 0x12
RxModeReg = 0x13
TxControlReg = 0x14
TxAutoReg = 0x15
TxSelReg = 0x16
RxSelReg = 0x17
RxThresholdReg = 0x18
DemodReg = 0x19
Reserved11 = 0x1A
Reserved12 = 0x1B
MifareReg = 0x1C
Reserved13 = 0x1D
Reserved14 = 0x1E
SerialSpeedReg = 0x1F
Reserved20 = 0x20
CRCResultRegM = 0x21
CRCResultRegL = 0x22
Reserved21 = 0x23
ModWidthReg = 0x24
Reserved22 = 0x25
RFCfgReg = 0x26
GsNReg = 0x27
CWGsPReg = 0x28
ModGsPReg = 0x29
TModeReg = 0x2A
TPrescalerReg = 0x2B
TReloadRegH = 0x2C
TReloadRegL = 0x2D
TCounterValueRegH = 0x2E
TCounterValueRegL = 0x2F
Reserved30 = 0x30
TestSel1Reg = 0x31
TestSel2Reg = 0x32
TestPinEnReg = 0x33
TestPinValueReg = 0x34
TestBusReg = 0x35
AutoTestReg = 0x36
VersionReg = 0x37
AnalogTestReg = 0x38
TestDAC1Reg = 0x39
TestDAC2Reg = 0x3A
TestADCReg = 0x3B
Reserved31 = 0x3C
Reserved32 = 0x3D
Reserved33 = 0x3E
Reserved34 = 0x3F
serNum = []
def __init__(self,spd=1000000):
spi.openSPI(speed=spd)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(22, GPIO.OUT)
GPIO.output(self.NRSTPD, 1)
self.MFRC522_Init()
def MFRC522_Reset(self):
self.Write_MFRC522(self.CommandReg, self.PCD_RESETPHASE)
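# SPI framing: the address byte is (addr << 1) & 0x7E, with bit 7 (0x80) set for reads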
def Write_MFRC522(self,addr,val):
spi.transfer(((addr<<1)&0x7E,val))
def Read_MFRC522(self,addr):
val = spi.transfer((((addr<<1)&0x7E) | 0x80,0))
return val[1]
def SetBitMask(self, reg, mask):
tmp = self.Read_MFRC522(reg)
self.Write_MFRC522(reg, tmp | mask)
def ClearBitMask(self, reg, mask):
tmp = self.Read_MFRC522(reg)
self.Write_MFRC522(reg, tmp & (~mask))
def AntennaOn(self):
temp = self.Read_MFRC522(self.TxControlReg)
if(~(temp & 0x03)):
self.SetBitMask(self.TxControlReg, 0x03)
def AntennaOff(self):
self.ClearBitMask(self.TxControlReg, 0x03)
spi.closeSPI()
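# Write a command plus its data to the FIFO, wait for the IRQ, then read back the response and its length in bits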
def MFRC522_ToCard(self,command,sendData):
backData = []
backLen = 0
status = self.MI_ERR
irqEn = 0x00
waitIRq = 0x00
lastBits = None
n = 0
i = 0
if command == self.PCD_AUTHENT:
irqEn = 0x12
waitIRq = 0x10
if command == self.PCD_TRANSCEIVE:
irqEn = 0x77
waitIRq = 0x30
self.Write_MFRC522(self.CommIEnReg, irqEn|0x80)
self.ClearBitMask(self.CommIrqReg, 0x80)
self.SetBitMask(self.FIFOLevelReg, 0x80)
self.Write_MFRC522(self.CommandReg, self.PCD_IDLE)
while(i<len(sendData)):
self.Write_MFRC522(self.FIFODataReg, sendData[i])
i = i+1
self.Write_MFRC522(self.CommandReg, command)
if command == self.PCD_TRANSCEIVE:
self.SetBitMask(self.BitFramingReg, 0x80)
i = 2000
while True:
n = self.Read_MFRC522(self.CommIrqReg)
i = i - 1
if ~((i!=0) and ~(n&0x01) and ~(n&waitIRq)):
break
self.ClearBitMask(self.BitFramingReg, 0x80)
if i != 0:
if (self.Read_MFRC522(self.ErrorReg) & 0x1B)==0x00:
status = self.MI_OK
if n & irqEn & 0x01:
status = self.MI_NOTAGERR
if command == self.PCD_TRANSCEIVE:
n = self.Read_MFRC522(self.FIFOLevelReg)
lastBits = self.Read_MFRC522(self.ControlReg) & 0x07
if lastBits != 0:
backLen = (n-1)*8 + lastBits
else:
backLen = n*8
if n == 0:
n = 1
if n > self.MAX_LEN:
n = self.MAX_LEN
i = 0
while i<n:
backData.append(self.Read_MFRC522(self.FIFODataReg))
i = i + 1
else:
status = self.MI_ERR
return (status,backData,backLen)
def MFRC522_Request(self, reqMode):
status = None
backBits = None
TagType = []
self.Write_MFRC522(self.BitFramingReg, 0x07)
TagType.append(reqMode)
(status,backData,backBits) = self.MFRC522_ToCard(self.PCD_TRANSCEIVE, TagType)
if ((status != self.MI_OK) | (backBits != 0x10)):
status = self.MI_ERR
return (status,backBits)
def MFRC522_Anticoll(self):
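# Anti-collision: request the card's 4-byte UID plus check byte and verify the XOR checksum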
backData = []
serNumCheck = 0
serNum = []
self.Write_MFRC522(self.BitFramingReg, 0x00)
serNum.append(self.PICC_ANTICOLL)
serNum.append(0x20)
(status,backData,backBits) = self.MFRC522_ToCard(self.PCD_TRANSCEIVE,serNum)
if(status == self.MI_OK):
i = 0
if len(backData)==5:
while i<4:
serNumCheck = serNumCheck ^ backData[i]
i = i + 1
if serNumCheck != backData[i]:
status = self.MI_ERR
else:
status = self.MI_ERR
return (status,backData)
def MFRC522_Init(self):
GPIO.output(self.NRSTPD, 1)
self.MFRC522_Reset()
self.Write_MFRC522(self.TModeReg, 0x8D)
self.Write_MFRC522(self.TPrescalerReg, 0x3E)
self.Write_MFRC522(self.TReloadRegL, 30)
self.Write_MFRC522(self.TReloadRegH, 0)
self.Write_MFRC522(self.TxAutoReg, 0x40)
self.Write_MFRC522(self.ModeReg, 0x3D)
self.AntennaOn()
| mit | -3,602,060,209,386,655,000 | 23.618519 | 87 | 0.590492 | false |
at15/ts-parallel | bin/agraph.py | 1 | 2982 | #!/usr/bin/env python3
import glob
import re
import csv
import matplotlib.pyplot as plt
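# Aggregate benchmark CSVs named <backend>_<op>_<N>_<type>.csv, pull out the "run"
# stage duration for each vector length N, and plot one line per backend and operation.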
def main():
data = {}
operations = ["sort", "reduce"]
types = ["int", "float", "double"]
for op in operations:
for tp in types:
# i.e. sort int
data[op + "_" + tp] = {}
results = glob.glob("*_" + op + "_*_" + tp + ".csv")
for result in results:
backend, num = re.match(
"(.*)_" + op + "_(.*)_" + tp + ".csv", result).groups()
# data[op + "_" + tp]
if backend not in data[op + "_" + tp]:
data[op + "_" + tp][backend] = {}
num = int(num)
# print(backend, num)
data[op + "_" + tp][backend][num] = {}
with open(result) as f:
# NOTE: it will detect the header of CSV and change it to
# key
reader = csv.DictReader(f)
for row in reader:
data[op + "_" + tp][backend][num][row["stage"]
] = row["duration"]
# print(row)
# print(results)
# print(data)
# now let's draw the graph
plot_data = {}
for op, backends in data.items():
# print(op)
plot_data[op] = []
for backend, results in backends.items():
pdata = {"name": backend, "x": [], "y": []}
# print(backend)
# [(10, {'init': '2771', 'generate': '7667', 'copy': '112781784', 'run': '825079', 'delete': '67504'}), (50, {'init': '1045', 'generate': '8579', 'copy': '110102907', 'run': '1389482', 'delete': '68685'})]
sorted_results = sorted(results.items())
for result in sorted_results:
num, stages = result
# print(num)
if "run" not in stages:
print("didn't find run!", op, backend, num)
continue
pdata["x"].append(num)
pdata["y"].append(stages["run"])
plot_data[op].append(pdata)
# print(plot_data)
i = 1
color_map = {"serial": "C1", "boost": "C2", "thrust": "C3"}
exclude = {"serial": True}
for op, pdatas in plot_data.items():
plt.figure(i)
i += 1
for pdata in pdatas:
if pdata["name"] in exclude:
continue
plt.plot(pdata["x"], pdata["y"],
color_map[pdata["name"]], label=pdata["name"])
plt.title(op)
plt.xlabel("Vector length")
# TODO: the ylabel is not shown, and the colors change between figures
# NOTE: we use microseconds because nanosecond values came out negative
plt.ylabel("Time (us)")
plt.legend(loc='upper right', shadow=True, fontsize='x-small')
plt.show()
if __name__ == "__main__":
main()
| mit | 5,607,377,259,063,584,000 | 37.230769 | 217 | 0.450704 | false |
cyphactor/lifecyclemanager | testenv/trac-0.10.4/trac/versioncontrol/web_ui/log.py | 1 | 9917 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2006 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <[email protected]>
# Copyright (C) 2005-2006 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <[email protected]>
# Christian Boos <[email protected]>
import re
import urllib
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.util.datefmt import http_date
from trac.util.html import html
from trac.util.text import wrap
from trac.versioncontrol import Changeset
from trac.versioncontrol.web_ui.changeset import ChangesetModule
from trac.versioncontrol.web_ui.util import *
from trac.web import IRequestHandler
from trac.web.chrome import add_link, add_stylesheet, INavigationContributor
from trac.wiki import IWikiSyntaxProvider, Formatter
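# Maximum number of revisions shown per log page; one extra entry is fetched to detect whether more exist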
LOG_LIMIT = 100
class LogModule(Component):
implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
IWikiSyntaxProvider)
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'browser'
def get_navigation_items(self, req):
return []
# IPermissionRequestor methods
def get_permission_actions(self):
return ['LOG_VIEW']
# IRequestHandler methods
def match_request(self, req):
import re
match = re.match(r'/log(?:(/.*)|$)', req.path_info)
if match:
req.args['path'] = match.group(1) or '/'
return True
def process_request(self, req):
req.perm.assert_permission('LOG_VIEW')
mode = req.args.get('mode', 'stop_on_copy')
path = req.args.get('path', '/')
rev = req.args.get('rev')
stop_rev = req.args.get('stop_rev')
format = req.args.get('format')
verbose = req.args.get('verbose')
limit = LOG_LIMIT
repos = self.env.get_repository(req.authname)
normpath = repos.normalize_path(path)
rev = unicode(repos.normalize_rev(rev))
if stop_rev:
stop_rev = unicode(repos.normalize_rev(stop_rev))
if repos.rev_older_than(rev, stop_rev):
rev, stop_rev = stop_rev, rev
req.hdf['title'] = path + ' (log)'
req.hdf['log'] = {
'mode': mode,
'path': path,
'rev': rev,
'verbose': verbose,
'stop_rev': stop_rev,
'browser_href': req.href.browser(path),
'changeset_href': req.href.changeset(),
'log_href': req.href.log(path, rev=rev)
}
path_links = get_path_links(req.href, path, rev)
req.hdf['log.path'] = path_links
if path_links:
add_link(req, 'up', path_links[-1]['href'], 'Parent directory')
# The `history()` method depends on the mode:
# * for ''stop on copy'' and ''follow copies'', it's `Node.history()`
# * for ''show only add, delete'', it's `Repository.get_path_history()`
if mode == 'path_history':
def history(limit):
for h in repos.get_path_history(path, rev, limit):
yield h
else:
history = get_existing_node(req, repos, path, rev).get_history
# -- retrieve history, asking for limit+1 results
info = []
previous_path = repos.normalize_path(path)
for old_path, old_rev, old_chg in history(limit+1):
if stop_rev and repos.rev_older_than(old_rev, stop_rev):
break
old_path = repos.normalize_path(old_path)
item = {
'rev': str(old_rev),
'path': old_path,
'log_href': req.href.log(old_path, rev=old_rev),
'browser_href': req.href.browser(old_path, rev=old_rev),
'changeset_href': req.href.changeset(old_rev),
'restricted_href': req.href.changeset(old_rev, new_path=old_path),
'change': old_chg
}
if not (mode == 'path_history' and old_chg == Changeset.EDIT):
info.append(item)
if old_path and old_path != previous_path \
and not (mode == 'path_history' and old_path == normpath):
item['copyfrom_path'] = old_path
if mode == 'stop_on_copy':
break
if len(info) > limit: # we want limit+1 entries
break
previous_path = old_path
if info == []:
# FIXME: we should send a 404 error here
raise TracError("The file or directory '%s' doesn't exist "
"at revision %s or at any previous revision."
% (path, rev), 'Nonexistent path')
def make_log_href(path, **args):
link_rev = rev
if rev == str(repos.youngest_rev):
link_rev = None
params = {'rev': link_rev, 'mode': mode, 'limit': limit}
params.update(args)
if verbose:
params['verbose'] = verbose
return req.href.log(path, **params)
if len(info) == limit+1: # limit+1 reached, there _might_ be some more
next_rev = info[-1]['rev']
next_path = info[-1]['path']
add_link(req, 'next', make_log_href(next_path, rev=next_rev),
'Revision Log (restarting at %s, rev. %s)'
% (next_path, next_rev))
# now, only show 'limit' results
del info[-1]
req.hdf['log.items'] = info
revs = [i['rev'] for i in info]
changes = get_changes(self.env, repos, revs, verbose, req, format)
if format == 'rss':
# Get the email addresses of all known users
email_map = {}
for username,name,email in self.env.get_known_users():
if email:
email_map[username] = email
for cs in changes.values():
# For RSS, author must be an email address
author = cs['author']
author_email = ''
if '@' in author:
author_email = author
elif email_map.has_key(author):
author_email = email_map[author]
cs['author'] = author_email
cs['date'] = http_date(cs['date_seconds'])
elif format == 'changelog':
for rev in revs:
changeset = repos.get_changeset(rev)
cs = changes[rev]
cs['message'] = wrap(changeset.message, 70,
initial_indent='\t',
subsequent_indent='\t')
files = []
actions = []
for path, kind, chg, bpath, brev in changeset.get_changes():
files.append(chg == Changeset.DELETE and bpath or path)
actions.append(chg)
cs['files'] = files
cs['actions'] = actions
req.hdf['log.changes'] = changes
if req.args.get('format') == 'changelog':
return 'log_changelog.cs', 'text/plain'
elif req.args.get('format') == 'rss':
return 'log_rss.cs', 'application/rss+xml'
add_stylesheet(req, 'common/css/browser.css')
add_stylesheet(req, 'common/css/diff.css')
rss_href = make_log_href(path, format='rss', stop_rev=stop_rev)
add_link(req, 'alternate', rss_href, 'RSS Feed', 'application/rss+xml',
'rss')
changelog_href = make_log_href(path, format='changelog',
stop_rev=stop_rev)
add_link(req, 'alternate', changelog_href, 'ChangeLog', 'text/plain')
return 'log.cs', None
# IWikiSyntaxProvider methods
REV_RANGE = "%s[-:]%s" % ((ChangesetModule.CHANGESET_ID,)*2)
def get_wiki_syntax(self):
yield (
# [...] form, starts with optional intertrac: [T... or [trac ...
r"!?\[(?P<it_log>%s\s*)" % Formatter.INTERTRAC_SCHEME +
# <from>:<to> + optional path restriction
r"(?P<log_rev>%s)(?P<log_path>/[^\]]*)?\]" % self.REV_RANGE,
lambda x, y, z: self._format_link(x, 'log1', y[1:-1], y, z))
yield (
# r<from>:<to> form (no intertrac and no path restriction)
r"(?:\b|!)r%s\b" % self.REV_RANGE,
lambda x, y, z: self._format_link(x, 'log2', '@' + y[1:], y))
def get_link_resolvers(self):
yield ('log', self._format_link)
def _format_link(self, formatter, ns, match, label, fullmatch=None):
if ns == 'log1':
it_log = fullmatch.group('it_log')
rev = fullmatch.group('log_rev')
path = fullmatch.group('log_path') or '/'
target = '%s%s@%s' % (it_log, path, rev)
# prepending it_log is needed, as the helper expects it there
intertrac = formatter.shorthand_intertrac_helper(
'log', target, label, fullmatch)
if intertrac:
return intertrac
else: # ns == 'log2'
path, rev, line = get_path_rev_line(match)
stop_rev = None
for sep in ':-':
if not stop_rev and rev and sep in rev:
stop_rev, rev = rev.split(sep, 1)
href = formatter.href.log(path or '/', rev=rev, stop_rev=stop_rev)
return html.A(label, href=href, class_='source')
| gpl-3.0 | -4,291,927,195,399,466,500 | 38.66 | 82 | 0.543217 | false |
wfnex/openbras | src/VPP/test/test_ip_mcast.py | 1 | 22566 | #!/usr/bin/env python
import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal, \
MRouteItfFlags, MRouteEntryFlags
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, getmacbyip, ICMP
from scapy.layers.inet6 import IPv6, getmacbyip6
from util import ppp
#
# The number of packets sent is set to 90 so that when we replicate more than 3
# times, which we do for some entries, we will generate more than 256 packets
# to the next node in the VLIB graph. Thus we are testing the code's
# correctness in handling this overflow.
#
N_PKTS_IN_STREAM = 90
class TestMFIB(VppTestCase):
""" MFIB Test Case """
def setUp(self):
super(TestMFIB, self).setUp()
def test_mfib(self):
""" MFIB Unit Tests """
error = self.vapi.cli("test mfib")
if error:
self.logger.critical(error)
self.assertEqual(error.find("Failed"), -1)
class TestIPMcast(VppTestCase):
""" IP Multicast Test Case """
def setUp(self):
super(TestIPMcast, self).setUp()
# create 8 pg interfaces
self.create_pg_interfaces(range(8))
# setup interfaces
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.config_ip6()
i.resolve_arp()
i.resolve_ndp()
def create_stream_ip4(self, src_if, src_ip, dst_ip, payload_size=0):
pkts = []
# default to small packet sizes
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IP(src=src_ip, dst=dst_ip) /
UDP(sport=1234, dport=1234))
if not payload_size:
payload_size = 64 - len(p)
p = p / Raw('\xa5' * payload_size)
for i in range(0, N_PKTS_IN_STREAM):
pkts.append(p)
return pkts
def create_stream_ip6(self, src_if, src_ip, dst_ip):
pkts = []
for i in range(0, N_PKTS_IN_STREAM):
info = self.create_packet_info(src_if, src_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IPv6(src=src_ip, dst=dst_ip) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
pkts.append(p)
return pkts
def verify_filter(self, capture, sent):
if not len(capture) == len(sent):
# filter out any IPv6 RAs from the capture
for p in capture:
if (p.haslayer(IPv6)):
capture.remove(p)
return capture
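# Check every replicated IPv4 packet: dst MAC derived from the multicast group IP, IP addresses unchanged, TTL decremented by one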
def verify_capture_ip4(self, rx_if, sent):
rxd = rx_if.get_capture(len(sent))
try:
capture = self.verify_filter(rxd, sent)
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
eth = rx[Ether]
self.assertEqual(eth.type, 0x800)
tx_ip = tx[IP]
rx_ip = rx[IP]
# check the MAC address on the RX'd packet is correctly formed
self.assertEqual(eth.dst, getmacbyip(rx_ip.dst))
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.ttl + 1, tx_ip.ttl)
except:
raise
def verify_capture_ip6(self, rx_if, sent):
capture = rx_if.get_capture(len(sent))
self.assertEqual(len(capture), len(sent))
for i in range(len(capture)):
tx = sent[i]
rx = capture[i]
eth = rx[Ether]
self.assertEqual(eth.type, 0x86DD)
tx_ip = tx[IPv6]
rx_ip = rx[IPv6]
# check the MAC address on the RX'd packet is correctly formed
self.assertEqual(eth.dst, getmacbyip6(rx_ip.dst))
self.assertEqual(rx_ip.src, tx_ip.src)
self.assertEqual(rx_ip.dst, tx_ip.dst)
# IP processing post pop has decremented the TTL
self.assertEqual(rx_ip.hlim + 1, tx_ip.hlim)
def test_ip_mcast(self):
""" IP Multicast Replication """
#
# a stream that matches the default route. gets dropped.
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on default route")
#
# A (*,G).
# one accepting interface, pg0, 7 forwarding interfaces
# many forwarding interfaces test the case where the replicare DPO
# needs to use extra cache lines for the buckets.
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg4.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg5.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg6.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg7.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
#
# An (S,G).
# one accepting interface, pg0, 2 forwarding interfaces
#
route_1_1_1_1_232_1_1_1 = VppIpMRoute(
self,
"1.1.1.1",
"232.1.1.1", 64,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_1_1_1_1_232_1_1_1.add_vpp_config()
#
# An (*,G/m).
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232 = VppIpMRoute(
self,
"0.0.0.0",
"232.0.0.0", 8,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232.add_vpp_config()
#
# a stream that matches the route for (1.1.1.1,232.1.1.1)
# small packets
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1->7
self.verify_capture_ip4(self.pg1, tx)
self.verify_capture_ip4(self.pg2, tx)
# no replications on Pg0
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on PG0")
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
#
# a stream that matches the route for (1.1.1.1,232.1.1.1)
# large packets
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.1.1.1",
payload_size=1024)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1->7
self.verify_capture_ip4(self.pg1, tx)
self.verify_capture_ip4(self.pg2, tx)
# no replications on Pg0
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on PG0")
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
#
# a stream that matches the route for (*,232.0.0.0/8)
# Send packets with the 9th bit set so we test the correct clearing
# of that bit in the mac rewrite
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "1.1.1.1", "232.255.255.255")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1 only
self.verify_capture_ip4(self.pg1, tx)
# no replications on Pg0, Pg2 not Pg3
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on PG0")
self.pg2.assert_nothing_captured(
remark="IP multicast packets forwarded on PG2")
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
#
# a stream that matches the route for (*,232.1.1.1)
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0, "1.1.1.2", "232.1.1.1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1, 2, 3.
self.verify_capture_ip4(self.pg1, tx)
self.verify_capture_ip4(self.pg2, tx)
self.verify_capture_ip4(self.pg3, tx)
self.verify_capture_ip4(self.pg4, tx)
self.verify_capture_ip4(self.pg5, tx)
self.verify_capture_ip4(self.pg6, tx)
self.verify_capture_ip4(self.pg7, tx)
route_232_1_1_1.remove_vpp_config()
route_1_1_1_1_232_1_1_1.remove_vpp_config()
route_232.remove_vpp_config()
def test_ip6_mcast(self):
""" IPv6 Multicast Replication """
#
# a stream that matches the default route. gets dropped.
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip6(self.pg0, "2001::1", "ff01::1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg0.assert_nothing_captured(
remark="IPv6 multicast packets forwarded on default route")
#
# A (*,G).
# one accepting interface, pg0, 3 forwarding interfaces
#
route_ff01_1 = VppIpMRoute(
self,
"::",
"ff01::1", 128,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg3.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff01_1.add_vpp_config()
#
# An (S,G).
# one accepting interface, pg0, 2 forwarding interfaces
#
route_2001_ff01_1 = VppIpMRoute(
self,
"2001::1",
"ff01::1", 256,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD),
VppMRoutePath(self.pg2.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_2001_ff01_1.add_vpp_config()
#
# An (*,G/m).
# one accepting interface, pg0, 1 forwarding interface
#
route_ff01 = VppIpMRoute(
self,
"::",
"ff01::", 16,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)],
is_ip6=1)
route_ff01.add_vpp_config()
#
# a stream that matches the route for (*, ff01::/16)
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip6(self.pg0, "2002::1", "ff01:2::255")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1
self.verify_capture_ip6(self.pg1, tx)
# no replications on Pg0, Pg3
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on PG0")
self.pg2.assert_nothing_captured(
remark="IP multicast packets forwarded on PG2")
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
#
# Bounce the interface and it should still work
#
self.pg1.admin_down()
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.pg1.assert_nothing_captured(
remark="IP multicast packets forwarded on down PG1")
self.pg1.admin_up()
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
self.verify_capture_ip6(self.pg1, tx)
#
# a stream that matches the route for (*,ff01::1)
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip6(self.pg0, "2002::2", "ff01::1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1, 2, 3.
self.verify_capture_ip6(self.pg1, tx)
self.verify_capture_ip6(self.pg2, tx)
self.verify_capture_ip6(self.pg3, tx)
# no replications on Pg0
self.pg0.assert_nothing_captured(
remark="IPv6 multicast packets forwarded on PG0")
#
# a stream that matches the route for (2001::1, ff00::1)
#
self.vapi.cli("clear trace")
tx = self.create_stream_ip6(self.pg0, "2001::1", "ff01::1")
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1, 2,
self.verify_capture_ip6(self.pg1, tx)
self.verify_capture_ip6(self.pg2, tx)
# no replications on Pg0, Pg3
self.pg0.assert_nothing_captured(
remark="IP multicast packets forwarded on PG0")
self.pg3.assert_nothing_captured(
remark="IP multicast packets forwarded on PG3")
route_ff01.remove_vpp_config()
route_ff01_1.remove_vpp_config()
route_2001_ff01_1.remove_vpp_config()
def _mcast_connected_send_stream(self, dst_ip):
self.vapi.cli("clear trace")
tx = self.create_stream_ip4(self.pg0,
self.pg0.remote_ip4,
dst_ip)
self.pg0.add_stream(tx)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# We expect replications on Pg1.
self.verify_capture_ip4(self.pg1, tx)
return tx
def test_ip_mcast_connected(self):
""" IP Multicast Connected Source check """
#
# A (*,G).
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
route_232_1_1_1.update_entry_flags(
MRouteEntryFlags.MFIB_ENTRY_FLAG_CONNECTED)
#
# Now the (*,G) is present, send from connected source
#
tx = self._mcast_connected_send_stream("232.1.1.1")
#
# Construct a representation of the signal we expect on pg0
#
signal_232_1_1_1_itf_0 = VppMFibSignal(self,
route_232_1_1_1,
self.pg0.sw_if_index,
tx[0])
#
# read the only expected signal
#
signals = self.vapi.mfib_signal_dump()
self.assertEqual(1, len(signals))
signal_232_1_1_1_itf_0.compare(signals[0])
#
# reading the signal allows for the generation of another
# so send more packets and expect the next signal
#
tx = self._mcast_connected_send_stream("232.1.1.1")
signals = self.vapi.mfib_signal_dump()
self.assertEqual(1, len(signals))
signal_232_1_1_1_itf_0.compare(signals[0])
#
# A Second entry with connected check
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232_1_1_2 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.2", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_2.add_vpp_config()
route_232_1_1_2.update_entry_flags(
MRouteEntryFlags.MFIB_ENTRY_FLAG_CONNECTED)
#
# Send traffic to both entries. One read should net us two signals
#
signal_232_1_1_2_itf_0 = VppMFibSignal(self,
route_232_1_1_2,
self.pg0.sw_if_index,
tx[0])
tx = self._mcast_connected_send_stream("232.1.1.1")
tx2 = self._mcast_connected_send_stream("232.1.1.2")
#
# read the only expected signal
#
signals = self.vapi.mfib_signal_dump()
self.assertEqual(2, len(signals))
signal_232_1_1_1_itf_0.compare(signals[1])
signal_232_1_1_2_itf_0.compare(signals[0])
route_232_1_1_1.remove_vpp_config()
route_232_1_1_2.remove_vpp_config()
def test_ip_mcast_signal(self):
""" IP Multicast Signal """
#
# A (*,G).
# one accepting interface, pg0, 1 forwarding interfaces
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
route_232_1_1_1.update_entry_flags(
MRouteEntryFlags.MFIB_ENTRY_FLAG_SIGNAL)
#
# Now the (*,G) is present, send from connected source
#
tx = self._mcast_connected_send_stream("232.1.1.1")
#
# Construct a representation of the signal we expect on pg0
#
signal_232_1_1_1_itf_0 = VppMFibSignal(self,
route_232_1_1_1,
self.pg0.sw_if_index,
tx[0])
#
# read the only expected signal
#
signals = self.vapi.mfib_signal_dump()
self.assertEqual(1, len(signals))
signal_232_1_1_1_itf_0.compare(signals[0])
#
# reading the signal allows for the generation of another
# so send more packets and expect the next signal
#
tx = self._mcast_connected_send_stream("232.1.1.1")
signals = self.vapi.mfib_signal_dump()
self.assertEqual(1, len(signals))
signal_232_1_1_1_itf_0.compare(signals[0])
#
# Set the negate-signal on the accepting interval - the signals
# should stop
#
route_232_1_1_1.update_path_flags(
self.pg0.sw_if_index,
(MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT |
MRouteItfFlags.MFIB_ITF_FLAG_NEGATE_SIGNAL))
self.vapi.cli("clear trace")
tx = self._mcast_connected_send_stream("232.1.1.1")
signals = self.vapi.mfib_signal_dump()
self.assertEqual(0, len(signals))
#
# Clear the SIGNAL flag on the entry and the signals should
# come back since the interface is still NEGATE-SIGNAL
#
route_232_1_1_1.update_entry_flags(
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE)
tx = self._mcast_connected_send_stream("232.1.1.1")
signals = self.vapi.mfib_signal_dump()
self.assertEqual(1, len(signals))
signal_232_1_1_1_itf_0.compare(signals[0])
#
# Lastly remove the NEGATE-SIGNAL from the interface and the
# signals should stop
#
route_232_1_1_1.update_path_flags(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT)
tx = self._mcast_connected_send_stream("232.1.1.1")
signals = self.vapi.mfib_signal_dump()
self.assertEqual(0, len(signals))
#
# Cleanup
#
route_232_1_1_1.remove_vpp_config()
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| bsd-3-clause | -9,168,045,352,946,064,000 | 32.781437 | 79 | 0.546309 | false |
googleinterns/hw-fuzzing | experiment_scripts/plots/exp006_plot_coverage_noseeds.py | 1 | 13291 | #!/usr/bin/env python3
# Copyright 2020 Timothy Trippel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import glob
import itertools
import os
import sys
from dataclasses import dataclass
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
# from scipy import stats
# ------------------------------------------------------------------------------
# Plot parameters
# ------------------------------------------------------------------------------
LABEL_FONT_SIZE = 8
TICK_FONT_SIZE = 8
LEGEND_FONT_SIZE = 8
LEGEND_TITLE_FONT_SIZE = 8
TIME_SCALE = "m"
SCALED_MAX_PLOT_TIME = 60
PLOT_FILE_NAME = "hwf_no_seeds.pdf"
PLOT_FORMAT = "PDF"
# ------------------------------------------------------------------------------
# Plot labels
# ------------------------------------------------------------------------------
TIME_LABEL = "Time"
TOPLEVEL_LABEL = "Core"
GRAMMAR_LABEL = "Grammar"
COVERAGE_TYPE_LABEL = "Coverage"
COVERAGE_LABEL = "Cov. (%)"
HW_LINE_COVERAGE_LABEL = "HW Line (VLT)"
SW_LINE_COVERAGE_LABEL = "SW Line (kcov)"
SW_REGION_COVERAGE_LABEL = "SW Basic Block (LLVM)"
# ------------------------------------------------------------------------------
# Other Labels
# ------------------------------------------------------------------------------
AFL_TEST_ID_LABEL = "Test-ID"
# ------------------------------------------------------------------------------
# Experiment Parameters
# ------------------------------------------------------------------------------
EXPERIMENT_BASE_NAME = "exp014-cpp-afl-%s-%s-%s-%s"
TOPLEVELS = ["aes", "hmac", "kmac", "rv_timer"]
OPCODE_TYPES = ["mapped"]
INSTR_TYPES = ["variable"]
TERMINATE_TYPES = ["never"]
TRIALS = range(0, 5)
# ------------------------------------------------------------------------------
# Other defines
# ------------------------------------------------------------------------------
TERMINAL_ROWS, TERMINAL_COLS = os.popen('stty size', 'r').read().split()
LINE_SEP = "=" * int(TERMINAL_COLS)
COUNT = 0
@dataclass
class SubplotAxisLimits:
x_lower: int = None
x_upper: int = None
y_lower: int = None
y_upper: int = None
@dataclass
class FigureAxisLimits:
kcov_limits: SubplotAxisLimits
llvm_cov_limits: SubplotAxisLimits
vlt_cov_limits: SubplotAxisLimits
@dataclass
class FuzzingData:
toplevel: str = ""
opcode_type: str = ""
instr_type: str = ""
terminate_type: str = ""
trial_num: int = -1
afl_data_path: str = ""
cov_data_path: str = ""
def __post_init__(self):
self.afl_data = self._load_afl_data()
self.kcov_data = self._load_cov_data("kcov")
self.llvm_cov_data = self._load_cov_data("llvm_cov")
self.vlt_cov_data = self._load_cov_data("vlt_cov")
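# Locate AFL's plot_data log for this trial, load it, and re-base timestamps so time starts at zero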
def _load_afl_data(self):
afl_glob_path = os.path.join(self.afl_data_path, "out",
"afl_*_interactive", "plot_data")
afl_plot_data_files = glob.glob(afl_glob_path)
if len(afl_plot_data_files) != 1:
print(red("ERROR: AFL plot_data file no found."))
sys.exit(1)
# Load data into Pandas DataFrame
afl_df = self._load_csv_data(afl_plot_data_files[0])
# Remove leading/trailing white space from column names
afl_df = afl_df.rename(columns=lambda x: x.strip())
# Adjust time stamps to be relative to start time
afl_df.loc[:, "# unix_time"] -= afl_df.loc[0, "# unix_time"]
# Set time as index
afl_df = afl_df.set_index("# unix_time")
return afl_df
@staticmethod
def _id_str_to_int(id_str):
return int(id_str.lstrip("id:"))
def _load_cov_data(self, cov_type):
cov_data_path = "%s/logs/%s_cum.csv" % (self.cov_data_path, cov_type)
if not os.path.exists(cov_data_path):
print(red("ERROR: coverage data (%s) does not exist." % cov_data_path))
sys.exit(1)
# Load data into Pandas DataFrame
cov_df = self._load_csv_data(cov_data_path)
if cov_df.shape[0] < int(self.afl_data.iloc[-1, 2]):
print(red("ERROR: coverage data is missing (%s). Aborting!" % cov_type))
sys.exit(1)
# TODO(ttrippel): remove this hack after fixing run_cov_local.sh
if cov_type == "vlt_cov":
cov_df.drop(AFL_TEST_ID_LABEL, axis=1, inplace=True)
cov_df.insert(0, AFL_TEST_ID_LABEL, list(range(cov_df.shape[0])))
else:
# Convert Test-ID labels to ints
cov_df.loc[:,
AFL_TEST_ID_LABEL] = cov_df.loc[:, AFL_TEST_ID_LABEL].apply(
FuzzingData._id_str_to_int)
# Set ID column as the row indicies
cov_df = cov_df.set_index(AFL_TEST_ID_LABEL)
return cov_df
def _load_csv_data(self, csv_file):
return pd.read_csv(csv_file,
delimiter=',',
index_col=None,
engine='python')
@property
def grammar(self):
return "%s-%s-%s" % (self.opcode_type, self.instr_type,
self.terminate_type)
def get_paths_total_at_time(time, afl_data):
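# plot_data does not contain a row for every second, so step back to the closest earlier timestamp that was logged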
while time not in afl_data.index:
time -= 1
return afl_data.loc[time, "paths_total"]
def get_cov_at_time(paths_total, cov_data, cov_data_key):
return cov_data.loc[paths_total, cov_data_key] * 100.0
def get_vlt_cov_at_time(paths_total, vlt_cov_data):
vlt_cov = (float(vlt_cov_data.loc[paths_total, "Lines-Covered"]) /
float(vlt_cov_data.loc[paths_total, "Total-Lines"])) * 100.0
return vlt_cov
def build_avg_coverage_df(exp2data,
time_units="m",
normalize_to_start=False,
consolidation="max"):
print(yellow("Building average coverage dataframe ..."))
# Create empty dictionary that will be used to create a Pandas DataFrame that
# looks like the following:
# +--------------------------------------------------------------------+
# | toplevel | isa (grammar) | coverage type | time (s) | coverage (%) |
# +--------------------------------------------------------------------+
# | ... | ... | ... | ... | ... |
coverage_dict = {
TOPLEVEL_LABEL: [],
GRAMMAR_LABEL: [],
COVERAGE_TYPE_LABEL: [],
TIME_LABEL: [],
COVERAGE_LABEL: [],
}
for exp_name, fd_list in exp2data.items():
anchor_fd = fd_list[0]
for time, row in anchor_fd.afl_data.iterrows():
# scale time
if time_units == "h":
scaled_time = float(time) / float(3600)
elif time_units == "m":
scaled_time = float(time) / float(60)
else:
scaled_time = time
# add circuit, grammar, and time values to dataframe row
for _ in range(3):
coverage_dict[TOPLEVEL_LABEL].append(anchor_fd.toplevel)
coverage_dict[GRAMMAR_LABEL].append(anchor_fd.grammar)
coverage_dict[TIME_LABEL].append(scaled_time)
# compute average coverage at all points in time
kcov_avg = 0
llvm_cov_avg = 0
vlt_cov_avg = 0
kcov_max = 0
llvm_cov_max = 0
vlt_cov_max = 0
i = 0
for fd in fd_list:
# get the paths_total at the current time
paths_total = get_paths_total_at_time(time, fd.afl_data) - 1
# get coverage data
# print(exp_name, i)
kcov = get_cov_at_time(paths_total, fd.kcov_data, "Line-Coverage-(%)")
kcov_avg += kcov
kcov_max = max(kcov_max, kcov)
llvm_cov = get_cov_at_time(paths_total, fd.llvm_cov_data,
"Region-Coverage-(%)")
llvm_cov_avg += llvm_cov
llvm_cov_max = max(llvm_cov_max, llvm_cov)
vlt_cov = get_vlt_cov_at_time(paths_total, fd.vlt_cov_data)
vlt_cov_avg += vlt_cov
vlt_cov_max = max(vlt_cov_max, vlt_cov)
i += 1
kcov_avg /= float(len(fd_list))
llvm_cov_avg /= float(len(fd_list))
vlt_cov_avg /= float(len(fd_list))
# save time 0 coverage to normalize
if time == 0:
kcov_avg_t0 = kcov_avg
llvm_cov_avg_t0 = llvm_cov_avg
vlt_cov_avg_t0 = vlt_cov_avg
if normalize_to_start:
kcov_avg /= kcov_avg_t0
llvm_cov_avg /= llvm_cov_avg_t0
vlt_cov_avg /= vlt_cov_avg_t0
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
if consolidation == "avg":
coverage_dict[COVERAGE_LABEL].append(kcov_avg)
coverage_dict[COVERAGE_LABEL].append(llvm_cov_avg)
coverage_dict[COVERAGE_LABEL].append(vlt_cov_avg)
else:
coverage_dict[COVERAGE_LABEL].append(kcov_max)
coverage_dict[COVERAGE_LABEL].append(llvm_cov_max)
coverage_dict[COVERAGE_LABEL].append(vlt_cov_max)
# extend lines to max time value
if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
for _ in range(3):
coverage_dict[TOPLEVEL_LABEL].append(anchor_fd.toplevel)
coverage_dict[GRAMMAR_LABEL].append(anchor_fd.grammar)
coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_LABEL].extend(coverage_dict[COVERAGE_LABEL][-3:])
# print("Max SW Line coverage: ", coverage_dict[COVERAGE_LABEL][-3])
# print("Max SW Basic Block coverage:", coverage_dict[COVERAGE_LABEL][-2])
print("Max HW Line coverage: ", coverage_dict[COVERAGE_LABEL][-1])
print(green("Done."))
print(LINE_SEP)
return pd.DataFrame.from_dict(coverage_dict)
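# Build a FuzzingData object (AFL stats plus kcov/llvm-cov/Verilator coverage) for every
# (core, grammar, trial) combination and group them by experiment name.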
def load_fuzzing_data(afl_data_root, cov_data_root):
print(yellow("Loading data ..."))
exp2data = collections.defaultdict(list)
# TODO: change this to automatically extract names from a single exp. number
# extract each data file into a Pandas dataframe
isas = list(
itertools.product(TOPLEVELS, OPCODE_TYPES, INSTR_TYPES, TERMINATE_TYPES))
for toplevel, opcode_type, instr_type, terminate_type in isas:
for trial in TRIALS:
# Build complete path to data files
exp_name_wo_trialnum = EXPERIMENT_BASE_NAME % (
toplevel, opcode_type, instr_type, terminate_type)
exp_name_wo_trialnum = exp_name_wo_trialnum.replace("_", "-")
exp_name = "%s-%d" % (exp_name_wo_trialnum, trial)
afl_data_path = os.path.join(afl_data_root, exp_name)
cov_data_path = os.path.join(cov_data_root, exp_name)
# Load fuzzing data into an object
exp2data[exp_name_wo_trialnum].append(
FuzzingData(toplevel, opcode_type, instr_type, terminate_type, trial,
afl_data_path, cov_data_path))
return exp2data
def plot_avg_coverage_vs_time(cov_df, time_units="m"):
print(yellow("Generating plot ..."))
# Set plot style and extract only HDL line coverage
sns.set_theme(context="notebook", style="darkgrid")
hdl_cov_df = cov_df[cov_df[COVERAGE_TYPE_LABEL] == HW_LINE_COVERAGE_LABEL]
# create figure and plot the data
fig, ax = plt.subplots(1, 1, figsize=(4, 2))
sns.lineplot(data=hdl_cov_df,
x=TIME_LABEL,
y=COVERAGE_LABEL,
hue=TOPLEVEL_LABEL,
ax=ax,
markers="x")
# format the plot
if time_units == "m":
time_units_label = "min."
elif time_units == "h":
time_units_label = "hours"
else:
time_units_label = "s"
ax.set_xlabel(TIME_LABEL + " (%s)" % time_units_label,
fontsize=LABEL_FONT_SIZE)
ax.set_ylabel("HDL Line " + COVERAGE_LABEL, fontsize=LABEL_FONT_SIZE)
ax.tick_params("x", labelsize=TICK_FONT_SIZE)
ax.tick_params("y", labelsize=TICK_FONT_SIZE)
plt.legend(title="Core",
fontsize=LEGEND_FONT_SIZE,
title_fontsize=LEGEND_TITLE_FONT_SIZE,
ncol=2)
plt.tight_layout()
# save the plot
plt.savefig(PLOT_FILE_NAME, format=PLOT_FORMAT)
print(green("Done."))
print(LINE_SEP)
def main(argv):
parser = argparse.ArgumentParser(description="Plotting script for exp. 004.")
parser.add_argument("afl_data_root")
parser.add_argument("cov_data_root")
args = parser.parse_args()
# Load runtime data
exp2data = load_fuzzing_data(args.afl_data_root, args.cov_data_root)
avg_cov_df = build_avg_coverage_df(exp2data,
time_units=TIME_SCALE,
normalize_to_start=False)
# Plot data
plot_avg_coverage_vs_time(avg_cov_df, time_units=TIME_SCALE)
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 | 8,622,760,688,873,013,000 | 35.614325 | 80 | 0.587841 | false |
alatiera/YEAP | src/rssinfo.py | 1 | 2735 | import feedparser as fp
from . import parse_feeds
def feedinfo(feed: parse_feeds.FEEDTUP) -> None:
"""Print the contents of the FeedTup of the Rss feed."""
# Based on RSS 2.0 Spec
# https://cyber.harvard.edu/rss/rss.html
print('\n----- Feed Info -----')
# Common elements
print(f'Feed Title: {feed.title}')
print(f'Link: {feed.link}')
print(f'Description: {feed.description}')
print(f'Published: {feed.published}')
# print('Published Parsed:{}'.format(feedobj.feed.get('published_parsed')))
# Uncommon elements
if feed.image:
print('Image: {}'.format(feed.image.get('href')))
# print('Categories: {}'.format(feedobj.feed.get('categories')))
# print('Cloud: {}'.format(feedobj.feed.get('cloud')))
# Extra
print(f'Author: {feed.author}')
print(f'Language: {feed.language}')
print(f'Rights: {feed.copyright}')
def iteminfo(entry: parse_feeds.ENTRYTUP, content: bool = True) -> None:
"""Print the contents of the Item object of the feed."""
print('\n----- Item Info -----')
# Common elements
print(f'Title: {entry.title}')
print(f'Description: {entry.description}')
print(f'Link: {entry.link}')
print(f'Published: {entry.published}')
# print(f'Published Parsed: {entry.published_parsed}')
# print(f'ID: {entry.id}')
# Uncommon elements
# Enclosures
# print(f'Enclosures: {entry.enclosures}')
print(f'Source: {entry.uri}')
print(f'Type: {entry.type}')
print(f'Length: {entry.length}')
# Content
if content and entry.content is not None:
print(f'Contents: {entry.content}')
for content in entry.content:
con = parse_feeds.content_data(content)
entrycontent(con)
print(f'Comments: {entry.comments}')
def entrycontent(content: parse_feeds.CONTUP) -> None:
"""Print the data of entry.content."""
print('\n----- Content Info -----')
print(f'Content Base: {content.base}')
print(f'Content Type: {content.type}')
print(f'Content Value: {content.value}')
print(f'Content Base: {content.language}')
def fullprint(feeduri: str, limit: int = 3) -> None:
"""Print the data of the :feeduri feed, :limit limits the enties result."""
feed = fp.parse(feeduri)
print('Feed version: {}'.format(feed.get('version')))
feed_ = parse_feeds.feed_data(feed)
feedinfo(feed_)
for ent in feed_.entries[:limit]:
entry = parse_feeds.entry_data(ent)
iteminfo(entry)
def main() -> None:
"""Ask for a url and print the contents of the rss feed."""
url = input('Insert Feed URL: ')
# Something Something Sanitize the input
fullprint(url)
if __name__ == '__main__':
main()
| gpl-3.0 | -6,923,101,523,011,745,000 | 28.408602 | 79 | 0.623766 | false |
ckolumbus/ReTextWiki.donotuse | ReTextWiki/window.py | 1 | 11507 | # vim: noexpandtab:ts=4:sw=4
# This file is part of ReTextWiki
# Copyright: CKolumbus (Chris Drexler) 2014
# License: GNU GPL v2 or higher
import os
import markups
from subprocess import Popen, PIPE
from ReText import QtCore, QtPrintSupport, QtGui, QtWidgets, QtWebKitWidgets, \
icon_path, DOCTYPE_MARKDOWN, DOCTYPE_REST, app_name, app_version, globalSettings, \
settings, readListFromSettings, writeListToSettings, writeToSettings, \
datadirs, enchant, enchant_available
from ReText.webpages import wpInit, wpUpdateAll
from ReText.dialogs import HtmlDialog, LocaleDialog
from ReText.config import ConfigDialog
from ReText.highlighter import ReTextHighlighter
from ReText.editor import ReTextEdit
from mikidown.config import Setting
from mikidown.mikibook import Mikibook
from mikidown.config import __appname__, __version__
from mikidown.mikibook import NotebookListDialog
from mikidown.mikitree import MikiTree, TocTree
from mikidown.mikisearch import MikiSearch
from mikidown.attachment import AttachmentView
from mikidown.highlighter import MikiHighlighter
from mikidown.utils import LineEditDialog, ViewedNoteIcon, parseHeaders, parseTitle
from .functions import initTree
from .whooshif import Whoosh
from whoosh.qparser import QueryParser, RegexPlugin
from ReText.window import ReTextWindow
(Qt, QSize) = (QtCore.Qt, QtCore.QSize)
(QLineEdit, QSplitter, QMainWindow, QTabWidget, QTreeWidgetItemIterator) = (
QtWidgets.QLineEdit, QtWidgets.QSplitter, QtWidgets.QMainWindow, QtWidgets.QTabWidget,
QtWidgets.QTreeWidgetItemIterator)
(QWidget, QDockWidget, QVBoxLayout, QKeySequence) = (
QtWidgets.QWidget, QtWidgets.QDockWidget, QtWidgets.QVBoxLayout, QtGui.QKeySequence)
class ReTextWikiWindow(ReTextWindow):
def __init__(self, parent=None):
ReTextWindow.__init__(self, parent)
# Read notebookList, open the first notebook.
notebooks = Mikibook.read()
if len(notebooks) == 0:
Mikibook.create()
notebooks = Mikibook.read()
if len(notebooks) != 0:
settings = Setting(notebooks)
# Initialize application and main window.
self.settings = settings
self.notePath = settings.notePath
################ Setup core components ################
self.notesTree = MikiTree(self)
self.notesTree.setObjectName("notesTree")
initTree(self.notePath, self.notesTree)
self.notesTree.sortItems(0, Qt.AscendingOrder)
#self.viewedList = QToolBar(self.tr('Recently Viewed'), self)
#self.viewedList.setIconSize(QSize(16, 16))
#self.viewedList.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
#self.viewedListActions = []
self.noteSplitter = QSplitter(Qt.Horizontal)
self.dockIndex = QDockWidget("Index")
self.dockSearch = QDockWidget("Search")
self.searchEdit = QLineEdit()
self.searchView = MikiSearch(self)
self.searchTab = QWidget()
self.dockToc = QDockWidget("TOC")
self.tocTree = TocTree()
self.dockAttachment = QDockWidget("Attachment")
self.attachmentView = AttachmentView(self)
#<-- wiki init done
################ Setup search engine ################
self.whoosh = Whoosh(self.settings.indexdir, self.settings.schema)
self.whoosh.reindex(wikiPageIterator(self.notesTree))
self.actions = dict()
self.setupActions()
self.setupMainWindow()
def setupMainWindow(self):
#--> setup Wiki Window
searchLayout = QVBoxLayout()
searchLayout.addWidget(self.searchEdit)
searchLayout.addWidget(self.searchView)
self.searchTab.setLayout(searchLayout)
self.tocTree.header().close()
self.dockIndex.setObjectName("Index")
self.dockIndex.setWidget(self.notesTree)
self.dockSearch.setObjectName("Search")
self.dockSearch.setWidget(self.searchTab)
self.dockToc.setObjectName("TOC")
self.dockToc.setWidget(self.tocTree)
self.dockAttachment.setObjectName("Attachment")
self.dockAttachment.setWidget(self.attachmentView)
self.setDockOptions(QMainWindow.VerticalTabs)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockIndex)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockSearch)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockToc)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockAttachment)
self.tabifyDockWidget(self.dockIndex, self.dockSearch)
self.tabifyDockWidget(self.dockSearch, self.dockToc)
self.tabifyDockWidget(self.dockToc, self.dockAttachment)
self.setTabPosition(Qt.LeftDockWidgetArea, QTabWidget.North)
self.dockIndex.raise_() # Put dockIndex on top of the tab stack
self.notesTree.currentItemChanged.connect(
self.currentItemChangedWrapperWiki)
self.notesTree.itemDoubleClicked.connect(
self.loadItemWiki)
self.tabWidget.currentChanged.connect(self.changeIndexWiki)
self.tabWidget.tabCloseRequested.connect(self.closeTabWiki)
menubar = self.menuBar()
menuWiki = menubar.addMenu(self.tr('Wiki'))
menuWiki.addAction(self.actions['newPage'])
menuWiki.addAction(self.actions['newSubpage'])
menuWiki.addAction(self.actions['importPage'])
menuWiki.addAction(self.actions['openNotebook'])
menuWiki.addAction(self.actions['reIndex'])
menuWiki.addSeparator()
menuWiki.addAction(self.actions['renamePage'])
menuWiki.addAction(self.actions['delPage'])
menuWiki.addSeparator()
menuWiki.addAction(self.actions['insertImage'])
def setupActions(self):
# Global Actions
actTabIndex = self.act(self.tr('Switch to Index Tab'),
trig=lambda: self.raiseDock(self.dockIndex), shct='Ctrl+Shift+I')
actTabSearch = self.act(self.tr('Switch to Search Tab'),
trig=lambda: self.raiseDock(self.dockSearch), shct='Ctrl+Shift+F')
self.addAction(actTabIndex)
self.addAction(actTabSearch)
self.searchEdit.returnPressed.connect(self.searchNote)
################ Menu Actions ################
# actions in menuFile
actionNewPage = self.act(self.tr('&New Page...'),
trig=self.notesTree.newPage, shct=QKeySequence.New)
self.actions.update(newPage=actionNewPage)
actionNewSubpage = self.act(self.tr('New Sub&page...'),
trig=self.notesTree.newSubpage, shct='Ctrl+Shift+N')
self.actions.update(newSubpage=actionNewSubpage)
actionImportPage = self.act(self.tr('&Import Page...'), trig=self.importPage)
self.actions.update(importPage=actionImportPage)
actionOpenNotebook = self.act(self.tr('&Open Notebook...'),
trig=self.openNotebook)
self.actions.update(openNotebook=actionOpenNotebook)
actionReIndex = self.act(self.tr('Re-index'), trig=self.reIndex)
self.actions.update(reIndex=actionReIndex)
actionRenamePage = self.act(self.tr('&Rename Page...'),
trig=self.notesTree.renamePage, shct='F2')
self.actions.update(renamePage=actionRenamePage)
actionDelPage = self.act(self.tr('&Delete Page'),
trig=self.notesTree.delPageWrapper) #, QKeySequence.Delete)
self.actions.update(delPage=actionDelPage)
actionInsertImage = self.act(self.tr('&Insert Attachment'),
trig=self.insertAttachment, shct='Ctrl+I')
actionInsertImage.setEnabled(False)
self.actions.update(insertImage=actionInsertImage)
def searchNote(self):
""" Sorting criteria: "title > path > content"
Search matches are organized into html source.
"""
pattern = self.searchEdit.text()
if not pattern:
return
results = []
with self.whoosh.ix.searcher() as searcher:
matches = []
for f in ["title", "path", "content"]:
queryp = QueryParser(f, self.whoosh.ix.schema)
queryp.add_plugin(RegexPlugin())
# r"pattern" is the desired regex term format
query = queryp.parse('r"' + pattern + '"')
ms = searcher.search(query, limit=None) # default limit is 10!
for m in ms:
if not m in matches:
matches.append(m)
for r in matches:
title = r['title']
path = r['path']
term = r.highlights("content")
results.append([title, path, term])
html = """
<style>
body { font-size: 14px; }
.path { font-size: 12px; color: #009933; }
</style>
"""
for title, path, hi in results:
html += ("<p><a href='" + path + "'>" + title +
"</a><br/><span class='path'>" +
path + "</span><br/>" + hi + "</p>")
self.searchView.setHtml(html)
def changeIndexWiki(self):
pass
def closeTabWiki(self):
pass
def saveFileMain(self, dlg):
ReTextWindow.saveFileMain(self, dlg)
# TODO: add indexing code
def currentItemChangedWrapperWiki(self, current, previous):
if current is None:
return
#if previous != None and self.notesTree.pageExists(previous):
prev = self.notesTree.itemToPage(previous)
if self.notesTree.pageExists(prev):
#self.saveNote(previous)
pass
self.loadItemWiki(current)
def loadItemWiki(self, item):
currentFile = self.notesTree.itemToFile(item)
self.openFileWrapper(currentFile)
# Update attachmentView to show corresponding attachments.
attachmentdir = self.notesTree.itemToAttachmentDir(item)
self.attachmentView.model.setRootPath(attachmentdir)
#self.__logger.debug("currentItemChangedWrapper: %s", attachmentdir)
index = self.attachmentView.model.index(attachmentdir)
if index.row() == -1:
index = self.attachmentView.model.index(self.settings.attachmentPath)
#self.attachmentView.model.setFilter(QDir.Files)
#self.attachmentView.setModel(self.attachmentView.model)
self.attachmentView.setRootIndex(index)
def importPage(self):
pass
def openNotebook(self):
pass
def reIndex(self):
pass
def insertAttachment(self):
pass
def updateAttachmentView(self):
pass
class wikiPageIterator():
def __init__(self, mikitree):
self.mikiTree = mikitree
self.it = QTreeWidgetItemIterator(
self.mikiTree, QTreeWidgetItemIterator.All)
def __iter__(self):
return self
# python3 compatibility
def __next__(self):
return self.next()
def next(self):
while self.it.value():
treeItem = self.it.value()
name = self.mikiTree.itemToPage(treeItem)
path = os.path.join(self.mikiTree.pageToFile(name))
x = (path, name)
self.it += 1
return(x)
raise StopIteration | gpl-3.0 | 6,371,239,563,240,895,000 | 36.731148 | 98 | 0.63457 | false |
democratech/LaPrimaire | tools/simulation/simulation_lots.py | 1 | 2675 | import numpy as np
import mj
import matplotlib.pyplot as plt
from matplotlib import cm
import sys, os
# ---------------------------------------------------------------------------------------------------
# show that the algorithm used to build the candidate subsets ("lots") is reliable
#
Ncandidats = 100
electeurs = np.arange(10000,100000,10000)
Nlot = 10
root = "simulation/lots/"
def simulation(Nelecteurs, Nlot):
occurence = np.zeros(Ncandidats)
corr = np.zeros((Ncandidats,Ncandidats))
log_occr = root + "occr/Nc_%i-Ne_%i-Nl_%i.txt" % (Ncandidats, Nelecteurs, Nlot)
log_corr = root + "corr/Nc_%i-Ne_%i-Nl_%i.txt" % (Ncandidats, Nelecteurs, Nlot)
try:
os.makedirs(root + "occr")
os.makedirs(root + "corr")
except OSError:
pass
if os.path.isfile(log_occr) and os.path.isfile(log_corr):
occurence = np.genfromtxt(log_occr, delimiter=",")
corr = np.genfromtxt(log_corr, delimiter=",")
return [occurence,corr]
for i in range(Nelecteurs):
lot = mj.subset(Ncandidats, Nlot, occurence)
for j in lot:
corr[j,lot] += 1
np.savetxt(log_corr, corr, delimiter = ",", fmt="%i")
np.savetxt(log_occr, occurence, delimiter = ",", fmt="%i")
return [occurence, corr]
def plotOccurences(occurence):
width = 0.95
m = np.mean(occurence)
plt.bar(range(Ncandidats), occurence, width,color="#3a6d99", edgecolor='white')
plt.ylabel('Nombre d\'occurence')
plt.xlabel('Candidats')
plt.xlim([0,Ncandidats])
plt.plot([0, Ncandidats], [m,m], color="#d91d1c")
plt.show()
def plotRSMvsCandidats(occurences, electeurs):
RSM = []
for occr in occurences:
m = np.mean(occr)
std = np.std(occr)
RSM.append(std/m)
print RSM
plt.ylabel('Ratio deviation/moyenne')
plt.xlabel('Nombre d\'electeurs')
plt.xlim([0,max(electeurs)])
plt.plot(electeurs, RSM, color="#d91d1c")
plt.show()
def plotCorrelations(corr):
# mask = np.tri(corr.shape[0], k=-1)
# A = np.ma.array(corr, mask=mask)
plt.pcolor(corr)
plt.colorbar()
plt.ylabel('Candidats')
plt.xlabel('Candidats')
# plt.yticks(np.arange(0.5,10.5),range(0,10))
# plt.xticks(np.arange(0.5,10.5),range(0,10))
plt.show()
# ------
# plot 1
#[occr,corr] = simulation(100000, Nlot)
#plotOccurences(occr)
# ------
# plot 2
# occrs = []
# for e in electeurs:
# [occr,corr] = simulation(e, Nlot)
# occrs.append(occr)
# plotRSMvsCandidats(occrs, electeurs)
# ------
# plot 3
#
[occr,corr] = simulation(100000, Nlot)
plotCorrelations(corr) | agpl-3.0 | 1,603,931,362,166,058,000 | 28.406593 | 101 | 0.582804 | false |
aerler/WRF-Projects | src/archive/plotInnerPrecip.py | 1 | 6163 | '''
Created on 2012-09-29
A simple script that reads a WRF netcdf-4 file and displays a 2D field in a proper geographic projection;
application here is plotting precipitation in the inner WRF domain.
@author: Andre R. Erler
'''
## includes
# matplotlib config: size etc.
import numpy as np
import matplotlib.pylab as pyl
import matplotlib as mpl
mpl.rc('lines', linewidth=1.)
mpl.rc('font', size=10)
# pygeode stuff
from myDatasets.loadWRF import openWRF
from myPlots.plots import surfacePlot
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm, maskoceans
#from pygeode.plot import plot_v1 as pl
#from pygeode.plot import basemap as bm
## settings
nax = 2 # number of panels
ndom = 2
sf = dict(dpi=150) # print properties
folder = '/home/me/Research/Dynamical Downscaling/figures/' # figure directory
if __name__ == '__main__':
## read data
data = openWRF('ctrl-1',[1982],list(range(11,12)))
print(data[ndom-1])
## compute data
precip = []; ndays = []
for n in range(ndom):
nrec = data[n].time.values[-1]+1
ndays = data[n].xtime(time=nrec-1).get() /24/60 # xtime is in minutes, need days
dailyrain = data[n].rain(time=nrec-1).get() / ndays
# ndays = ( data[n].xtime(time=nrec-1).get() - data[n].xtime(time=0).get() )/24/60 # xtime is in minutes, need days
# dailyrain = ( data[n].rain(time=nrec-1).get() - data[n].rain(time=0).get() ) / ndays
precip.append(dailyrain.squeeze())
## setup projection
f = pyl.figure(facecolor='white', figsize = (6.25,4.25))
ax = []
for n in range(nax):
ax.append(f.add_subplot(1,2,n+1))
f.subplots_adjust(bottom=0.12, left=0.06, right=.97, top=.94, hspace=0.05, wspace=0.05) # hspace, wspace
# setup lambert conformal basemap.
# lat_1 is first standard parallel.
# lat_2 is second standard parallel (defaults to lat_1).
# lon_0,lat_0 is central point.
  # rsphere=(6378137.00,6356752.3142) specifies the WGS84 ellipsoid
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
lcc = dict(projection='lcc', lat_0=59, lon_0=-123, lat_1=53, rsphere=(6378137.00,6356752.3142),#
width=310*10e3, height=315*10e3, area_thresh = 1000., resolution='l')
# map projection boundaries for inner WRF domain
map = []
for n in range(nax):
map.append(Basemap(ax=ax[n],**lcc)) # one map for each panel!!
## Plot data
grid = 10; res = 'l'
clevs = np.linspace(0,25,51)
norm = mpl.colors.Normalize(vmin=min(clevs),vmax=max(clevs),clip=True)
cmap = mpl.cm.gist_ncar #s3pcpn
cmap.set_over('purple'); cmap.set_under('blue')
# coordinates
lat = []; lon = []; x = []; y = []
for n in range(ndom):
lat.append(data[n].lat.get())
lon.append(data[n].lon.get())
xx, yy = map[0](lon[n],lat[n]) # convert to map-native coordinates
x.append(xx); y.append(yy)
# draw boundaries of inner and outer domains
bdy2 = np.ones_like(lat[1]); bdy2[0,:]=0; bdy2[-1,:]=0; bdy2[:,0]=0; bdy2[:,-1]=0
for n in range(nax):
# N.B.: bdy2 depends on inner domain coordinates x[1],y[1]
map[n].contour(x[1],y[1],bdy2,[0],ax=ax[n], colors='k') # draw boundary of inner domain
# # terrain data: mask out ocean
# zs = []
# for n in xrange(ndom):
# zs.append(maskoceans(lon[n],lat[n],data[n].zs.get(),resolution=res,grid=grid))
# draw data
cd = []
for n in range(nax): # only plot first domain in first panel
for m in range(n+1): # but also plot first domain in second panel (as background)
print('panel %i / domain %i'%(n,m))
print('precip: min %f / max %f / mean %f'%(precip[m].min(),precip[m].max(),precip[m].mean()))
cd.append(map[n].contourf(x[m],y[m],precip[m],clevs,ax=ax[n],cmap=cmap, norm=norm,extend='both'))
# add colorbar
cax = f.add_axes([0.1, 0.06, 0.8, 0.03])
for cn in cd: # [c1d1, c1d2, c2d2]:
cn.set_clim(vmin=min(clevs),vmax=max(clevs))
cbar = f.colorbar(cax=cax,mappable=cd[0],orientation='h',extend='both') # ,size='3%',pad='2%'
cbl = np.linspace(min(clevs),max(clevs),6)
cbar.set_ticks(cbl); cbar.set_ticklabels(['%02.1f mm'%(lev) for lev in cbl])
## Annotation
# add labels
f.suptitle('Average Daily Precipitation',fontsize=12)
ax[0].set_title('Outer Domain (30 km)',fontsize=11)
ax[1].set_title('Inner Domain (10 km)',fontsize=11)
# ax.set_xlabel('Longitude'); ax.set_ylabel('Latitude')
map[0].drawmapscale(-135, 49, -137, 57, 800, barstyle='fancy', yoffset=0.01*(map[n].ymax-map[n].ymin))
for n in range(nax):
if n == 0 or n == 1: Bottom = True
else: Bottom = False
if n == 0: Left = True
else: Left = False
# land/sea mask
map[n].drawlsmask(ocean_color='blue', land_color='green',resolution=res,grid=grid)
# add map stuff
map[n].drawcoastlines(linewidth=0.5)
map[n].drawcountries(linewidth=0.5)
# map[n].drawrivers(linewidth=0.5)
# map[n].fillcontinents(color = 'coral')
map[n].drawmapboundary(fill_color='k',linewidth=2)
# labels = [left,right,top,bottom]
map[n].drawparallels([45,65],linewidth=1, labels=[Left,False,False,False])
map[n].drawparallels([55,75],linewidth=0.5, labels=[Left,False,False,False])
map[n].drawmeridians([-140,-120,-100],linewidth=1, labels=[False,False,False,Bottom])
map[n].drawmeridians([-150,-130,-110],linewidth=0.5, labels=[False,False,False,Bottom])
# save figure to disk
f.savefig(folder+'AnnualPrecip.pdf', **sf) # save figure to pdf
print(('\nSaved figure in '+folder+'AnnualPrecip.pdf'))
# show plots
pyl.show()
## more projections
# setup lambert azimuthal equal area basemap.
# lat_ts is latitude of true scale.
# lon_0,lat_0 is central point.
# laea = dict(projection='laea', lat_0=57, lon_0=-137, lat_ts=53, resolution='l', #
# width=259*30e3, height=179*30e3, rsphere=(6378137.00,6356752.3142), area_thresh = 1000.)
# lon_0, lat_0 are the center point of the projection.
# resolution = 'l' means use low resolution coastlines.
# ortho = dict(projection='ortho', lat_0 = 57, lon_0 = -137, resolution = 'l', area_thresh = 1000.)
# 'parallels':[30,50,70], 'meridians':[-180,-150,-120,-90], 'labels':[1,0,0,1]}
| gpl-3.0 | 8,309,401,785,416,781,000 | 41.212329 | 122 | 0.651631 | false |
ktkirk/HSSI | grovekit/button.py | 1 | 1500 | # Author: Sarah Knepper <[email protected]>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time
import pyupm_grove as grove
# Create the button object using GPIO pin 0
button = grove.GroveButton(0)
# Read the input and print, waiting one second between readings
while 1:
print("{} value is {}".format(button.name(), button.value()))
time.sleep(1)
# Delete the button object
del button
| bsd-2-clause | -7,523,010,755,728,816,000 | 40.666667 | 72 | 0.766 | false |
ralucagoja/pentagram | practica/practica/settings.py | 1 | 2699 | """
Django settings for practica project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k(p-s3iq!p3oj=70#pb3rh^dyz7w#t_(f)pvj1szs7e$7o_my7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'pentagram',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'practica.urls'
WSGI_APPLICATION = 'practica.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = "homepage"
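# Note (added sketch, not part of the original settings file): with the REST
# framework configuration below, API clients can authenticate either with a
# normal session cookie or with a DRF token by sending the standard header
#     Authorization: Token <key>
# Per-user tokens are provided by the 'rest_framework.authtoken' app already
# listed in INSTALLED_APPS.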
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
} | gpl-3.0 | -5,448,509,871,541,174,000 | 23.324324 | 71 | 0.712116 | false |
lxlxlo/jokosher-devel | Jokosher/Globals.py | 1 | 27373 | #
# THIS FILE IS PART OF THE JOKOSHER PROJECT AND LICENSED UNDER THE GPL. SEE
# THE 'COPYING' FILE FOR DETAILS
#
# Globals.py
#
# This module contains variable definitions that can be used across the code
# base and also includes methods for reading and writing these settings to
# the Jokosher configuration in JOKOSHER_CONFIG_HOME/config.
#
#-------------------------------------------------------------------------------
import ConfigParser
import os
import locale, gettext
import pygtk
pygtk.require("2.0")
import gobject, gtk
import xdg.BaseDirectory
import shutil
import PlatformUtils
import gettext
_ = gettext.gettext
class Settings:
"""
Handles loading/saving settings from/to a file on disk.
"""
# the different settings in each config block
general = {
# increment each time there is an incompatible change with the config file
"version" : "1",
"recentprojects": "value",
"startupaction" : "value",
"projectfolder" : "",
"windowheight" : 550,
"windowwidth" : 900,
"addinstrumentwindowheight" : 350,
"addinstrumentwindowwidth" : 300,
"instrumenteffectwindowheight" : 450,
"instrumenteffectwindowwidth" : 650,
}
recording = {
"fileformat": "flacenc",
"file_extension": "flac",
"samplerate": "0", # zero means, autodetect sample rate (ie use any available)
"audiosrc" : "gconfaudiosrc",
"device" : "default"
}
# Overwrite with platform specific settings
recording.update( PlatformUtils.GetRecordingDefaults() )
playback = {
"devicename": "default",
"device": "default",
"audiosink":"autoaudiosink"
}
# Overwrite with platform specific settings
playback.update( PlatformUtils.GetPlaybackDefaults() )
extensions = {
"extensions_blacklist": ""
}
sections = {
"General" : general,
"Recording" : recording,
"Playback" : playback,
"Extensions" : extensions
}
#_____________________________________________________________________
def __init__(self):
self.filename = os.path.join(JOKOSHER_CONFIG_HOME, "config")
self.config = ConfigParser.ConfigParser()
self.read()
#_____________________________________________________________________
def read(self):
"""
Reads configuration settings from the config file and loads
then into the Settings dictionaries.
"""
self.config.read(self.filename)
for section in self.sections:
if not self.config.has_section(section):
self.config.add_section(section)
for section, section_dict in self.sections.iteritems():
for key, value in self.config.items(section):
if value == "None":
value = None
section_dict[key] = value
#_____________________________________________________________________
def write(self):
"""
Writes configuration settings to the Settings config file.
"""
for section, section_dict in self.sections.iteritems():
for key, value in section_dict.iteritems():
self.config.set(section, key, value)
file = open(self.filename, 'w')
self.config.write(file)
file.close()
#_____________________________________________________________________
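# Example (added sketch, not part of the original module): typical use of the
# Settings class above. The module itself creates a shared instance near the
# end of this file ("settings = Settings()"), so application code would
# normally change a value and then persist it, roughly:
#
#     Globals.settings.general["windowwidth"] = 1024
#     Globals.settings.write()    # saves to JOKOSHER_CONFIG_HOME/config
#
# Note that values read back from the config file are strings and may need to
# be converted (e.g. with int()) by the caller.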
def debug(*listToPrint):
"""
Global debug function to redirect all the debugging output from the other
methods.
Parameters:
*listToPrint -- list of elements to append to the debugging output.
"""
#HACK: we can't import gst at the top of Globals.py because
#if we do, gstreamer will get to the sys.args and print it's own
#message instead of ours. This will be fixed once we can use
#GOption when we depend on pygobject 2.12.
import gst
message = " ".join( [ str(x) for x in listToPrint ] )
if DEBUG_STDOUT:
print(message)
if DEBUG_GST:
gst.debug(message)
#_____________________________________________________________________
def FAT32SafeFilename(filename):
"""
	Returns a copy of the given string with all the
	characters that are not allowed in FAT32 path names
	removed.
Parameters:
filename -- the filename string.
"""
allowedChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789$%'`-@{}~!#()&_^ "
return "".join([x for x in filename if x in allowedChars])
#_____________________________________________________________________
#static list of all the Instrument files (to prevent having to reimport files).
instrumentPropertyList = []
_alreadyCached = False
_cacheGeneratorObject = None
def _cacheInstrumentsGenerator(alreadyLoadedTypes=[]):
"""
	Yields a loaded Instrument each time the generator is advanced,
	so that the GUI isn't blocked while loading many Instruments.
	If an Instrument's type is already in alreadyLoadedTypes,
	it is considered a duplicate and it's not loaded.
	Parameters:
		alreadyLoadedTypes -- list containing the already loaded Instrument types.
	Yields:
		a (name, type, pixbuf, pixbufPath) tuple for each loaded Instrument.
"""
try:
#getlocale() will usually return a tuple like: ('en_GB', 'UTF-8')
lang = locale.getlocale()[0]
except:
lang = None
for instr_path in INSTR_PATHS:
if not os.path.exists(instr_path):
continue
instrFiles = [x for x in os.listdir(instr_path) if x.endswith(".instr")]
for f in instrFiles:
config = ConfigParser.SafeConfigParser()
try:
config.read(os.path.join(instr_path, f))
except ConfigParser.MissingSectionHeaderError,e:
debug("Instrument file %s in %s is corrupt or invalid, not loading"%(f,instr_path))
continue
if config.has_option('core', 'type') and config.has_option('core', 'icon'):
icon = config.get('core', 'icon')
type = config.get('core', 'type')
else:
continue
#don't load duplicate instruments
if type in alreadyLoadedTypes:
continue
if lang and config.has_option('i18n', lang):
name = config.get('i18n', lang)
elif lang and config.has_option('i18n', lang.split("_")[0]):
#in case lang was 'de_DE', use only 'de'
name = config.get('i18n', lang.split("_")[0])
elif config.has_option('i18n', 'en'):
#fall back on english (or a PO translation, if there is any)
name = _(config.get( 'i18n', 'en'))
else:
continue
name = unicode(name, "UTF-8")
pixbufPath = os.path.join(instr_path, "images", icon)
pixbuf = gtk.gdk.pixbuf_new_from_file(pixbufPath)
# add instrument to defaults list if it's a defaults
if instr_path == INSTR_PATHS[0]:
DEFAULT_INSTRUMENTS.append(type)
yield (name, type, pixbuf, pixbufPath)
#_____________________________________________________________________
def getCachedInstruments(checkForNew=False):
"""
	Creates the Instrument cache if it hasn't been created already and
	returns it.
	Parameters:
		checkForNew -- True = scan the Instrument folders for new Instruments.
False = don't scan for new Instruments.
Returns:
a list with the Instruments cached in memory.
"""
global instrumentPropertyList, _alreadyCached
if _alreadyCached and not checkForNew:
return instrumentPropertyList
else:
_alreadyCached = True
listOfTypes = [x[1] for x in instrumentPropertyList]
try:
newlyCached = list(_cacheInstrumentsGenerator(listOfTypes))
#extend the list so we don't overwrite the already cached instruments
instrumentPropertyList.extend(newlyCached)
except StopIteration:
pass
#sort the instruments alphabetically
#using the lowercase of the name (at index 0)
instrumentPropertyList.sort(key=lambda x: x[0].lower())
return instrumentPropertyList
#_____________________________________________________________________
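# Example (added sketch, not part of the original module): each entry returned
# by getCachedInstruments() is a (name, type, pixbuf, pixbufPath) tuple, so a
# caller such as an "add instrument" dialog can do, roughly:
#
#     for name, instr_type, pixbuf, path in Globals.getCachedInstruments():
#         debug(name, instr_type, path)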
def getCachedInstrumentPixbuf(get_type):
for (name, type, pixbuf, pixbufPath) in getCachedInstruments():
if type == get_type:
return pixbuf
return None
#_____________________________________________________________________
def idleCacheInstruments():
"""
Loads the Instruments 'lazily' to avoid blocking the GUI.
Returns:
True -- keep calling itself to load more Instruments.
False -- stop calling itself and sort Instruments alphabetically.
"""
global instrumentPropertyList, _alreadyCached, _cacheGeneratorObject
if _alreadyCached:
#Stop idle_add from calling us again
return False
#create the generator if it hasnt been already
if not _cacheGeneratorObject:
_cacheGeneratorObject = _cacheInstrumentsGenerator()
try:
instrumentPropertyList.append(_cacheGeneratorObject.next())
#Make sure idle add calls us again
return True
except StopIteration:
_alreadyCached = True
#sort the instruments alphabetically
#using the lowercase of the name (at index 0)
instrumentPropertyList.sort(key=lambda x: x[0].lower())
#Stop idle_add from calling us again
return False
#_____________________________________________________________________
def PopulateEncoders():
"""
Check if the hardcoded list of encoders is available on the system.
"""
for type in _export_formats:
if VerifyAllElements(type[2]):
#create a dictionary using _export_template as the keys
#and the current item from _export_formats as the values.
d = dict(zip(_export_template, type))
EXPORT_FORMATS.append(d)
#_____________________________________________________________________
def VerifyAllElements(bin_desc):
#HACK: we can't import gst at the top of Globals.py because
#if we do, gstreamer will get to the sys.args and print it's own
#message instead of ours. This will be fixed once we can use
#GOption when we depend on pygobject 2.12.
import gst
all_elements_exist = True
for element in bin_desc.split("!"):
exists = gst.default_registry_check_feature_version(element.strip(), 0, 10, 0)
if not exists:
all_elements_exist = False
debug('Cannot find "%s" plugin, disabling: "%s"' % (element.strip(), bin_desc))
# we know at least one of the elements doesnt exist, so skip this encode format.
break
return all_elements_exist
#_____________________________________________________________________
def PopulateAudioBackends():
CheckBackendList(PLAYBACK_BACKENDS)
CheckBackendList(CAPTURE_BACKENDS)
#_____________________________________________________________________
def CheckBackendList(backend_list):
remove_list = []
for tuple_ in backend_list:
bin_desc = tuple_[1]
if not VerifyAllElements(bin_desc):
remove_list.append(tuple_)
for tuple_ in remove_list:
backend_list.remove(tuple_)
#_____________________________________________________________________
def CopyAllFiles(src_dir, dest_dir, only_these_files=None):
""" Copies all the files, but only the files from one directory to another."""
for file in os.listdir(src_dir):
if only_these_files is not None and file not in only_these_files:
continue
src_path = os.path.join(src_dir, file)
dest_path = os.path.join(dest_dir, file)
if os.path.isfile(src_path):
try:
shutil.copy2(src_path, dest_path)
except IOError:
print "Unable to copy from old ~/.jokosher directory:", src_path
#_____________________________________________________________________
"""
Used for launching the correct help file:
True -- Jokosher's running locally by the user. Use the help file from
the help subdirectory.
False -- Jokosher has been installed system-wide. Use yelp's automatic
help file selection.
"""
USE_LOCAL_HELP = False
"""
Global paths, so all methods can access them.
If JOKOSHER_DATA_PATH is not set, that is, Jokosher is running locally,
use paths relative to the current running directory instead of /usr ones.
"""
XDG_RESOURCE_NAME = "jokosher"
JOKOSHER_CONFIG_HOME = xdg.BaseDirectory.save_config_path(XDG_RESOURCE_NAME)
JOKOSHER_DATA_HOME = xdg.BaseDirectory.save_data_path(XDG_RESOURCE_NAME)
data_path = os.getenv("JOKOSHER_DATA_PATH")
if data_path:
INSTR_PATHS = (os.path.join(data_path, "Instruments"), os.path.join(JOKOSHER_DATA_HOME, "instruments"))
EXTENSION_PATHS = (os.path.join(data_path, "extensions"), os.path.join(JOKOSHER_DATA_HOME, "extensions"))
GLADE_PATH = os.path.join(data_path, "Jokosher.glade")
else:
data_path = os.path.dirname(os.path.abspath(__file__))
INSTR_PATHS = (os.path.join(data_path, "..", "Instruments"), os.path.join(JOKOSHER_DATA_HOME, "instruments"))
EXTENSION_PATHS = (os.path.join(data_path, "..", "extensions"), os.path.join(JOKOSHER_DATA_HOME, "extensions"))
GLADE_PATH = os.path.join(data_path, "Jokosher.glade")
LOCALE_PATH = os.path.join(data_path, "..", "locale")
# create a couple dirs to avoid having problems creating a non-existing
# directory inside another non-existing directory
create_dirs = [
'extensions',
'instruments',
('instruments', 'images'),
'presets',
('presets', 'effects'),
('presets', 'mixdown'),
'mixdownprofiles',
'templates',
]
# do a listing before we create the dirs so we know if it was empty (ie first run)
jokosher_dir_empty = (len(os.listdir(JOKOSHER_DATA_HOME)) == 0)
_HOME_DOT_JOKOSHER = os.path.expanduser("~/.jokosher")
if jokosher_dir_empty and os.path.isdir(_HOME_DOT_JOKOSHER):
# Copying old config file from ~/.jokosher.
CopyAllFiles(_HOME_DOT_JOKOSHER, JOKOSHER_CONFIG_HOME, ["config"])
for dirs in create_dirs:
if isinstance(dirs, str):
new_dir = os.path.join(JOKOSHER_DATA_HOME, dirs)
old_dir = os.path.join(_HOME_DOT_JOKOSHER, dirs)
else:
new_dir = os.path.join(JOKOSHER_DATA_HOME, *dirs)
old_dir = os.path.join(_HOME_DOT_JOKOSHER, *dirs)
if not os.path.isdir(new_dir):
try:
os.makedirs(new_dir)
except:
raise "Failed to create user config directory %s" % new_dir
if jokosher_dir_empty and os.path.isdir(old_dir) and os.path.isdir(new_dir):
CopyAllFiles(old_dir, new_dir)
#TODO: make this a list with the system path and home directory path
EFFECT_PRESETS_PATH = os.path.join(JOKOSHER_DATA_HOME, "presets", "effects")
TEMPLATES_PATH = os.path.join(JOKOSHER_DATA_HOME, "templates")
MIXDOWN_PROFILES_PATH = os.path.join(JOKOSHER_DATA_HOME, "mixdownprofiles")
IMAGE_PATH = os.getenv("JOKOSHER_IMAGE_PATH")
if not IMAGE_PATH:
IMAGE_PATH = os.path.join(data_path, "..", "images")
LOCALE_PATH = os.getenv("JOKOSHER_LOCALE_PATH")
if not LOCALE_PATH:
LOCALE_PATH = os.path.join(data_path, "..", "locale")
HELP_PATH = os.getenv("JOKOSHER_HELP_PATH")
if not HELP_PATH:
USE_LOCAL_HELP = True
# change the local help file to match the current locale
current_locale = "C"
if locale.getlocale()[0] and not locale.getlocale()[0].startswith("en", 0, 2):
current_locale = locale.getlocale()[0][:2]
HELP_PATH = os.path.join(data_path, "..", "help/jokosher",
current_locale, "jokosher.xml")
# use C (en) as the default help fallback
if not os.path.exists(HELP_PATH):
HELP_PATH = os.path.join(data_path, "..", "help/jokosher/C/jokosher.xml")
# add your own extension dirs with envar JOKOSHER_EXTENSION_DIRS, colon-separated
__extra_ext_dirs = os.environ.get('JOKOSHER_EXTENSION_DIRS','')
if __extra_ext_dirs:
EXTENSION_PATHS = __extra_ext_dirs.split(':') + list(EXTENSION_PATHS)
""" ExtensionManager data """
AVAILABLE_EXTENSIONS = []
""" Locale constant """
LOCALE_APP = "jokosher"
""" Categories enum """
class Categories:
(broken, unclassified, amplifiers, chorus, compressors,
delays, distortions, equalizers, filters, flangers,
miscellaneous, modulators, oscillators, phasers, reverbs,
simulators) = range(16)
""" Set in Project.py """
VERSION = None
EFFECT_PRESETS_VERSION = None
LADSPA_FACTORY_REGISTRY = None
LADSPA_NAME_MAP = []
LADPSA_CATEGORIES_LIST = [
(_("Broken"), "effect_broken.png"),
(_("Unclassified"), "effect_unclassified.png"),
(_("Amplifiers"), "effect_amplifiers.png"),
(_("Chorus"), "effect_chorus.png"),
(_("Compressors"), "effect_compressors.png"),
(_("Delays"), "effect_delays.png"),
(_("Distortions"), "effect_distortion.png"),
(_("Equalizers"), "effect_equalizers.png"),
(_("Filters"), "effect_filters.png"),
(_("Flangers"), "effect_flangers.png"),
(_("Miscellaneous"), "effect_miscellaneous.png"),
(_("Modulators"), "effect_modulators.png"),
(_("Oscillators"), "effect_oscillators.png"),
(_("Phasers"), "effect_phasers.png"),
(_("Reverbs"), "effect_reverbs.png"),
(_("Simulators"), "effect_simulators.png")
]
LADSPA_CATEGORIES_DICT = {
"ladspa-SweepVFII" : Categories.modulators,
"ladspa-SweepVFI" : Categories.modulators,
"ladspa-PhaserII" : Categories.phasers,
"ladspa-PhaserI" : Categories.phasers,
"ladspa-ChorusII" : Categories.chorus,
"ladspa-ChorusI" : Categories.chorus,
"ladspa-Clip" : Categories.amplifiers,
"ladspa-CabinetII" : Categories.simulators,
"ladspa-CabinetI" : Categories.simulators,
"ladspa-AmpV" : Categories.simulators,
"ladspa-AmpIV" : Categories.simulators,
"ladspa-AmpIII" : Categories.simulators,
"ladspa-PreampIV" : Categories.simulators,
"ladspa-PreampIII" : Categories.simulators,
"ladspa-Compress" : Categories.compressors,
"ladspa-Eq" : Categories.equalizers,
"ladspa-ssm-masher" : Categories.broken, #no sound
"ladspa-slew-limiter-rc" : Categories.broken, #no sound
"ladspa-slide-tc" : Categories.broken, #chirps then dies
"ladspa-signal-abs-cr" : Categories.modulators,
"ladspa-vcf-hshelf" : Categories.broken, #erratic behavior.
"ladspa-vcf-lshelf" : Categories.broken, #erratic behavior
"ladspa-vcf-peakeq" : Categories.filters,
"ladspa-vcf-notch" : Categories.filters,
"ladspa-vcf-bp2" : Categories.filters,
"ladspa-vcf-bp1" : Categories.broken, #no sound
"ladspa-vcf-hp" : Categories.filters,
"ladspa-vcf-lp" : Categories.filters,
"ladspa-vcf-reslp" : Categories.filters,
"ladspa-range-trans-cr" : Categories.amplifiers, #works, but the settings are impossible to use properly
"ladspa-hz-voct-ar" : Categories.broken, #no sound
"ladspa-Phaser1+LFO" : Categories.phasers,
"ladspa-Chorus2" : Categories.chorus, #so so
"ladspa-Chorus1" : Categories.chorus, # so so
"ladspa-tap-vibrato" : Categories.modulators,
"ladspa-tap-tubewarmth" : Categories.filters,
"ladspa-tap-tremolo" : Categories.modulators,
"ladspa-tap-sigmoid" : Categories.amplifiers,
"ladspa-tap-reflector" : Categories.modulators,
"ladspa-tap-pitch" : Categories.modulators,
"ladspa-tap-pinknoise" : Categories.miscellaneous,
"ladspa-tap-limiter" : Categories.amplifiers,
"ladspa-tap-equalizer-bw" : Categories.equalizers,
"ladspa-tap-equalizer" : Categories.equalizers,
"ladspa-formant-vc" : Categories.modulators,
"ladspa-tap-deesser" : Categories.filters,
"ladspa-tap-dynamics-m" : Categories.filters, #could be in another category
"ladspa-imp" : Categories.filters,
"ladspa-pitchScaleHQ" : Categories.modulators, #crap
"ladspa-mbeq" : Categories.equalizers,
"ladspa-sc4m" : Categories.filters, #could be in another category
"ladspa-artificialLatency" : Categories.miscellaneous,
"ladspa-pitchScale" : Categories.modulators, #crap
"ladspa-pointerCastDistortion" : Categories.distortions, #crap
"ladspa-const" : Categories.distortions, #could be in another category
"ladspa-lsFilter" : Categories.filters,
"ladspa-revdelay" : Categories.delays,
"ladspa-delay-c" : Categories.broken, #erratic behavior
"ladspa-delay-l" : Categories.broken, #no change in sound?
"ladspa-delay-n" : Categories.broken, #no change in sound?
"ladspa-decay" : Categories.distortions, #controls make it unusable
"ladspa-comb-c" : Categories.broken, #erratic behavior
"ladspa-comb-l" : Categories.broken, #no change in sound?
"ladspa-comb-n" : Categories.broken, #no change in sound and static
"ladspa-allpass-c" : Categories.broken, #no change in sound?
"ladspa-allpass-l" : Categories.broken, #no change in sound?
"ladspa-allpass-n" : Categories.broken, #no change in sound?
"ladspa-butthigh-iir" : Categories.filters,
"ladspa-buttlow-iir" : Categories.filters,
"ladspa-dj-eq-mono" : Categories.equalizers,
"ladspa-notch-iir" : Categories.filters,
"ladspa-lowpass-iir" : Categories.filters,
"ladspa-highpass-iir" : Categories.filters,
"ladspa-bandpass-iir" : Categories.filters,
"ladspa-bandpass-a-iir" : Categories.filters,
"ladspa-gongBeater" : Categories.modulators, #crap
"ladspa-djFlanger" : Categories.flangers,
"ladspa-giantFlange" : Categories.flangers,
"ladspa-amPitchshift" : Categories.modulators,
"ladspa-chebstortion" : Categories.distortions, #weak
"ladspa-inv" : Categories.broken, #no change in sound, no options either
"ladspa-zm1" : Categories.broken, #no change in sound, no options either
"ladspa-sc1" : Categories.compressors, #could be in another category
"ladspa-gong" : Categories.filters,
"ladspa-freqTracker" : Categories.broken, #no sound
"ladspa-rateShifter" : Categories.filters,
"ladspa-fmOsc" : Categories.broken, #erratic behavior
"ladspa-smoothDecimate" : Categories.filters,
"ladspa-hardLimiter" : Categories.amplifiers,
"ladspa-gate" : Categories.filters, #could be in another category
"ladspa-satanMaximiser" : Categories.distortions,
"ladspa-alias" : Categories.filters, #could be in another category
"ladspa-valveRect" : Categories.filters,
"ladspa-crossoverDist" : Categories.distortions, #crap
"ladspa-dysonCompress" : Categories.compressors,
"ladspa-delayorama" : Categories.delays,
"ladspa-autoPhaser" : Categories.phasers,
"ladspa-fourByFourPole" : Categories.filters,
"ladspa-lfoPhaser" : Categories.phasers,
"ladspa-gsm" : Categories.modulators,
"ladspa-svf" : Categories.filters,
"ladspa-foldover" : Categories.distortions,
"ladspa-harmonicGen" : Categories.modulators, #crap
"ladspa-sifter" : Categories.modulators, #sounds like Distortion
"ladspa-valve" : Categories.distortions, #weak
"ladspa-tapeDelay" : Categories.delays,
"ladspa-dcRemove" : Categories.broken, #no change in sound, no options either
"ladspa-fadDelay" : Categories.delays, #psychedelic stuff
"ladspa-transient" : Categories.modulators,
"ladspa-triplePara" : Categories.filters,
"ladspa-singlePara" : Categories.filters,
"ladspa-retroFlange" : Categories.flangers,
"ladspa-flanger" : Categories.flangers,
"ladspa-decimator" : Categories.filters,
"ladspa-hermesFilter" : Categories.filters, #control needs to have 2 columns, doesn't fit screen
"ladspa-multivoiceChorus" : Categories.chorus,
"ladspa-foverdrive" : Categories.distortions,
"ladspa-declip" : Categories.filters, #couldn't properly test it since I had no clipping audio
"ladspa-comb" : Categories.filters,
"ladspa-ringmod-1i1o1l" : Categories.modulators,
"ladspa-shaper" : Categories.filters,
"ladspa-divider" : Categories.filters,
"ladspa-diode" : Categories.distortions,
"ladspa-amp" : Categories.amplifiers,
"ladspa-Parametric1" : Categories.filters,
"ladspa-wshape-sine" : Categories.broken, #no change in sound?
"ladspa-vcf303" : Categories.filters,
"ladspa-limit-rms" : Categories.broken, #controls make it unusable
"ladspa-limit-peak" : Categories.broken, #controls make it unusable
"ladspa-expand-rms" : Categories.broken, #controls make it unusable
"ladspa-expand-peak" : Categories.broken, #controls make it unusable
"ladspa-compress-rms" : Categories.broken, #controls make it unusable
"ladspa-compress-peak" : Categories.broken, #controls make it unusable
"ladspa-identity-audio" : Categories.broken, #no change in sound?
"ladspa-hard-gate" : Categories.filters,
"ladspa-grain-scatter" : Categories.broken, #no sound
"ladspa-fbdelay-60s" : Categories.delays,
"ladspa-fbdelay-5s" : Categories.delays,
"ladspa-fbdelay-1s" : Categories.delays,
"ladspa-fbdelay-0-1s" : Categories.delays,
"ladspa-fbdelay-0-01s" : Categories.delays,
"ladspa-delay-60s" : Categories.delays,
"ladspa-delay-1s" : Categories.delays,
"ladspa-delay-0-1s" : Categories.delays,
"ladspa-delay-0-01s" : Categories.delays,
"ladspa-disintegrator" : Categories.filters, #crap
"ladspa-triangle-fcsa-oa" : Categories.oscillators,
"ladspa-triangle-fasc-oa" : Categories.broken, #no sound
"ladspa-syncsquare-fcga-oa" : Categories.oscillators,
"ladspa-syncpulse-fcpcga-oa" : Categories.oscillators,
"ladspa-sum-iaic-oa" : Categories.filters,
"ladspa-square-fa-oa" : Categories.oscillators,
"ladspa-sinusWavewrapper" : Categories.filters,
"ladspa-ratio-ncda-oa" : Categories.distortions,
"ladspa-ratio-nadc-oa" : Categories.broken, #no sound
"ladspa-random-fcsa-oa" : Categories.oscillators, #we GOTTA call this Atari or Arcade. It's the same sound!
"ladspa-random-fasc-oa" : Categories.broken, #no sound
"ladspa-sawtooth-fa-oa" : Categories.oscillators,
"ladspa-pulse-fcpa-oa" : Categories.oscillators,
"ladspa-pulse-fapc-oa" : Categories.oscillators,
"ladspa-product-iaic-oa" : Categories.oscillators,
"ladspa-lp4pole-fcrcia-oa" : Categories.filters,
"ladspa-fmod-fcma-oa" : Categories.filters,
"ladspa-fmod-famc-oa" : Categories.broken, #controls make it unusable
"ladspa-amp-gcia-oa" : Categories.broken, #controls make it unusable
"ladspa-difference-icma-oa" : Categories.amplifiers,
"ladspa-difference-iamc-oa" : Categories.broken, #no sound
"ladspa-sine-fcaa" : Categories.oscillators,
"ladspa-sine-faac" : Categories.broken, #no sound
"ladspa-hpf" : Categories.filters,
"ladspa-lpf" : Categories.filters,
"ladspa-adsr" : Categories.broken, #controls make it unusable, no sound
"ladspa-amp-mono" : Categories.amplifiers,
"ladspa-delay-5s" : Categories.delays
}
DEBUG_STDOUT, DEBUG_GST = (False, False)
_export_template = ("description", "extension", "pipeline")
_export_formats = [
("Ogg Vorbis", "ogg", "vorbisenc ! oggmux"),
("MP3", "mp3", "lame"),
("Flac", "flac", "flacenc"),
("WAV", "wav", "wavenc"),
]
EXPORT_FORMATS = []
SAMPLE_RATES = [8000, 11025, 22050, 32000, 44100, 48000, 96000, 192000]
PLAYBACK_BACKENDS = [
(_("Autodetect"), "autoaudiosink"),
(_("Use GNOME Settings"), "gconfaudiosink"),
("ALSA", "alsasink"),
("OSS", "osssink"),
("JACK", "jackaudiosink"),
("PulseAudio", "pulsesink"),
("Direct Sound", "directsoundsink"),
("Core Audio", "osxaudiosink")
]
CAPTURE_BACKENDS = [
(_("GNOME Settings"), "gconfaudiosrc"),
("ALSA", "alsasrc"),
("OSS", "osssrc"),
("JACK", "jackaudiosrc"),
("PulseAudio", "pulsesrc"),
("Direct Sound", "dshowaudiosrc"),
("Core Audio", "osxaudiosrc")
]
""" Default Instruments """
DEFAULT_INSTRUMENTS = []
""" init Settings """
settings = Settings()
""" Cache Instruments """
gobject.idle_add(idleCacheInstruments)
gobject.set_application_name(_("Jokosher Audio Editor"))
gobject.set_prgname(LOCALE_APP)
gtk.window_set_default_icon_name("jokosher")
# environment variable for pulseaudio type
os.environ["PULSE_PROP_media.role"] = "production"
# I have decided that Globals.py is a boring source file. So, here is a little
# joke. What does the tax office and a pelican have in common? They can both stick
# their bills up their arses. Har har har.
| gpl-2.0 | 3,587,878,030,044,677,600 | 36.394809 | 113 | 0.666021 | false |
tjwalch/python-payson | payson_api.py | 1 | 15177 | # -*- coding: utf-8 -*-
"""Python API for Payson paymnents provider
Copyright (c) 2012 Tomas Walch
MIT-License, see LICENSE for details
"""
import datetime
import decimal
import logging
import json
import urllib
import urllib2
import urlparse
PAYSON_API_ENDPOINT = "https://api.payson.se"
PAYSON_TEST_API_ENDPOINT = "https://test-api.payson.se"
PAYSON_API_VERSION = "1.0"
PAYSON_API_PAY_ACTION = "Pay/"
PAYSON_API_PAYMENT_DETAILS_ACTION = "PaymentDetails/"
PAYSON_API_PAYMENT_UPDATE_ACTION = "PaymentUpdate/"
PAYSON_API_VALIDATE_ACTION = "Validate/"
PAYSON_WWW_PAY_FORWARD_URL = 'https://www.payson.se/paysecure/?token=%s'
PAYSON_WWW_PAY_FORWARD_TEST_URL = \
'https://test-www.payson.se/paysecure/?token=%s'
PAYSON_TEST_AGENT_ID = ('1', '4')
PAYSON_TEST_AGENT_KEY = ('fddb19ac-7470-42b6-a91d-072cb1495f0a',
'2acab30d-fe50-426f-90d7-8c60a7eb31d4')
log = logging.getLogger('Payson API')
class PaysonApi():
def __init__(self, user_id, user_key):
"""Constructor
:param user_id: Agent ID obtained from Payson
:type user_id: str
:param user_key: Password (MD5 Key) obtained from Payson
:type user_key: str
"""
if (user_id in PAYSON_TEST_AGENT_ID and
user_key in PAYSON_TEST_AGENT_KEY):
endpoint = PAYSON_TEST_API_ENDPOINT
self.forward_pay_url = PAYSON_WWW_PAY_FORWARD_TEST_URL
else:
endpoint = PAYSON_API_ENDPOINT
self.forward_pay_url = PAYSON_WWW_PAY_FORWARD_URL
self.user_id = user_id
self.user_key = user_key
def mkcmd(cmd):
return '/'.join((endpoint, PAYSON_API_VERSION, cmd))
self.pay_cmd = mkcmd(PAYSON_API_PAY_ACTION)
self.get_payment_details_cmd = mkcmd(PAYSON_API_PAYMENT_DETAILS_ACTION)
self.update_payment_details_cmd = \
mkcmd(PAYSON_API_PAYMENT_UPDATE_ACTION)
self.validate_ipn_cmd = mkcmd(PAYSON_API_VALIDATE_ACTION)
self.send_ipn_cmd = mkcmd('SendIPN/')
def pay(self,
returnUrl,
cancelUrl,
memo,
senderEmail,
senderFirstName,
senderLastName,
receiverList,
ipnNotificationUrl=None,
localeCode=None,
currencyCode=None,
fundingList=tuple(),
feesPayer=None,
invoiceFee=None,
custom=None,
trackingId=None,
guaranteeOffered=None,
orderItemList=tuple(),
showReceiptPage=True):
"""The starting point for any kind of payment.
For a longer description, including possible parameter values and
constraints, see https://api.payson.se/#Initializepayment
:type returnUrl: unicode
:type cancelUrl: unicode
:type memo: unicode
:type senderEmail: unicode
:type senderFirstName: unicode
:type senderLastName: unicode
:type receiverList: iterable of Receiver instances
:type ipnNotificationUrl: unicode
:type localeCode: unicode
:type currencyCode: unicode
:type fundingList: iterable with unicode instances
:type feesPayer: unicode
:type invoiceFee: decimal.Decimal
:type custom: any json serializable Python object
:type trackingId: unicode or int
:type guaranteeOffered: unicode
:type orderItemList: iterable of OrderItem instances
:type showReceiptPage: bool
:rtype: PayResponse
"""
pay_request = {'returnUrl': returnUrl,
'cancelUrl': cancelUrl,
'memo': memo.encode('utf-8'),
'senderEmail': senderEmail.encode('utf-8'),
'senderFirstName': senderFirstName.encode('utf-8'),
'senderLastName': senderLastName.encode('utf-8')}
for i, v in enumerate(receiverList):
k = 'receiverList.receiver(%d).%s'
pay_request[k % (i, 'email')] = v.email.encode('utf-8')
pay_request[k % (i, 'amount')] = str(v.amount)
if v.primary is not None:
pay_request[k % (i, 'primary')] = json.dumps(v.primary)
if v.firstName:
pay_request[k % (i, 'firstName')] = v.firstName.encode('utf-8')
if v.lastName:
pay_request[k % (i, 'lastName')] = v.lastName.encode('utf-8')
if ipnNotificationUrl:
pay_request['ipnNotificationUrl'] = ipnNotificationUrl
if localeCode:
pay_request['localeCode'] = localeCode
if currencyCode:
pay_request['currencyCode'] = currencyCode
for i, v in enumerate(fundingList):
pay_request['fundingList.fundingConstraint'
'(%d).constraint' % i] = v
if feesPayer:
pay_request['feesPayer'] = feesPayer
if invoiceFee is not None:
pay_request['invoiceFee'] = str(invoiceFee)
if custom is not None:
pay_request['custom'] = json.dumps(custom)
if trackingId is not None:
pay_request['trackingId'] = trackingId.encode('utf-8')
if guaranteeOffered:
pay_request['guaranteeOffered'] = guaranteeOffered
for i, v in enumerate(orderItemList):
k = 'orderItemList.orderItem(%d).%s'
pay_request[k % (i, 'description')] = v.description.encode('utf-8')
pay_request[k % (i, 'sku')] = str(v.sku)
pay_request[k % (i, 'quantity')] = str(v.quantity)
pay_request[k % (i, 'unitPrice')] = str(v.unitPrice)
pay_request[k % (i, 'taxPercentage')] = str(v.taxPercentage)
if showReceiptPage is False:
pay_request['showReceiptPage'] = json.dumps(showReceiptPage)
response_dict = self._do_request(self.pay_cmd, pay_request)
pay_response = PayResponse(self.forward_pay_url, response_dict)
log.info('PAYSON: %s response: %r' % (self.pay_cmd, response_dict))
return pay_response
def payment_details(self, token):
"""Get details about an existing payment.
For a longer description, including possible parameter values, see
https://api.payson.se/#PaymentDetailsrequest
:type token: unicode
:rtype: PaymentDetailsResponse
"""
response_dict = self._do_request(
self.get_payment_details_cmd,
{'token': token})
payment_details_response = PaymentDetailsResponse(response_dict)
log.info('PAYSON: %s response: %r' % (self.get_payment_details_cmd,
response_dict))
return payment_details_response
def payment_update(self, token, action):
"""Update an existing payment, for instance mark an order as shipped or canceled.
For a longer description, including possible parameter values, see
https://api.payson.se/#PaymentUpdaterequest
:type token: unicode
:type action: unicode
:rtype: ResponseEnvelope
"""
response_dict = self._do_request(
self.update_payment_details_cmd,
{'token': token,
'action': action})
response = ResponseEnvelope(response_dict)
log.info('PAYSON: %s response: %r' % (self.update_payment_details_cmd,
response_dict))
return response.ack == 'SUCCESS'
def validate(self, message):
"""This method is used to validate the content of the IPN message that was sent to your ipnNotificationUrl.
For a longer description, including possible parameter values, see
https://api.payson.se/#Validaterequest
:param message: complete unaltered query string from the IPN request
:type message: str
:returns: True if IPN is verified, otherwise False
:rtype: bool
"""
response = self._send_request(self.validate_ipn_cmd, message)
log.info('PAYSON: %s response: %r' % (self.validate_ipn_cmd,
response))
if response == 'VERIFIED':
return True
elif response == 'INVALID':
return False
else:
raise ValueError('Invalid response for IPN validation.')
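    # Example (added sketch, not part of the original API): a typical IPN
    # handler posts the raw, unaltered IPN body back to Payson via validate()
    # and, if it verifies, looks up the payment. The names raw_ipn_body, token
    # and mark_order_paid() below are placeholders, and 'COMPLETED' is one of
    # the status values documented by Payson.
    #
    #     if api.validate(raw_ipn_body):
    #         details = api.payment_details(token)
    #         if details.status == 'COMPLETED':
    #             mark_order_paid(details.purchaseId)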
def _do_request(self, cmd, data):
query = urllib.urlencode(data)
response_body = self._send_request(cmd, query)
data = urlparse.parse_qs(response_body)
return {k: v[0] for k, v in data.items()}
def _send_request(self, cmd, query):
request = urllib2.Request(cmd, query)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('PAYSON-SECURITY-USERID', self.user_id)
request.add_header('PAYSON-SECURITY-PASSWORD', self.user_key)
log.info('PAYSON: Calling %s with %r' % (cmd, query))
try:
response = urllib2.urlopen(request)
except urllib2.URLError, e:
log.error('Exception when calling {0}: {1}'.format(cmd, e))
raise
return response.read()
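# Example (added sketch, not part of the original module): minimal use of
# PaysonApi.pay() with the test agent credentials hard-coded above. The URLs,
# e-mail addresses and the redirect_to() helper are placeholders.
#
#     api = PaysonApi('4', '2acab30d-fe50-426f-90d7-8c60a7eb31d4')  # test agent
#     receivers = [Receiver(email=u'[email protected]', amount=decimal.Decimal('100.00'))]
#     resp = api.pay(returnUrl=u'https://example.com/return',
#                    cancelUrl=u'https://example.com/cancel',
#                    memo=u'Test order',
#                    senderEmail=u'[email protected]',
#                    senderFirstName=u'Test',
#                    senderLastName=u'Buyer',
#                    receiverList=receivers)
#     if resp.success:
#         redirect_to(resp.forward_pay_url)  # send the customer to Payson's payment page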
class OrderItem(object):
"""Holds Order Item values used in pay operation.
"""
def __init__(self,
description,
sku,
quantity,
unitPrice,
taxPercentage):
"""Constructor.
Payson API documentation states that some of these values are optional,
this is incorrect, all must be provided.
For possible parameter values and constraints see
https://api.payson.se/#Initializepayment
:param description: Description of this item.
:type description: unicode
:param sku: SKU of this item.
:type sku: unicode or int
:param quantity: Quantity of this item.
:type quantity: decimal.Decimal
:param unitPrice: The unit price of this item not including VAT.
:type unitPrice: decimal.Decimal
:param taxPercentage: Tax percentage for this item.
:type taxPercentage: decimal.Decimal
"""
self.description = description
self.sku = sku
self.quantity = quantity
self.unitPrice = unitPrice
self.taxPercentage = taxPercentage
class Receiver(object):
"""Holds receiver data.
Used both in pay request and in payment details objects.
"""
def __init__(self,
email,
amount,
primary=None,
firstName=None,
lastName=None):
self.email = email
self.amount = decimal.Decimal(amount)
self.primary = primary
self.firstName = firstName
self.lastName = lastName
@classmethod
def from_response_data(cls, data):
receivers = []
i = 0
while 'receiverList.receiver(%d).email' % i in data:
primary = data.get('receiverList.receiver(%d).primary' % i)
primary = json.loads(primary.lower()) if primary else None
receivers.append(
cls(data['receiverList.receiver(%d).email' % i],
data['receiverList.receiver(%d).amount' % i],
primary)
)
i += 1
return receivers
class Error(object):
def __init__(self, errorId, message, parameter=None):
self.errorId = int(errorId)
self.message = message
self.parameter = parameter
@classmethod
def from_response_dict(cls, data):
errors = []
i = 0
while 'errorList.error(%d).errorId' % i in data:
errors.append(
cls(data['errorList.error(%d).errorId' % i],
data['errorList.error(%d).message' % i],
data.get('errorList.error(%d).parameter' % i))
)
i += 1
return errors
class ResponseEnvelope(object):
def __init__(self, data):
self.ack = data['responseEnvelope.ack']
self.timestamp = datetime.datetime.strptime(
data['responseEnvelope.timestamp'], '%Y-%m-%dT%H:%M:%S')
self.correlationId = data['responseEnvelope.correlationId']
self.errorList = Error.from_response_dict(data)
@property
def success(self):
"""True if request succeeded."""
return self.ack == 'SUCCESS'
class PayResponse(object):
"""Holds the returned values from the pay operation.
"""
def __init__(self, forward_pay_url, data):
self.responseEnvelope = ResponseEnvelope(data)
self.token = data.get('TOKEN', '')
self.forward_pay_url = forward_pay_url % self.token if self.token \
else ''
@property
def success(self):
"""True if request (not payment) succeeded."""
return self.responseEnvelope.success
class ShippingAddress(object):
"""Invoice shipping address info.
"""
def __init__(self, data):
self.name = data['shippingAddress.name'].decode('utf-8')
self.streetAddress = data['shippingAddress.streetAddress'].decode('utf-8')
self.postalCode = data['shippingAddress.postalCode'].decode('utf-8')
self.city = data['shippingAddress.city'].decode('utf-8')
self.country = data['shippingAddress.country'].decode('utf-8')
class PaymentDetails(object):
"""Holds the returned values from the payment_details and IPN callback operations.
See https://api.payson.se/#PaymentDetailsrequest for a description of
attributes.
"""
def __init__(self, data):
self.purchaseId = data.get('purchaseId', '')
self.token = data.get('token')
self.senderEmail = data.get('senderEmail', '')
self.status = data['status']
self.type = data['type']
self.guaranteeStatus = data.get('guaranteeStatus')
self.guaranteeDeadlineTimestamp = datetime.datetime.strptime(
data['guaranteeDeadlineTimestamp'], '%Y-%m-%dT%H:%M:%S') \
if 'guaranteeDeadlineTimestamp' in data else None
self.invoiceStatus = data.get('invoiceStatus')
custom = data.get('custom')
self.custom = custom and json.loads(custom)
self.trackingId = data.get('trackingId', '').decode('utf-8')
self.currencyCode = data['currencyCode']
self.receiverFee = decimal.Decimal(data.get('receiverFee', '0'))
self.receiverList = Receiver.from_response_data(data)
if 'shippingAddress.name' in data:
self.shippingAddress = ShippingAddress(data)
self.post_data = data.copy()
@property
def amount(self):
return sum(receiver.amount for receiver in self.receiverList)
class PaymentDetailsResponse(PaymentDetails):
"""Returned from payment_details.
This class contains PaymentDetails with a ResponseEnvelope.
"""
def __init__(self, data):
super(PaymentDetailsResponse, self).__init__(data)
self.responseEnvelope = ResponseEnvelope(data)
@property
def success(self):
"""True if request succeeded."""
return self.responseEnvelope.success
| mit | -3,137,430,344,846,844,400 | 36.198529 | 115 | 0.598603 | false |
Aerotenna/Firmware | Tools/ecl_ekf/analyse_logdata_ekf.py | 1 | 90361 | import numpy as np
# tell matplotlib not to use the Xwindows backend (must be set before pyplot is imported)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
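# Example (added sketch, not part of the original tool): analyse_ekf() expects
# the data dictionaries of the 'estimator_status', 'ekf2_innovations' and
# 'sensor_preflight' ULog topics. A caller would typically extract them with
# pyulog before invoking this function, roughly:
#
#     from pyulog import ULog
#     ulog = ULog('flight.ulg')  # placeholder file name
#     estimator_status = ulog.get_dataset('estimator_status').data
#     ekf2_innovations = ulog.get_dataset('ekf2_innovations').data
#     sensor_preflight = ulog.get_dataset('sensor_preflight').data
#     analyse_ekf(estimator_status, ekf2_innovations, sensor_preflight,
#                 check_levels, plot=True, output_plot_filename='flight.pdf')
#
# where check_levels is a dict of pass/fail thresholds loaded by the caller.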
def analyse_ekf(estimator_status, ekf2_innovations, sensor_preflight, check_levels,
plot=False, output_plot_filename=None, late_start_early_ending=True):
if plot:
# create summary plots
# save the plots to PDF
pp = PdfPages(output_plot_filename)
# plot IMU consistency data
if ('accel_inconsistency_m_s_s' in sensor_preflight.keys()) and (
'gyro_inconsistency_rad_s' in sensor_preflight.keys()):
plt.figure(0, figsize=(20, 13))
plt.subplot(2, 1, 1)
plt.plot(sensor_preflight['accel_inconsistency_m_s_s'], 'b')
plt.title('IMU Consistency Check Levels')
plt.ylabel('acceleration (m/s/s)')
plt.xlabel('data index')
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(sensor_preflight['gyro_inconsistency_rad_s'], 'b')
plt.ylabel('angular rate (rad/s)')
plt.xlabel('data index')
pp.savefig()
plt.close(0)
# generate max, min and 1-std metadata
innov_time = 1e-6 * ekf2_innovations['timestamp']
status_time = 1e-6 * estimator_status['timestamp']
if plot:
# vertical velocity and position innovations
plt.figure(1, figsize=(20, 13))
# generate metadata for velocity innovations
innov_2_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[2]'])
innov_2_max_time = innov_time[innov_2_max_arg]
innov_2_max = np.amax(ekf2_innovations['vel_pos_innov[2]'])
innov_2_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[2]'])
innov_2_min_time = innov_time[innov_2_min_arg]
innov_2_min = np.amin(ekf2_innovations['vel_pos_innov[2]'])
s_innov_2_max = str(round(innov_2_max, 2))
s_innov_2_min = str(round(innov_2_min, 2))
# s_innov_2_std = str(round(np.std(ekf2_innovations['vel_pos_innov[2]']),2))
# generate metadata for position innovations
innov_5_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[5]'])
innov_5_max_time = innov_time[innov_5_max_arg]
innov_5_max = np.amax(ekf2_innovations['vel_pos_innov[5]'])
innov_5_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[5]'])
innov_5_min_time = innov_time[innov_5_min_arg]
innov_5_min = np.amin(ekf2_innovations['vel_pos_innov[5]'])
s_innov_5_max = str(round(innov_5_max, 2))
s_innov_5_min = str(round(innov_5_min, 2))
# s_innov_5_std = str(round(np.std(ekf2_innovations['vel_pos_innov[5]']),2))
# generate plot for vertical velocity innovations
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[2]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[2]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[2]']), 'r')
plt.title('Vertical Innovations')
plt.ylabel('Down Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_2_max_time, innov_2_max, 'max=' + s_innov_2_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_2_min_time, innov_2_min, 'min=' + s_innov_2_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_2_std],loc='upper left',frameon=False)
# generate plot for vertical position innovations
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[5]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[5]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[5]']), 'r')
plt.ylabel('Down Pos (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_5_max_time, innov_5_max, 'max=' + s_innov_5_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_5_min_time, innov_5_min, 'min=' + s_innov_5_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_5_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(1)
# horizontal velocity innovations
plt.figure(2, figsize=(20, 13))
# generate North axis metadata
innov_0_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[0]'])
innov_0_max_time = innov_time[innov_0_max_arg]
innov_0_max = np.amax(ekf2_innovations['vel_pos_innov[0]'])
innov_0_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[0]'])
innov_0_min_time = innov_time[innov_0_min_arg]
innov_0_min = np.amin(ekf2_innovations['vel_pos_innov[0]'])
s_innov_0_max = str(round(innov_0_max, 2))
s_innov_0_min = str(round(innov_0_min, 2))
# s_innov_0_std = str(round(np.std(ekf2_innovations['vel_pos_innov[0]']),2))
# Generate East axis metadata
innov_1_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[1]'])
innov_1_max_time = innov_time[innov_1_max_arg]
innov_1_max = np.amax(ekf2_innovations['vel_pos_innov[1]'])
innov_1_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[1]'])
innov_1_min_time = innov_time[innov_1_min_arg]
innov_1_min = np.amin(ekf2_innovations['vel_pos_innov[1]'])
s_innov_1_max = str(round(innov_1_max, 2))
s_innov_1_min = str(round(innov_1_min, 2))
# s_innov_1_std = str(round(np.std(ekf2_innovations['vel_pos_innov[1]']),2))
# draw plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[0]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[0]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[0]']), 'r')
plt.title('Horizontal Velocity Innovations')
plt.ylabel('North Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_0_std],loc='upper left',frameon=False)
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[1]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[1]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[1]']), 'r')
plt.ylabel('East Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_1_max_time, innov_1_max, 'max=' + s_innov_1_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_1_min_time, innov_1_min, 'min=' + s_innov_1_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_1_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(2)
# horizontal position innovations
plt.figure(3, figsize=(20, 13))
# generate North axis metadata
innov_3_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[3]'])
innov_3_max_time = innov_time[innov_3_max_arg]
innov_3_max = np.amax(ekf2_innovations['vel_pos_innov[3]'])
innov_3_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[3]'])
innov_3_min_time = innov_time[innov_3_min_arg]
innov_3_min = np.amin(ekf2_innovations['vel_pos_innov[3]'])
s_innov_3_max = str(round(innov_3_max, 2))
s_innov_3_min = str(round(innov_3_min, 2))
# s_innov_3_std = str(round(np.std(ekf2_innovations['vel_pos_innov[3]']),2))
# generate East axis metadata
innov_4_max_arg = np.argmax(ekf2_innovations['vel_pos_innov[4]'])
innov_4_max_time = innov_time[innov_4_max_arg]
innov_4_max = np.amax(ekf2_innovations['vel_pos_innov[4]'])
innov_4_min_arg = np.argmin(ekf2_innovations['vel_pos_innov[4]'])
innov_4_min_time = innov_time[innov_4_min_arg]
innov_4_min = np.amin(ekf2_innovations['vel_pos_innov[4]'])
s_innov_4_max = str(round(innov_4_max, 2))
s_innov_4_min = str(round(innov_4_min, 2))
# s_innov_4_std = str(round(np.std(ekf2_innovations['vel_pos_innov[4]']),2))
# generate plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[3]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[3]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[3]']), 'r')
plt.title('Horizontal Position Innovations')
plt.ylabel('North Pos (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_3_max_time, innov_3_max, 'max=' + s_innov_3_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_3_min_time, innov_3_min, 'min=' + s_innov_3_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_3_std],loc='upper left',frameon=False)
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['vel_pos_innov[4]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['vel_pos_innov_var[4]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['vel_pos_innov_var[4]']), 'r')
plt.ylabel('East Pos (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_4_max_time, innov_4_max, 'max=' + s_innov_4_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_4_min_time, innov_4_min, 'min=' + s_innov_4_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_4_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(3)
# magnetometer innovations
plt.figure(4, figsize=(20, 13))
# generate X axis metadata
innov_0_max_arg = np.argmax(ekf2_innovations['mag_innov[0]'])
innov_0_max_time = innov_time[innov_0_max_arg]
innov_0_max = np.amax(ekf2_innovations['mag_innov[0]'])
innov_0_min_arg = np.argmin(ekf2_innovations['mag_innov[0]'])
innov_0_min_time = innov_time[innov_0_min_arg]
innov_0_min = np.amin(ekf2_innovations['mag_innov[0]'])
s_innov_0_max = str(round(innov_0_max, 3))
s_innov_0_min = str(round(innov_0_min, 3))
# s_innov_0_std = str(round(np.std(ekf2_innovations['mag_innov[0]']),3))
# generate Y axis metadata
innov_1_max_arg = np.argmax(ekf2_innovations['mag_innov[1]'])
innov_1_max_time = innov_time[innov_1_max_arg]
innov_1_max = np.amax(ekf2_innovations['mag_innov[1]'])
innov_1_min_arg = np.argmin(ekf2_innovations['mag_innov[1]'])
innov_1_min_time = innov_time[innov_1_min_arg]
innov_1_min = np.amin(ekf2_innovations['mag_innov[1]'])
s_innov_1_max = str(round(innov_1_max, 3))
s_innov_1_min = str(round(innov_1_min, 3))
# s_innov_1_std = str(round(np.std(ekf2_innovations['mag_innov[1]']),3))
# generate Z axis metadata
innov_2_max_arg = np.argmax(ekf2_innovations['mag_innov[2]'])
innov_2_max_time = innov_time[innov_2_max_arg]
innov_2_max = np.amax(ekf2_innovations['mag_innov[2]'])
innov_2_min_arg = np.argmin(ekf2_innovations['mag_innov[2]'])
innov_2_min_time = innov_time[innov_2_min_arg]
innov_2_min = np.amin(ekf2_innovations['mag_innov[2]'])
s_innov_2_max = str(round(innov_2_max, 3))
s_innov_2_min = str(round(innov_2_min, 3))
# s_innov_2_std = str(round(np.std(ekf2_innovations['mag_innov[0]']),3))
# draw plots
plt.subplot(3, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['mag_innov[0]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['mag_innov_var[0]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['mag_innov_var[0]']), 'r')
plt.title('Magnetometer Innovations')
plt.ylabel('X (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_0_std],loc='upper left',frameon=False)
plt.subplot(3, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['mag_innov[1]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['mag_innov_var[1]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['mag_innov_var[1]']), 'r')
plt.ylabel('Y (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_1_max_time, innov_1_max, 'max=' + s_innov_1_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_1_min_time, innov_1_min, 'min=' + s_innov_1_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_1_std],loc='upper left',frameon=False)
plt.subplot(3, 1, 3)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['mag_innov[2]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['mag_innov_var[2]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['mag_innov_var[2]']), 'r')
plt.ylabel('Z (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_2_max_time, innov_2_max, 'max=' + s_innov_2_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_2_min_time, innov_2_min, 'min=' + s_innov_2_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_2_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(4)
# magnetic heading innovations
plt.figure(5, figsize=(20, 13))
# generate metadata
innov_0_max_arg = np.argmax(ekf2_innovations['heading_innov'])
innov_0_max_time = innov_time[innov_0_max_arg]
innov_0_max = np.amax(ekf2_innovations['heading_innov'])
innov_0_min_arg = np.argmin(ekf2_innovations['heading_innov'])
innov_0_min_time = innov_time[innov_0_min_arg]
innov_0_min = np.amin(ekf2_innovations['heading_innov'])
s_innov_0_max = str(round(innov_0_max, 3))
s_innov_0_min = str(round(innov_0_min, 3))
# s_innov_0_std = str(round(np.std(ekf2_innovations['heading_innov']),3))
# draw plot
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['heading_innov'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['heading_innov_var']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['heading_innov_var']), 'r')
plt.title('Magnetic Heading Innovations')
plt.ylabel('Heading (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(innov_0_max_time, innov_0_max, 'max=' + s_innov_0_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(innov_0_min_time, innov_0_min, 'min=' + s_innov_0_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
# plt.legend(['std='+s_innov_0_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(5)
# air data innovations
plt.figure(6, figsize=(20, 13))
# generate airspeed metadata
airspeed_innov_max_arg = np.argmax(ekf2_innovations['airspeed_innov'])
airspeed_innov_max_time = innov_time[airspeed_innov_max_arg]
airspeed_innov_max = np.amax(ekf2_innovations['airspeed_innov'])
airspeed_innov_min_arg = np.argmin(ekf2_innovations['airspeed_innov'])
airspeed_innov_min_time = innov_time[airspeed_innov_min_arg]
airspeed_innov_min = np.amin(ekf2_innovations['airspeed_innov'])
s_airspeed_innov_max = str(round(airspeed_innov_max, 3))
s_airspeed_innov_min = str(round(airspeed_innov_min, 3))
# generate sideslip metadata
beta_innov_max_arg = np.argmax(ekf2_innovations['beta_innov'])
beta_innov_max_time = innov_time[beta_innov_max_arg]
beta_innov_max = np.amax(ekf2_innovations['beta_innov'])
beta_innov_min_arg = np.argmin(ekf2_innovations['beta_innov'])
beta_innov_min_time = innov_time[beta_innov_min_arg]
beta_innov_min = np.amin(ekf2_innovations['beta_innov'])
s_beta_innov_max = str(round(beta_innov_max, 3))
s_beta_innov_min = str(round(beta_innov_min, 3))
# draw plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['airspeed_innov'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['airspeed_innov_var']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['airspeed_innov_var']), 'r')
plt.title('True Airspeed Innovations')
plt.ylabel('innovation (m/sec)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(airspeed_innov_max_time, airspeed_innov_max, 'max=' + s_airspeed_innov_max, fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.text(airspeed_innov_min_time, airspeed_innov_min, 'min=' + s_airspeed_innov_min, fontsize=12,
horizontalalignment='left', verticalalignment='top')
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['beta_innov'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['beta_innov_var']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['beta_innov_var']), 'r')
plt.title('Synthetic Sideslip Innovations')
plt.ylabel('innovation (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(beta_innov_max_time, beta_innov_max, 'max=' + s_beta_innov_max, fontsize=12, horizontalalignment='left',
verticalalignment='bottom')
plt.text(beta_innov_min_time, beta_innov_min, 'min=' + s_beta_innov_min, fontsize=12, horizontalalignment='left',
verticalalignment='top')
pp.savefig()
plt.close(6)
# optical flow innovations
plt.figure(7, figsize=(20, 13))
# generate X axis metadata
flow_innov_x_max_arg = np.argmax(ekf2_innovations['flow_innov[0]'])
flow_innov_x_max_time = innov_time[flow_innov_x_max_arg]
flow_innov_x_max = np.amax(ekf2_innovations['flow_innov[0]'])
flow_innov_x_min_arg = np.argmin(ekf2_innovations['flow_innov[0]'])
flow_innov_x_min_time = innov_time[flow_innov_x_min_arg]
flow_innov_x_min = np.amin(ekf2_innovations['flow_innov[0]'])
s_flow_innov_x_max = str(round(flow_innov_x_max, 3))
s_flow_innov_x_min = str(round(flow_innov_x_min, 3))
# s_flow_innov_x_std = str(round(np.std(ekf2_innovations['flow_innov[0]']),3))
# generate Y axis metadata
flow_innov_y_max_arg = np.argmax(ekf2_innovations['flow_innov[1]'])
flow_innov_y_max_time = innov_time[flow_innov_y_max_arg]
flow_innov_y_max = np.amax(ekf2_innovations['flow_innov[1]'])
flow_innov_y_min_arg = np.argmin(ekf2_innovations['flow_innov[1]'])
flow_innov_y_min_time = innov_time[flow_innov_y_min_arg]
flow_innov_y_min = np.amin(ekf2_innovations['flow_innov[1]'])
s_flow_innov_y_max = str(round(flow_innov_y_max, 3))
s_flow_innov_y_min = str(round(flow_innov_y_min, 3))
# s_flow_innov_y_std = str(round(np.std(ekf2_innovations['flow_innov[1]']),3))
# draw plots
plt.subplot(2, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['flow_innov[0]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['flow_innov_var[0]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['flow_innov_var[0]']), 'r')
plt.title('Optical Flow Innovations')
plt.ylabel('X (rad/sec)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(flow_innov_x_max_time, flow_innov_x_max, 'max=' + s_flow_innov_x_max, fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.text(flow_innov_x_min_time, flow_innov_x_min, 'min=' + s_flow_innov_x_min, fontsize=12,
horizontalalignment='left', verticalalignment='top')
# plt.legend(['std='+s_flow_innov_x_std],loc='upper left',frameon=False)
plt.subplot(2, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['flow_innov[1]'], 'b')
plt.plot(1e-6 * ekf2_innovations['timestamp'], np.sqrt(ekf2_innovations['flow_innov_var[1]']), 'r')
plt.plot(1e-6 * ekf2_innovations['timestamp'], -np.sqrt(ekf2_innovations['flow_innov_var[1]']), 'r')
plt.ylabel('Y (rad/sec)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(flow_innov_y_max_time, flow_innov_y_max, 'max=' + s_flow_innov_y_max, fontsize=12,
horizontalalignment='left', verticalalignment='bottom')
plt.text(flow_innov_y_min_time, flow_innov_y_min, 'min=' + s_flow_innov_y_min, fontsize=12,
horizontalalignment='left', verticalalignment='top')
# plt.legend(['std='+s_flow_innov_y_std],loc='upper left',frameon=False)
pp.savefig()
plt.close(7)
# generate metadata for the normalised innovation consistency test levels
# a value > 1.0 means the measurement data for that test has been rejected by the EKF
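# note: the test ratio is assumed to be normalised by the innovation consistency gate, i.e. computed as
# innovation^2 / (gate^2 * innovation_variance), so 1.0 corresponds to the gate threshold and e.g. a ratio of
# 0.25 corresponds to an innovation at half the gate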
# magnetometer data
mag_test_max_arg = np.argmax(estimator_status['mag_test_ratio'])
mag_test_max_time = status_time[mag_test_max_arg]
mag_test_max = np.amax(estimator_status['mag_test_ratio'])
mag_test_mean = np.mean(estimator_status['mag_test_ratio'])
# velocity data (GPS)
vel_test_max_arg = np.argmax(estimator_status['vel_test_ratio'])
vel_test_max_time = status_time[vel_test_max_arg]
vel_test_max = np.amax(estimator_status['vel_test_ratio'])
vel_test_mean = np.mean(estimator_status['vel_test_ratio'])
# horizontal position data (GPS or external vision)
pos_test_max_arg = np.argmax(estimator_status['pos_test_ratio'])
pos_test_max_time = status_time[pos_test_max_arg]
pos_test_max = np.amax(estimator_status['pos_test_ratio'])
pos_test_mean = np.mean(estimator_status['pos_test_ratio'])
# height data (Barometer, GPS or rangefinder)
hgt_test_max_arg = np.argmax(estimator_status['hgt_test_ratio'])
hgt_test_max_time = status_time[hgt_test_max_arg]
hgt_test_max = np.amax(estimator_status['hgt_test_ratio'])
hgt_test_mean = np.mean(estimator_status['hgt_test_ratio'])
# airspeed data
tas_test_max_arg = np.argmax(estimator_status['tas_test_ratio'])
tas_test_max_time = status_time[tas_test_max_arg]
tas_test_max = np.amax(estimator_status['tas_test_ratio'])
tas_test_mean = np.mean(estimator_status['tas_test_ratio'])
# height above ground data (rangefinder)
hagl_test_max_arg = np.argmax(estimator_status['hagl_test_ratio'])
hagl_test_max_time = status_time[hagl_test_max_arg]
hagl_test_max = np.amax(estimator_status['hagl_test_ratio'])
hagl_test_mean = np.mean(estimator_status['hagl_test_ratio'])
if plot:
# plot normalised innovation test levels
plt.figure(8, figsize=(20, 13))
if tas_test_max == 0.0:
n_plots = 3
else:
n_plots = 4
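# the airspeed (TAS) subplot is only drawn when airspeed innovation data is present, i.e. the test ratio is non-zero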
plt.subplot(n_plots, 1, 1)
plt.plot(status_time, estimator_status['mag_test_ratio'], 'b')
plt.title('Normalised Innovation Test Levels')
plt.ylabel('mag')
plt.xlabel('time (sec)')
plt.grid()
plt.text(mag_test_max_time, mag_test_max,
'max=' + str(round(mag_test_max, 2)) + ' , mean=' + str(round(mag_test_mean, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='bottom', color='b')
plt.subplot(n_plots, 1, 2)
plt.plot(status_time, estimator_status['vel_test_ratio'], 'b')
plt.plot(status_time, estimator_status['pos_test_ratio'], 'r')
plt.ylabel('vel,pos')
plt.xlabel('time (sec)')
plt.grid()
plt.text(vel_test_max_time, vel_test_max,
'vel max=' + str(round(vel_test_max, 2)) + ' , mean=' + str(round(vel_test_mean, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='bottom', color='b')
plt.text(pos_test_max_time, pos_test_max,
'pos max=' + str(round(pos_test_max, 2)) + ' , mean=' + str(round(pos_test_mean, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='bottom', color='r')
plt.subplot(n_plots, 1, 3)
plt.plot(status_time, estimator_status['hgt_test_ratio'], 'b')
plt.ylabel('hgt')
plt.xlabel('time (sec)')
plt.grid()
plt.text(hgt_test_max_time, hgt_test_max,
'hgt max=' + str(round(hgt_test_max, 2)) + ' , mean=' + str(round(hgt_test_mean, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='bottom', color='b')
if hagl_test_max > 0.0:
plt.plot(status_time, estimator_status['hagl_test_ratio'], 'r')
plt.text(hagl_test_max_time, hagl_test_max,
'hagl max=' + str(round(hagl_test_max, 2)) + ' , mean=' + str(round(hagl_test_mean, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='bottom', color='r')
plt.ylabel('hgt,HAGL')
if n_plots == 4:
plt.subplot(n_plots, 1, 4)
plt.plot(status_time, estimator_status['tas_test_ratio'], 'b')
plt.ylabel('TAS')
plt.xlabel('time (sec)')
plt.grid()
plt.text(tas_test_max_time, tas_test_max,
'max=' + str(round(tas_test_max, 2)) + ' , mean=' + str(round(tas_test_mean, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='bottom', color='b')
pp.savefig()
plt.close(8)
# extract control mode metadata from estimator_status.control_mode_flags
# 0 - true if the filter tilt alignment is complete
# 1 - true if the filter yaw alignment is complete
# 2 - true if GPS measurements are being fused
# 3 - true if optical flow measurements are being fused
# 4 - true if a simple magnetic yaw heading is being fused
# 5 - true if 3-axis magnetometer measurement are being fused
# 6 - true if synthetic magnetic declination measurements are being fused
# 7 - true when the vehicle is airborne
# 8 - true when wind velocity is being estimated
# 9 - true when baro height is being fused as a primary height reference
# 10 - true when range finder height is being fused as a primary height reference
# 11 - true when GPS height is being fused as a primary height reference
# 12 - true when local position data from external vision is being fused
# 13 - true when yaw data from external vision measurements is being fused
# 14 - true when height data from external vision measurements is being fused
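# each flag is extracted by masking the bitmask with the corresponding power of two and converting the boolean
# result to 0/1, e.g. ((2 ** 2 & flags) > 0) * 1 reads bit 2 and is equivalent to (flags >> 2) & 1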
tilt_aligned = ((2 ** 0 & estimator_status['control_mode_flags']) > 0) * 1
yaw_aligned = ((2 ** 1 & estimator_status['control_mode_flags']) > 0) * 1
using_gps = ((2 ** 2 & estimator_status['control_mode_flags']) > 0) * 1
using_optflow = ((2 ** 3 & estimator_status['control_mode_flags']) > 0) * 1
using_magyaw = ((2 ** 4 & estimator_status['control_mode_flags']) > 0) * 1
using_mag3d = ((2 ** 5 & estimator_status['control_mode_flags']) > 0) * 1
using_magdecl = ((2 ** 6 & estimator_status['control_mode_flags']) > 0) * 1
airborne = ((2 ** 7 & estimator_status['control_mode_flags']) > 0) * 1
estimating_wind = ((2 ** 8 & estimator_status['control_mode_flags']) > 0) * 1
using_barohgt = ((2 ** 9 & estimator_status['control_mode_flags']) > 0) * 1
using_rnghgt = ((2 ** 10 & estimator_status['control_mode_flags']) > 0) * 1
using_gpshgt = ((2 ** 11 & estimator_status['control_mode_flags']) > 0) * 1
using_evpos = ((2 ** 12 & estimator_status['control_mode_flags']) > 0) * 1
using_evyaw = ((2 ** 13 & estimator_status['control_mode_flags']) > 0) * 1
using_evhgt = ((2 ** 14 & estimator_status['control_mode_flags']) > 0) * 1
# define flags for starting and finishing in air
b_starts_in_air = False
b_finishes_in_air = False
# calculate in-air transition time
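# np.diff(airborne) is +1 at the sample where the vehicle transitions from on-ground to in-air and -1 at the
# in-air to on-ground transition, so argmax/argmin of the difference locate the transition samples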
if (np.amin(airborne) < 0.5) and (np.amax(airborne) > 0.5):
in_air_transition_time_arg = np.argmax(np.diff(airborne))
in_air_transition_time = status_time[in_air_transition_time_arg]
elif (np.amax(airborne) > 0.5):
in_air_transition_time = np.amin(status_time)
print('log starts while in-air at ' + str(round(in_air_transition_time, 1)) + ' sec')
b_starts_in_air = True
else:
in_air_transition_time = float('NaN')
print('always on ground')
# calculate on-ground transition time
if (np.amin(np.diff(airborne)) < 0.0):
on_ground_transition_time_arg = np.argmin(np.diff(airborne))
on_ground_transition_time = status_time[on_ground_transition_time_arg]
elif (np.amax(airborne) > 0.5):
on_ground_transition_time = np.amax(status_time)
print('log finishes while in-air at ' + str(round(on_ground_transition_time, 1)) + ' sec')
b_finishes_in_air = True
else:
on_ground_transition_time = float('NaN')
print('always on ground')
if (np.amax(np.diff(airborne)) > 0.5) and (np.amin(np.diff(airborne)) < -0.5):
if ((on_ground_transition_time - in_air_transition_time) > 0.0):
in_air_duration = on_ground_transition_time - in_air_transition_time
else:
in_air_duration = float('NaN')
else:
in_air_duration = float('NaN')
# calculate alignment completion times
tilt_align_time_arg = np.argmax(np.diff(tilt_aligned))
tilt_align_time = status_time[tilt_align_time_arg]
yaw_align_time_arg = np.argmax(np.diff(yaw_aligned))
yaw_align_time = status_time[yaw_align_time_arg]
# calculate position aiding start times
gps_aid_time_arg = np.argmax(np.diff(using_gps))
gps_aid_time = status_time[gps_aid_time_arg]
optflow_aid_time_arg = np.argmax(np.diff(using_optflow))
optflow_aid_time = status_time[optflow_aid_time_arg]
evpos_aid_time_arg = np.argmax(np.diff(using_evpos))
evpos_aid_time = status_time[evpos_aid_time_arg]
# calculate height aiding start times
barohgt_aid_time_arg = np.argmax(np.diff(using_barohgt))
barohgt_aid_time = status_time[barohgt_aid_time_arg]
gpshgt_aid_time_arg = np.argmax(np.diff(using_gpshgt))
gpshgt_aid_time = status_time[gpshgt_aid_time_arg]
rnghgt_aid_time_arg = np.argmax(np.diff(using_rnghgt))
rnghgt_aid_time = status_time[rnghgt_aid_time_arg]
evhgt_aid_time_arg = np.argmax(np.diff(using_evhgt))
evhgt_aid_time = status_time[evhgt_aid_time_arg]
# calculate magnetometer aiding start times
using_magyaw_time_arg = np.argmax(np.diff(using_magyaw))
using_magyaw_time = status_time[using_magyaw_time_arg]
using_mag3d_time_arg = np.argmax(np.diff(using_mag3d))
using_mag3d_time = status_time[using_mag3d_time_arg]
using_magdecl_time_arg = np.argmax(np.diff(using_magdecl))
using_magdecl_time = status_time[using_magdecl_time_arg]
if plot:
# control mode summary plot A
plt.figure(9, figsize=(20, 13))
# subplot for alignment completion
plt.subplot(4, 1, 1)
plt.title('EKF Control Status - Figure A')
plt.plot(status_time, tilt_aligned, 'b')
plt.plot(status_time, yaw_aligned, 'r')
plt.ylim(-0.1, 1.1)
plt.ylabel('aligned')
plt.grid()
if np.amin(tilt_aligned) > 0:
plt.text(tilt_align_time, 0.5, 'no pre-arm data - cannot calculate alignment completion times', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='black')
else:
plt.text(tilt_align_time, 0.33, 'tilt alignment at ' + str(round(tilt_align_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
plt.text(yaw_align_time, 0.67, 'yaw alignment at ' + str(round(yaw_align_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='r')
# subplot for position aiding
plt.subplot(4, 1, 2)
plt.plot(status_time, using_gps, 'b')
plt.plot(status_time, using_optflow, 'r')
plt.plot(status_time, using_evpos, 'g')
plt.ylim(-0.1, 1.1)
plt.ylabel('pos aiding')
plt.grid()
if np.amin(using_gps) > 0:
plt.text(gps_aid_time, 0.25, 'no pre-arm data - cannot calculate GPS aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
elif np.amax(using_gps) > 0:
plt.text(gps_aid_time, 0.25, 'GPS aiding at ' + str(round(gps_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
if np.amin(using_optflow) > 0:
plt.text(optflow_aid_time, 0.50, 'no pre-arm data - cannot calculate optical flow aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
elif np.amax(using_optflow) > 0:
plt.text(optflow_aid_time, 0.50, 'optical flow aiding at ' + str(round(optflow_aid_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
if np.amin(using_evpos) > 0:
plt.text(evpos_aid_time, 0.75, 'no pre-arm data - cannot calculate external vision aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='g')
elif np.amax(using_evpos) > 0:
plt.text(evpos_aid_time, 0.75, 'external vision aiding at ' + str(round(evpos_aid_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='g')
# subplot for height aiding
plt.subplot(4, 1, 3)
plt.plot(status_time, using_barohgt, 'b')
plt.plot(status_time, using_gpshgt, 'r')
plt.plot(status_time, using_rnghgt, 'g')
plt.plot(status_time, using_evhgt, 'c')
plt.ylim(-0.1, 1.1)
plt.ylabel('hgt aiding')
plt.grid()
if np.amin(using_barohgt) > 0:
plt.text(barohgt_aid_time, 0.2, 'no pre-arm data - cannot calculate Baro aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
elif np.amax(using_barohgt) > 0:
plt.text(barohgt_aid_time, 0.2, 'Baro aiding at ' + str(round(barohgt_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
if np.amin(using_gpshgt) > 0:
plt.text(gpshgt_aid_time, 0.4, 'no pre-arm data - cannot calculate GPS aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='r')
elif np.amax(using_gpshgt) > 0:
plt.text(gpshgt_aid_time, 0.4, 'GPS aiding at ' + str(round(gpshgt_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='r')
if np.amin(using_rnghgt) > 0:
plt.text(rnghgt_aid_time, 0.6, 'no pre-arm data - cannot calculate rangefinder aiding start time', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='g')
elif np.amax(using_rnghgt) > 0:
plt.text(rnghgt_aid_time, 0.6, 'rangefinder aiding at ' + str(round(rnghgt_aid_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='g')
if np.amin(using_evhgt) > 0:
plt.text(evhgt_aid_time, 0.8, 'no pre-arm data - cannot calculate external vision aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='c')
elif np.amax(using_evhgt) > 0:
plt.text(evhgt_aid_time, 0.8, 'external vision aiding at ' + str(round(evhgt_aid_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='c')
# subplot for magnetometer aiding
plt.subplot(4, 1, 4)
plt.plot(status_time, using_magyaw, 'b')
plt.plot(status_time, using_mag3d, 'r')
plt.plot(status_time, using_magdecl, 'g')
plt.ylim(-0.1, 1.1)
plt.ylabel('mag aiding')
plt.xlabel('time (sec)')
plt.grid()
if np.amin(using_magyaw) > 0:
plt.text(using_magyaw_time, 0.25, 'no pre-arm data - cannot calculate magnetic yaw aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='b')
elif np.amax(using_magyaw) > 0:
plt.text(using_magyaw_time, 0.25, 'magnetic yaw aiding at ' + str(round(using_magyaw_time, 1)) + ' sec',
fontsize=12, horizontalalignment='right', verticalalignment='center', color='b')
if np.amin(using_mag3d) > 0:
plt.text(using_mag3d_time, 0.50, 'no pre-arm data - cannot calculate 3D magnetometer aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
elif np.amax(using_mag3d) > 0:
plt.text(using_mag3d_time, 0.50, 'magnetometer 3D aiding at ' + str(round(using_mag3d_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='r')
if np.amin(using_magdecl) > 0:
plt.text(using_magdecl_time, 0.75, 'no pre-arm data - cannot calculate magnetic declination aiding start time',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='g')
elif np.amax(using_magdecl) > 0:
plt.text(using_magdecl_time, 0.75,
'magnetic declination aiding at ' + str(round(using_magdecl_time, 1)) + ' sec', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='g')
pp.savefig()
plt.close(9)
# control mode summary plot B
plt.figure(10, figsize=(20, 13))
# subplot for airborne status
plt.subplot(2, 1, 1)
plt.title('EKF Control Status - Figure B')
plt.plot(status_time, airborne, 'b')
plt.ylim(-0.1, 1.1)
plt.ylabel('airborne')
plt.grid()
if np.amax(np.diff(airborne)) < 0.5:
plt.text(in_air_transition_time, 0.67, 'ground to air transition not detected', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
else:
plt.text(in_air_transition_time, 0.67, 'in-air at ' + str(round(in_air_transition_time, 1)) + ' sec',
fontsize=12, horizontalalignment='left', verticalalignment='center', color='b')
if np.amin(np.diff(airborne)) > -0.5:
plt.text(on_ground_transition_time, 0.33, 'air to ground transition not detected', fontsize=12,
horizontalalignment='left', verticalalignment='center', color='b')
else:
plt.text(on_ground_transition_time, 0.33, 'on-ground at ' + str(round(on_ground_transition_time, 1)) + ' sec',
fontsize=12, horizontalalignment='right', verticalalignment='center', color='b')
if in_air_duration > 0.0:
plt.text((in_air_transition_time + on_ground_transition_time) / 2, 0.5,
'duration = ' + str(round(in_air_duration, 1)) + ' sec', fontsize=12, horizontalalignment='center',
verticalalignment='center', color='b')
# subplot for wind estimation status
plt.subplot(2, 1, 2)
plt.plot(status_time, estimating_wind, 'b')
plt.ylim(-0.1, 1.1)
plt.ylabel('estimating wind')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(10)
# innovation_check_flags summary
# 0 - true if velocity observations have been rejected
# 1 - true if horizontal position observations have been rejected
# 2 - true if vertical position observations have been rejected
# 3 - true if the X magnetometer observation has been rejected
# 4 - true if the Y magnetometer observation has been rejected
# 5 - true if the Z magnetometer observation has been rejected
# 6 - true if the yaw observation has been rejected
# 7 - true if the airspeed observation has been rejected
# 8 - true if synthetic sideslip observation has been rejected
# 9 - true if the height above ground observation has been rejected
# 10 - true if the X optical flow observation has been rejected
# 11 - true if the Y optical flow observation has been rejected
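# decoded below using the same power-of-two masking pattern as control_mode_flags above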
vel_innov_fail = ((2 ** 0 & estimator_status['innovation_check_flags']) > 0) * 1
posh_innov_fail = ((2 ** 1 & estimator_status['innovation_check_flags']) > 0) * 1
posv_innov_fail = ((2 ** 2 & estimator_status['innovation_check_flags']) > 0) * 1
magx_innov_fail = ((2 ** 3 & estimator_status['innovation_check_flags']) > 0) * 1
magy_innov_fail = ((2 ** 4 & estimator_status['innovation_check_flags']) > 0) * 1
magz_innov_fail = ((2 ** 5 & estimator_status['innovation_check_flags']) > 0) * 1
yaw_innov_fail = ((2 ** 6 & estimator_status['innovation_check_flags']) > 0) * 1
tas_innov_fail = ((2 ** 7 & estimator_status['innovation_check_flags']) > 0) * 1
sli_innov_fail = ((2 ** 8 & estimator_status['innovation_check_flags']) > 0) * 1
hagl_innov_fail = ((2 ** 9 & estimator_status['innovation_check_flags']) > 0) * 1
ofx_innov_fail = ((2 ** 10 & estimator_status['innovation_check_flags']) > 0) * 1
ofy_innov_fail = ((2 ** 11 & estimator_status['innovation_check_flags']) > 0) * 1
if plot:
# plot innovation_check_flags summary
plt.figure(11, figsize=(20, 13))
plt.subplot(6, 1, 1)
plt.title('EKF Innovation Test Fails')
plt.plot(status_time, vel_innov_fail, 'b', label='vel NED')
plt.plot(status_time, posh_innov_fail, 'r', label='pos NE')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.legend(loc='upper left')
plt.grid()
plt.subplot(6, 1, 2)
plt.plot(status_time, posv_innov_fail, 'b', label='hgt absolute')
plt.plot(status_time, hagl_innov_fail, 'r', label='hgt above ground')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.legend(loc='upper left')
plt.grid()
plt.subplot(6, 1, 3)
plt.plot(status_time, magx_innov_fail, 'b', label='mag_x')
plt.plot(status_time, magy_innov_fail, 'r', label='mag_y')
plt.plot(status_time, magz_innov_fail, 'g', label='mag_z')
plt.plot(status_time, yaw_innov_fail, 'c', label='yaw')
plt.legend(loc='upper left')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.grid()
plt.subplot(6, 1, 4)
plt.plot(status_time, tas_innov_fail, 'b', label='airspeed')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.legend(loc='upper left')
plt.grid()
plt.subplot(6, 1, 5)
plt.plot(status_time, sli_innov_fail, 'b', label='sideslip')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.legend(loc='upper left')
plt.grid()
plt.subplot(6, 1, 6)
plt.plot(status_time, ofx_innov_fail, 'b', label='flow X')
plt.plot(status_time, ofy_innov_fail, 'r', label='flow Y')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.xlabel('time (sec)')
plt.legend(loc='upper left')
plt.grid()
pp.savefig()
plt.close(11)
# gps_check_fail_flags summary
plt.figure(12, figsize=(20, 13))
# 0 : insufficient fix type (no 3D solution)
# 1 : minimum required sat count fail
# 2 : minimum required GDoP fail
# 3 : maximum allowed horizontal position error fail
# 4 : maximum allowed vertical position error fail
# 5 : maximum allowed speed error fail
# 6 : maximum allowed horizontal position drift fail
# 7 : maximum allowed vertical position drift fail
# 8 : maximum allowed horizontal speed fail
# 9 : maximum allowed vertical velocity discrepancy fail
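# bits 0-5 are checks applied directly to quantities reported by the GPS receiver, while bits 6-9 are checks on
# quantities derived from the receiver output over time (drift and speed), matching the two summary plots below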
gfix_fail = ((2 ** 0 & estimator_status['gps_check_fail_flags']) > 0) * 1
nsat_fail = ((2 ** 1 & estimator_status['gps_check_fail_flags']) > 0) * 1
gdop_fail = ((2 ** 2 & estimator_status['gps_check_fail_flags']) > 0) * 1
herr_fail = ((2 ** 3 & estimator_status['gps_check_fail_flags']) > 0) * 1
verr_fail = ((2 ** 4 & estimator_status['gps_check_fail_flags']) > 0) * 1
serr_fail = ((2 ** 5 & estimator_status['gps_check_fail_flags']) > 0) * 1
hdrift_fail = ((2 ** 6 & estimator_status['gps_check_fail_flags']) > 0) * 1
vdrift_fail = ((2 ** 7 & estimator_status['gps_check_fail_flags']) > 0) * 1
hspd_fail = ((2 ** 8 & estimator_status['gps_check_fail_flags']) > 0) * 1
veld_diff_fail = ((2 ** 9 & estimator_status['gps_check_fail_flags']) > 0) * 1
plt.subplot(2, 1, 1)
plt.title('GPS Direct Output Check Failures')
plt.plot(status_time, gfix_fail, 'k', label='fix type')
plt.plot(status_time, nsat_fail, 'b', label='N sats')
plt.plot(status_time, gdop_fail, 'r', label='GDOP')
plt.plot(status_time, herr_fail, 'g', label='horiz pos error')
plt.plot(status_time, verr_fail, 'c', label='vert pos error')
plt.plot(status_time, serr_fail, 'm', label='speed error')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.legend(loc='upper right')
plt.grid()
plt.subplot(2, 1, 2)
plt.title('GPS Derived Output Check Failures')
plt.plot(status_time, hdrift_fail, 'b', label='horiz drift')
plt.plot(status_time, vdrift_fail, 'r', label='vert drift')
plt.plot(status_time, hspd_fail, 'g', label='horiz speed')
plt.plot(status_time, veld_diff_fail, 'c', label='vert vel inconsistent')
plt.ylim(-0.1, 1.1)
plt.ylabel('failed')
plt.xlabel('time (sec)')
plt.legend(loc='upper right')
plt.grid()
pp.savefig()
plt.close(12)
# filter reported accuracy
plt.figure(13, figsize=(20, 13))
plt.title('Reported Accuracy')
plt.plot(status_time, estimator_status['pos_horiz_accuracy'], 'b', label='horizontal')
plt.plot(status_time, estimator_status['pos_vert_accuracy'], 'r', label='vertical')
plt.ylabel('accuracy (m)')
plt.xlabel('time (sec)')
plt.legend(loc='upper right')
plt.grid()
pp.savefig()
plt.close(13)
# Plot the EKF IMU vibration metrics
plt.figure(14, figsize=(20, 13))
vibe_coning_max_arg = np.argmax(estimator_status['vibe[0]'])
vibe_coning_max_time = status_time[vibe_coning_max_arg]
vibe_coning_max = np.amax(estimator_status['vibe[0]'])
vibe_hf_dang_max_arg = np.argmax(estimator_status['vibe[1]'])
vibe_hf_dang_max_time = status_time[vibe_hf_dang_max_arg]
vibe_hf_dang_max = np.amax(estimator_status['vibe[1]'])
vibe_hf_dvel_max_arg = np.argmax(estimator_status['vibe[2]'])
vibe_hf_dvel_max_time = status_time[vibe_hf_dvel_max_arg]
vibe_hf_dvel_max = np.amax(estimator_status['vibe[2]'])
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], 1000.0 * estimator_status['vibe[0]'], 'b')
plt.title('IMU Vibration Metrics')
plt.ylabel('Del Ang Coning (mrad)')
plt.grid()
plt.text(vibe_coning_max_time, 1000.0 * vibe_coning_max, 'max=' + str(round(1000.0 * vibe_coning_max, 5)),
fontsize=12, horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], 1000.0 * estimator_status['vibe[1]'], 'b')
plt.ylabel('HF Del Ang (mrad)')
plt.grid()
plt.text(vibe_hf_dang_max_time, 1000.0 * vibe_hf_dang_max, 'max=' + str(round(1000.0 * vibe_hf_dang_max, 3)),
fontsize=12, horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['vibe[2]'], 'b')
plt.ylabel('HF Del Vel (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(vibe_hf_dvel_max_time, vibe_hf_dvel_max, 'max=' + str(round(vibe_hf_dvel_max, 4)), fontsize=12,
horizontalalignment='left', verticalalignment='top')
pp.savefig()
plt.close(14)
# Plot the EKF output observer tracking errors
plt.figure(15, figsize=(20, 13))
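# output_tracking_error[0..2] are the angular, velocity and position error magnitudes between the EKF states and
# the output predictor that propagates them forward from the delayed fusion time horizon to the current time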
ang_track_err_max_arg = np.argmax(ekf2_innovations['output_tracking_error[0]'])
ang_track_err_max_time = innov_time[ang_track_err_max_arg]
ang_track_err_max = np.amax(ekf2_innovations['output_tracking_error[0]'])
vel_track_err_max_arg = np.argmax(ekf2_innovations['output_tracking_error[1]'])
vel_track_err_max_time = innov_time[vel_track_err_max_arg]
vel_track_err_max = np.amax(ekf2_innovations['output_tracking_error[1]'])
pos_track_err_max_arg = np.argmax(ekf2_innovations['output_tracking_error[2]'])
pos_track_err_max_time = innov_time[pos_track_err_max_arg]
pos_track_err_max = np.amax(ekf2_innovations['output_tracking_error[2]'])
plt.subplot(3, 1, 1)
plt.plot(1e-6 * ekf2_innovations['timestamp'], 1e3 * ekf2_innovations['output_tracking_error[0]'], 'b')
plt.title('Output Observer Tracking Error Magnitudes')
plt.ylabel('angles (mrad)')
plt.grid()
plt.text(ang_track_err_max_time, 1e3 * ang_track_err_max, 'max=' + str(round(1e3 * ang_track_err_max, 2)),
fontsize=12, horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 2)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['output_tracking_error[1]'], 'b')
plt.ylabel('velocity (m/s)')
plt.grid()
plt.text(vel_track_err_max_time, vel_track_err_max, 'max=' + str(round(vel_track_err_max, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='top')
plt.subplot(3, 1, 3)
plt.plot(1e-6 * ekf2_innovations['timestamp'], ekf2_innovations['output_tracking_error[2]'], 'b')
plt.ylabel('position (m)')
plt.xlabel('time (sec)')
plt.grid()
plt.text(pos_track_err_max_time, pos_track_err_max, 'max=' + str(round(pos_track_err_max, 2)), fontsize=12,
horizontalalignment='left', verticalalignment='top')
pp.savefig()
plt.close(15)
# Plot the delta angle bias estimates
plt.figure(16, figsize=(20, 13))
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[10]'], 'b')
plt.title('Delta Angle Bias Estimates')
plt.ylabel('X (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[11]'], 'b')
plt.ylabel('Y (rad)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[12]'], 'b')
plt.ylabel('Z (rad)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(16)
# Plot the delta velocity bias estimates
plt.figure(17, figsize=(20, 13))
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[13]'], 'b')
plt.title('Delta Velocity Bias Estimates')
plt.ylabel('X (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[14]'], 'b')
plt.ylabel('Y (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[15]'], 'b')
plt.ylabel('Z (m/s)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(17)
# Plot the earth frame magnetic field estimates
plt.figure(18, figsize=(20, 13))
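# states[16..18] are the earth frame magnetic field estimates in NED axes, so the field strength is the vector
# norm, declination = atan2(East, North) and inclination = asin(Down / strength)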
plt.subplot(3, 1, 3)
strength = (estimator_status['states[16]'] ** 2 + estimator_status['states[17]'] ** 2 + estimator_status[
'states[18]'] ** 2) ** 0.5
plt.plot(1e-6 * estimator_status['timestamp'], strength, 'b')
plt.ylabel('strength (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 1)
rad2deg = 57.2958
declination = rad2deg * np.arctan2(estimator_status['states[17]'], estimator_status['states[16]'])
plt.plot(1e-6 * estimator_status['timestamp'], declination, 'b')
plt.title('Earth Magnetic Field Estimates')
plt.ylabel('declination (deg)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
inclination = rad2deg * np.arcsin(estimator_status['states[18]'] / np.maximum(strength, np.finfo(np.float32).eps))
plt.plot(1e-6 * estimator_status['timestamp'], inclination, 'b')
plt.ylabel('inclination (deg)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(18)
# Plot the body frame magnetic field estimates
plt.figure(19, figsize=(20, 13))
plt.subplot(3, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[19]'], 'b')
plt.title('Magnetometer Bias Estimates')
plt.ylabel('X (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[20]'], 'b')
plt.ylabel('Y (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[21]'], 'b')
plt.ylabel('Z (Gauss)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(19)
# Plot the EKF wind estimates
plt.figure(20, figsize=(20, 13))
plt.subplot(2, 1, 1)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[22]'], 'b')
plt.title('Wind Velocity Estimates')
plt.ylabel('North (m/s)')
plt.xlabel('time (sec)')
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(1e-6 * estimator_status['timestamp'], estimator_status['states[23]'], 'b')
plt.ylabel('East (m/s)')
plt.xlabel('time (sec)')
plt.grid()
pp.savefig()
plt.close(20)
# close the pdf file
pp.close()
# don't display to screen
# plt.show()
# close all figures
plt.close("all")
# Do some automated analysis of the status data
# normal index range is defined by the flight duration
start_index = np.amin(np.where(status_time > in_air_transition_time))
end_index = np.amax(np.where(status_time <= on_ground_transition_time))
num_valid_values = (end_index - start_index + 1)
# find a late/early index range from 5 sec after in_air_transition_time to 5 sec before on-ground transition time for mag and optical flow checks to avoid false positives
# this can be used to prevent false positives for sensors adversely affected by close proximity to the ground
# don't do this if the log starts or finishes in air or if it is shut off by flag
late_start_index = np.amin(np.where(status_time > (in_air_transition_time + 5.0)))\
if (late_start_early_ending and not b_starts_in_air) else start_index
early_end_index = np.amax(np.where(status_time <= (on_ground_transition_time - 5.0))) \
if (late_start_early_ending and not b_finishes_in_air) else end_index
num_valid_values_trimmed = (early_end_index - late_start_index + 1)
# also find the start and finish indexes for the innovation data
innov_start_index = np.amin(np.where(innov_time > in_air_transition_time))
innov_end_index = np.amax(np.where(innov_time <= on_ground_transition_time))
innov_num_valid_values = (innov_end_index - innov_start_index + 1)
innov_late_start_index = np.amin(np.where(innov_time > (in_air_transition_time + 5.0))) \
if (late_start_early_ending and not b_starts_in_air) else innov_start_index
innov_early_end_index = np.amax(np.where(innov_time <= (on_ground_transition_time - 5.0))) \
if (late_start_early_ending and not b_finishes_in_air) else innov_end_index
innov_num_valid_values_trimmed = (innov_early_end_index - innov_late_start_index + 1)
# define dictionary of test results and descriptions
test_results = {
'master_status': ['Pass',
'Master check status which can be either Pass, Warning or Fail. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'mag_sensor_status': ['Pass',
'Magnetometer sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'yaw_sensor_status': ['Pass',
'Yaw sensor check summary. This sensor data can be sourced from the magnetometer or an external vision system. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'vel_sensor_status': ['Pass',
'Velocity sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'pos_sensor_status': ['Pass',
'Position sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'hgt_sensor_status': ['Pass',
'Height sensor check summary. This sensor data can be sourced from either Baro or GPS or range finder or external vision system. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'hagl_sensor_status': ['Pass',
'Height above ground sensor check summary. This sensor data is normally sourced from a rangefinder sensor. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'tas_sensor_status': ['Pass',
'Airspeed sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'imu_sensor_status': ['Pass',
'IMU sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'imu_vibration_check': ['Pass',
'IMU vibration check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'imu_bias_check': ['Pass',
'IMU bias check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'imu_output_predictor_check': ['Pass',
'IMU output predictor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'flow_sensor_status': ['Pass',
'Optical Flow sensor check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'filter_fault_status': ['Pass',
'Internal Filter check summary. A Fail result indicates that a significant error causing a significant reduction in vehicle navigation performance was detected. A Warning result indicates that error levels higher than normal were detected but these errors did not significantly impact navigation performance. A Pass result indicates that no anomalies were detected and no further investigation is required'],
'mag_percentage_red': [float('NaN'),
'The percentage of in-flight consolidated magnetic field sensor innovation consistency test values > 1.0.'],
'mag_percentage_amber': [float('NaN'),
'The percentage of in-flight consolidated magnetic field sensor innovation consistency test values > 0.5.'],
'magx_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the X-axis magnetic field sensor innovation consistency test.'],
'magy_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the Y-axis magnetic field sensor innovation consistency test.'],
'magz_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the Z-axis magnetic field sensor innovation consistency test.'],
'yaw_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the yaw sensor innovation consistency test.'],
'mag_test_max': [float('NaN'),
'The maximum in-flight value of the magnetic field sensor innovation consistency test ratio.'],
'mag_test_mean': [float('NaN'),
'The mean in-flight value of the magnetic field sensor innovation consistency test ratio.'],
'vel_percentage_red': [float('NaN'),
'The percentage of in-flight velocity sensor consolidated innovation consistency test values > 1.0.'],
'vel_percentage_amber': [float('NaN'),
'The percentage of in-flight velocity sensor consolidated innovation consistency test values > 0.5.'],
'vel_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the velocity sensor consolidated innovation consistency test.'],
'vel_test_max': [float('NaN'),
'The maximum in-flight value of the velocity sensor consolidated innovation consistency test ratio.'],
'vel_test_mean': [float('NaN'),
'The mean in-flight value of the velocity sensor consolidated innovation consistency test ratio.'],
'pos_percentage_red': [float('NaN'),
'The percentage of in-flight position sensor consolidated innovation consistency test values > 1.0.'],
'pos_percentage_amber': [float('NaN'),
'The percentage of in-flight position sensor consolidated innovation consistency test values > 0.5.'],
'pos_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the position sensor consolidated innovation consistency test.'],
'pos_test_max': [float('NaN'),
'The maximum in-flight value of the position sensor consolidated innovation consistency test ratio.'],
'pos_test_mean': [float('NaN'),
'The mean in-flight value of the position sensor consolidated innovation consistency test ratio.'],
'hgt_percentage_red': [float('NaN'),
'The percentage of in-flight height sensor innovation consistency test values > 1.0.'],
'hgt_percentage_amber': [float('NaN'),
'The percentage of in-flight height sensor innovation consistency test values > 0.5.'],
'hgt_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the height sensor innovation consistency test.'],
'hgt_test_max': [float('NaN'),
'The maximum in-flight value of the height sensor innovation consistency test ratio.'],
'hgt_test_mean': [float('NaN'),
'The mean in-flight value of the height sensor innovation consistency test ratio.'],
'tas_percentage_red': [float('NaN'),
'The percentage of in-flight airspeed sensor innovation consistency test values > 1.0.'],
'tas_percentage_amber': [float('NaN'),
'The percentage of in-flight airspeed sensor innovation consistency test values > 0.5.'],
'tas_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the airspeed sensor innovation consistency test.'],
'tas_test_max': [float('NaN'),
'The maximum in-flight value of the airspeed sensor innovation consistency test ratio.'],
'tas_test_mean': [float('NaN'),
'The mean in-flight value of the airspeed sensor innovation consistency test ratio.'],
'hagl_percentage_red': [float('NaN'),
'The percentage of in-flight height above ground sensor innovation consistency test values > 1.0.'],
'hagl_percentage_amber': [float('NaN'),
'The percentage of in-flight height above ground sensor innovation consistency test values > 0.5.'],
'hagl_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the height above ground sensor innovation consistency test.'],
'hagl_test_max': [float('NaN'),
'The maximum in-flight value of the height above ground sensor innovation consistency test ratio.'],
'hagl_test_mean': [float('NaN'),
'The mean in-flight value of the height above ground sensor innovation consistency test ratio.'],
'ofx_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the optical flow sensor X-axis innovation consistency test.'],
'ofy_fail_percentage': [float('NaN'),
'The percentage of in-flight recorded failure events for the optical flow sensor Y-axis innovation consistency test.'],
'filter_faults_max': [float('NaN'),
'Largest recorded value of the filter internal fault bitmask. Should always be zero.'],
'imu_coning_peak': [float('NaN'), 'Peak in-flight value of the IMU delta angle coning vibration metric (rad)'],
'imu_coning_mean': [float('NaN'), 'Mean in-flight value of the IMU delta angle coning vibration metric (rad)'],
'imu_hfdang_peak': [float('NaN'),
'Peak in-flight value of the IMU delta angle high frequency vibration metric (rad)'],
'imu_hfdang_mean': [float('NaN'),
'Mean in-flight value of the IMU delta angle high frequency vibration metric (rad)'],
'imu_hfdvel_peak': [float('NaN'),
'Peak in-flight value of the IMU delta velocity high frequency vibration metric (m/s)'],
'imu_hfdvel_mean': [float('NaN'),
'Mean in-flight value of the IMU delta velocity high frequency vibration metric (m/s)'],
'output_obs_ang_err_median': [float('NaN'),
'Median in-flight value of the output observer angular error (rad)'],
'output_obs_vel_err_median': [float('NaN'),
'Median in-flight value of the output observer velocity error (m/s)'],
'output_obs_pos_err_median': [float('NaN'), 'Median in-flight value of the output observer position error (m)'],
'imu_dang_bias_median': [float('NaN'), 'Median in-flight value of the delta angle bias vector length (rad)'],
'imu_dvel_bias_median': [float('NaN'), 'Median in-flight value of the delta velocity bias vector length (m/s)'],
'tilt_align_time': [float('NaN'),
'The time in seconds measured from startup that the EKF completed the tilt alignment. A nan value indicates that the alignment had completed before logging started or alignment did not complete.'],
'yaw_align_time': [float('NaN'),
'The time in seconds measured from startup that the EKF completed the yaw alignment.'],
'in_air_transition_time': [round(in_air_transition_time, 1),
        'The time in seconds measured from startup that the EKF transitioned into in-air mode. Set to a nan if a transition event is not detected.'],
'on_ground_transition_time': [round(on_ground_transition_time, 1),
'The time in seconds measured from startup that the EKF transitioned out of in-air mode. Set to a nan if a transition event is not detected.'],
}
# generate test metadata
# reduction of innovation message data
if (innov_early_end_index > (innov_late_start_index + 50)):
# Output Observer Tracking Errors
test_results['output_obs_ang_err_median'][0] = np.median(
ekf2_innovations['output_tracking_error[0]'][innov_late_start_index:innov_early_end_index + 1])
test_results['output_obs_vel_err_median'][0] = np.median(
ekf2_innovations['output_tracking_error[1]'][innov_late_start_index:innov_early_end_index + 1])
test_results['output_obs_pos_err_median'][0] = np.median(
ekf2_innovations['output_tracking_error[2]'][innov_late_start_index:innov_early_end_index + 1])
# reduction of status message data
if (early_end_index > (late_start_index + 50)):
# IMU vibration checks
temp = np.amax(estimator_status['vibe[0]'][late_start_index:early_end_index])
if (temp > 0.0):
test_results['imu_coning_peak'][0] = temp
test_results['imu_coning_mean'][0] = np.mean(estimator_status['vibe[0]'][late_start_index:early_end_index + 1])
temp = np.amax(estimator_status['vibe[1]'][late_start_index:early_end_index])
if (temp > 0.0):
test_results['imu_hfdang_peak'][0] = temp
test_results['imu_hfdang_mean'][0] = np.mean(estimator_status['vibe[1]'][late_start_index:early_end_index + 1])
temp = np.amax(estimator_status['vibe[2]'][late_start_index:early_end_index])
if (temp > 0.0):
test_results['imu_hfdvel_peak'][0] = temp
test_results['imu_hfdvel_mean'][0] = np.mean(estimator_status['vibe[2]'][late_start_index:early_end_index + 1])
# Magnetometer Sensor Checks
if (np.amax(yaw_aligned) > 0.5):
mag_num_red = (estimator_status['mag_test_ratio'][start_index:end_index + 1] > 1.0).sum()
mag_num_amber = (estimator_status['mag_test_ratio'][start_index:end_index + 1] > 0.5).sum() - mag_num_red
test_results['mag_percentage_red'][0] = 100.0 * mag_num_red / num_valid_values_trimmed
test_results['mag_percentage_amber'][0] = 100.0 * mag_num_amber / num_valid_values_trimmed
test_results['mag_test_max'][0] = np.amax(
estimator_status['mag_test_ratio'][late_start_index:early_end_index + 1])
test_results['mag_test_mean'][0] = np.mean(estimator_status['mag_test_ratio'][start_index:end_index])
test_results['magx_fail_percentage'][0] = 100.0 * (
magx_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
test_results['magy_fail_percentage'][0] = 100.0 * (
magy_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
test_results['magz_fail_percentage'][0] = 100.0 * (
magz_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
test_results['yaw_fail_percentage'][0] = 100.0 * (
yaw_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
# Velocity Sensor Checks
if (np.amax(using_gps) > 0.5):
vel_num_red = (estimator_status['vel_test_ratio'][start_index:end_index + 1] > 1.0).sum()
vel_num_amber = (estimator_status['vel_test_ratio'][start_index:end_index + 1] > 0.5).sum() - vel_num_red
test_results['vel_percentage_red'][0] = 100.0 * vel_num_red / num_valid_values
test_results['vel_percentage_amber'][0] = 100.0 * vel_num_amber / num_valid_values
test_results['vel_test_max'][0] = np.amax(estimator_status['vel_test_ratio'][start_index:end_index + 1])
test_results['vel_test_mean'][0] = np.mean(estimator_status['vel_test_ratio'][start_index:end_index + 1])
test_results['vel_fail_percentage'][0] = 100.0 * (
vel_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
# Position Sensor Checks
if ((np.amax(using_gps) > 0.5) or (np.amax(using_evpos) > 0.5)):
pos_num_red = (estimator_status['pos_test_ratio'][start_index:end_index + 1] > 1.0).sum()
pos_num_amber = (estimator_status['pos_test_ratio'][start_index:end_index + 1] > 0.5).sum() - pos_num_red
test_results['pos_percentage_red'][0] = 100.0 * pos_num_red / num_valid_values
test_results['pos_percentage_amber'][0] = 100.0 * pos_num_amber / num_valid_values
test_results['pos_test_max'][0] = np.amax(estimator_status['pos_test_ratio'][start_index:end_index + 1])
test_results['pos_test_mean'][0] = np.mean(estimator_status['pos_test_ratio'][start_index:end_index + 1])
test_results['pos_fail_percentage'][0] = 100.0 * (
posh_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
# Height Sensor Checks
hgt_num_red = (estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1] > 1.0).sum()
hgt_num_amber = (estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1] > 0.5).sum() - hgt_num_red
test_results['hgt_percentage_red'][0] = 100.0 * hgt_num_red / num_valid_values_trimmed
test_results['hgt_percentage_amber'][0] = 100.0 * hgt_num_amber / num_valid_values_trimmed
test_results['hgt_test_max'][0] = np.amax(estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1])
test_results['hgt_test_mean'][0] = np.mean(estimator_status['hgt_test_ratio'][late_start_index:early_end_index + 1])
test_results['hgt_fail_percentage'][0] = 100.0 * (
posv_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
# Airspeed Sensor Checks
if (tas_test_max > 0.0):
tas_num_red = (estimator_status['tas_test_ratio'][start_index:end_index + 1] > 1.0).sum()
tas_num_amber = (estimator_status['tas_test_ratio'][start_index:end_index + 1] > 0.5).sum() - tas_num_red
test_results['tas_percentage_red'][0] = 100.0 * tas_num_red / num_valid_values
test_results['tas_percentage_amber'][0] = 100.0 * tas_num_amber / num_valid_values
test_results['tas_test_max'][0] = np.amax(estimator_status['tas_test_ratio'][start_index:end_index + 1])
test_results['tas_test_mean'][0] = np.mean(estimator_status['tas_test_ratio'][start_index:end_index + 1])
test_results['tas_fail_percentage'][0] = 100.0 * (
tas_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
# HAGL Sensor Checks
if (hagl_test_max > 0.0):
hagl_num_red = (estimator_status['hagl_test_ratio'][start_index:end_index + 1] > 1.0).sum()
hagl_num_amber = (estimator_status['hagl_test_ratio'][start_index:end_index + 1] > 0.5).sum() - hagl_num_red
test_results['hagl_percentage_red'][0] = 100.0 * hagl_num_red / num_valid_values
test_results['hagl_percentage_amber'][0] = 100.0 * hagl_num_amber / num_valid_values
test_results['hagl_test_max'][0] = np.amax(estimator_status['hagl_test_ratio'][start_index:end_index + 1])
test_results['hagl_test_mean'][0] = np.mean(estimator_status['hagl_test_ratio'][start_index:end_index + 1])
test_results['hagl_fail_percentage'][0] = 100.0 * (
hagl_innov_fail[start_index:end_index + 1] > 0.5).sum() / num_valid_values
# optical flow sensor checks
if (np.amax(using_optflow) > 0.5):
test_results['ofx_fail_percentage'][0] = 100.0 * (
ofx_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
test_results['ofy_fail_percentage'][0] = 100.0 * (
ofy_innov_fail[late_start_index:early_end_index + 1] > 0.5).sum() / num_valid_values_trimmed
# IMU bias checks
test_results['imu_dang_bias_median'][0] = (np.median(estimator_status['states[10]']) ** 2 + np.median(
estimator_status['states[11]']) ** 2 + np.median(estimator_status['states[12]']) ** 2) ** 0.5
test_results['imu_dvel_bias_median'][0] = (np.median(estimator_status['states[13]']) ** 2 + np.median(
estimator_status['states[14]']) ** 2 + np.median(estimator_status['states[15]']) ** 2) ** 0.5
        # Check for internal filter numerical faults
test_results['filter_faults_max'][0] = np.amax(estimator_status['filter_fault_flags'])
        # TODO - process the following bitmasks when they have been properly documented in the uORB topic
# estimator_status['health_flags']
# estimator_status['timeout_flags']
# calculate a master status - Fail, Warning, Pass
# check test results against levels to provide a master status
# check for warnings
if (test_results.get('mag_percentage_amber')[0] > check_levels.get('mag_amber_warn_pct')):
test_results['master_status'][0] = 'Warning'
test_results['mag_sensor_status'][0] = 'Warning'
if (test_results.get('vel_percentage_amber')[0] > check_levels.get('vel_amber_warn_pct')):
test_results['master_status'][0] = 'Warning'
test_results['vel_sensor_status'][0] = 'Warning'
if (test_results.get('pos_percentage_amber')[0] > check_levels.get('pos_amber_warn_pct')):
test_results['master_status'][0] = 'Warning'
test_results['pos_sensor_status'][0] = 'Warning'
if (test_results.get('hgt_percentage_amber')[0] > check_levels.get('hgt_amber_warn_pct')):
test_results['master_status'][0] = 'Warning'
test_results['hgt_sensor_status'][0] = 'Warning'
if (test_results.get('hagl_percentage_amber')[0] > check_levels.get('hagl_amber_warn_pct')):
test_results['master_status'][0] = 'Warning'
test_results['hagl_sensor_status'][0] = 'Warning'
if (test_results.get('tas_percentage_amber')[0] > check_levels.get('tas_amber_warn_pct')):
test_results['master_status'][0] = 'Warning'
test_results['tas_sensor_status'][0] = 'Warning'
# check for IMU sensor warnings
if ((test_results.get('imu_coning_peak')[0] > check_levels.get('imu_coning_peak_warn')) or
(test_results.get('imu_coning_mean')[0] > check_levels.get('imu_coning_mean_warn'))):
test_results['master_status'][0] = 'Warning'
test_results['imu_sensor_status'][0] = 'Warning'
test_results['imu_vibration_check'][0] = 'Warning'
print('IMU gyro coning check warning.')
if ((test_results.get('imu_hfdang_peak')[0] > check_levels.get('imu_hfdang_peak_warn')) or
(test_results.get('imu_hfdang_mean')[0] > check_levels.get('imu_hfdang_mean_warn'))):
test_results['master_status'][0] = 'Warning'
test_results['imu_sensor_status'][0] = 'Warning'
test_results['imu_vibration_check'][0] = 'Warning'
print('IMU gyro vibration check warning.')
if ((test_results.get('imu_hfdvel_peak')[0] > check_levels.get('imu_hfdvel_peak_warn')) or
(test_results.get('imu_hfdvel_mean')[0] > check_levels.get('imu_hfdvel_mean_warn'))):
test_results['master_status'][0] = 'Warning'
test_results['imu_sensor_status'][0] = 'Warning'
test_results['imu_vibration_check'][0] = 'Warning'
print('IMU accel vibration check warning.')
if ((test_results.get('imu_dang_bias_median')[0] > check_levels.get('imu_dang_bias_median_warn')) or
(test_results.get('imu_dvel_bias_median')[0] > check_levels.get('imu_dvel_bias_median_warn'))):
test_results['master_status'][0] = 'Warning'
test_results['imu_sensor_status'][0] = 'Warning'
test_results['imu_bias_check'][0] = 'Warning'
print('IMU bias check warning.')
if ((test_results.get('output_obs_ang_err_median')[0] > check_levels.get('obs_ang_err_median_warn')) or
(test_results.get('output_obs_vel_err_median')[0] > check_levels.get('obs_vel_err_median_warn')) or
(test_results.get('output_obs_pos_err_median')[0] > check_levels.get('obs_pos_err_median_warn'))):
test_results['master_status'][0] = 'Warning'
test_results['imu_sensor_status'][0] = 'Warning'
test_results['imu_output_predictor_check'][0] = 'Warning'
print('IMU output predictor check warning.')
# check for failures
if ((test_results.get('magx_fail_percentage')[0] > check_levels.get('mag_fail_pct')) or
(test_results.get('magy_fail_percentage')[0] > check_levels.get('mag_fail_pct')) or
(test_results.get('magz_fail_percentage')[0] > check_levels.get('mag_fail_pct')) or
(test_results.get('mag_percentage_amber')[0] > check_levels.get('mag_amber_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['mag_sensor_status'][0] = 'Fail'
print('Magnetometer sensor check failure.')
if (test_results.get('yaw_fail_percentage')[0] > check_levels.get('yaw_fail_pct')):
test_results['master_status'][0] = 'Fail'
test_results['yaw_sensor_status'][0] = 'Fail'
print('Yaw sensor check failure.')
if ((test_results.get('vel_fail_percentage')[0] > check_levels.get('vel_fail_pct')) or
(test_results.get('vel_percentage_amber')[0] > check_levels.get('vel_amber_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['vel_sensor_status'][0] = 'Fail'
print('Velocity sensor check failure.')
if ((test_results.get('pos_fail_percentage')[0] > check_levels.get('pos_fail_pct')) or
(test_results.get('pos_percentage_amber')[0] > check_levels.get('pos_amber_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['pos_sensor_status'][0] = 'Fail'
print('Position sensor check failure.')
if ((test_results.get('hgt_fail_percentage')[0] > check_levels.get('hgt_fail_pct')) or
(test_results.get('hgt_percentage_amber')[0] > check_levels.get('hgt_amber_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['hgt_sensor_status'][0] = 'Fail'
print('Height sensor check failure.')
if ((test_results.get('tas_fail_percentage')[0] > check_levels.get('tas_fail_pct')) or
(test_results.get('tas_percentage_amber')[0] > check_levels.get('tas_amber_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['tas_sensor_status'][0] = 'Fail'
print('Airspeed sensor check failure.')
if ((test_results.get('hagl_fail_percentage')[0] > check_levels.get('hagl_fail_pct')) or
(test_results.get('hagl_percentage_amber')[0] > check_levels.get('hagl_amber_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['hagl_sensor_status'][0] = 'Fail'
print('Height above ground sensor check failure.')
if ((test_results.get('ofx_fail_percentage')[0] > check_levels.get('flow_fail_pct')) or
(test_results.get('ofy_fail_percentage')[0] > check_levels.get('flow_fail_pct'))):
test_results['master_status'][0] = 'Fail'
test_results['flow_sensor_status'][0] = 'Fail'
print('Optical flow sensor check failure.')
if (test_results.get('filter_faults_max')[0] > 0):
test_results['master_status'][0] = 'Fail'
test_results['filter_fault_status'][0] = 'Fail'
return test_results
| bsd-3-clause | 2,124,241,447,415,836,000 | 64.384226 | 534 | 0.619305 | false |
belokop/indico_bare | indico_zodbimport/modules/event_categories.py | 1 | 3197 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals, division
from operator import attrgetter
import transaction
from indico.core.db import db
from indico.modules.events.models.events import Event
from indico.util.console import verbose_iterator, cformat
from indico.util.struct.iterables import committing_iterator
from indico_zodbimport import Importer
class EventCategoriesImporter(Importer):
def has_data(self):
return bool(Event.query.filter(Event.category_id.isnot(None)).count())
def migrate(self):
self._load_data()
self.migrate_event_categories()
def _load_data(self):
self.category_mapping = {}
for category in self.zodb_root['categories'].itervalues():
self.category_mapping[int(category.id)] = map(int, reversed(category.getCategoryPath()))
def migrate_event_categories(self):
self.print_step("Migrating event categories")
delete_events = set()
for conf in committing_iterator(self._iter_events()):
try:
category_chain = self.category_mapping[int(conf._Conference__owners[0].id)]
except (IndexError, KeyError):
self.print_error(cformat('%{red!}Event has no category!'), event_id=conf.id)
delete_events.add(int(conf.id))
continue
Event.query.filter_by(id=int(conf.id)).update({Event.category_id: category_chain[0],
Event.category_chain: category_chain},
synchronize_session=False)
if not self.quiet:
self.print_success(repr(category_chain), event_id=conf.id)
for event_id in delete_events:
self.print_warning(cformat('%{yellow!}Deleting broken event {}').format(event_id))
Event.query.filter_by(id=event_id).update({Event.is_deleted: True}, synchronize_session=False)
if self.zodb_root['conferences'].has_key(str(event_id)):
del self.zodb_root['conferences'][str(event_id)]
db.session.commit()
transaction.commit()
def _iter_events(self):
it = self.zodb_root['conferences'].itervalues()
total = len(self.zodb_root['conferences'])
if self.quiet:
it = verbose_iterator(it, total, attrgetter('id'), lambda x: x.__dict__.get('title', ''))
for conf in self.flushing_iterator(it):
yield conf
| gpl-3.0 | 1,307,890,533,711,365,600 | 42.202703 | 106 | 0.649984 | false |
blancha/abcngspipelines | bischipseq/convert1StartTo0Start_batch.py | 1 | 2156 | #!/usr/bin/env python3
# Version 1.0
# Author Alexis Blanchet-Cohen
# Date: 15/06/2014
import argparse
import glob
import os
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description='Generate scripts to convert bedgraph files from one-based start to zero-based start.')
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="convert1StartTo0Start")
parser.add_argument("-i", "--inputDirectory", help="Input directory with bedgraph files.", default="../bedgraph/methylation_counts_sorted/")
parser.add_argument("-o", "--outputDirectory", help="Output directory with sorted bedgraph files.", default="../bedgraph/methylation_counts_sorted_0_start/")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
samples = util.getMergedsamples()
# Read configuration files.
config = util.readConfigurationFiles()
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directory, if it does not exist yet.
if not os.path.exists(outputDirectory):
os.mkdir(outputDirectory)
for file in os.listdir(inputDirectory):
file = os.path.splitext(file)[0]
# Create script file.
scriptName = 'convert1StartTo0Start_' + file + '.sh'
script = open(scriptName, 'w')
util.writeHeader(script, config, "convert1StartTo0Start")
script.write("convert1StartTo0Start.py " + "\\\n")
script.write("--one_start_bedgraph " + inputDirectory + "/" + file + ".bedgraph " + "\\\n")
script.write("--zero_start_bedgraph " + outputDirectory + "/" + file + ".bedgraph")
script.close()
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 | -3,264,305,320,030,537,000 | 40.461538 | 157 | 0.728664 | false |
CyberLabs-BR/face_detect | pyimagesearch/nn/conv/minivggnet.py | 1 | 1990 | # import the necessary packages
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras import backend as K
class MiniVGGNet:
@staticmethod
def build(width, height, depth, classes):
# initialize the model along with the input shape to be
# "channels last" and the channels dimension itself
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
# if we are using "channels first", update the input shape
# and channels dimension
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# first CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding="same",
input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(classes))
model.add(Activation("softmax"))
# return the constructed network architecture
return model | mit | 1,556,194,539,473,619,200 | 32.183333 | 60 | 0.724623 | false |
dl1ksv/gr-display | docs/doxygen/doxyxml/text.py | 1 | 1297 | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-display
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
Utilities for extracting text from generated classes.
"""
from __future__ import unicode_literals
def is_string(txt):
if isinstance(txt, str):
return True
try:
if isinstance(txt, str):
return True
except NameError:
pass
return False
def description(obj):
if obj is None:
return None
return description_bit(obj).strip()
def description_bit(obj):
if hasattr(obj, 'content'):
contents = [description_bit(item) for item in obj.content]
result = ''.join(contents)
elif hasattr(obj, 'content_'):
contents = [description_bit(item) for item in obj.content_]
result = ''.join(contents)
elif hasattr(obj, 'value'):
result = description_bit(obj.value)
elif is_string(obj):
return obj
else:
raise Exception('Expecting a string or something with content, content_ or value attribute')
# If this bit is a paragraph then add one some line breaks.
if hasattr(obj, 'name') and obj.name == 'para':
result += "\n\n"
return result
| gpl-3.0 | 8,988,638,054,465,679,000 | 27.195652 | 100 | 0.643022 | false |
nickp60/riboSeed | riboSeed/riboScore.py | 1 | 24260 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
# Copyright 2017, National University of Ireland and The James Hutton Insitute
# Author: Nicholas Waters
#
# This code is part of the riboSeed package, and is governed by its licence.
# Please see the LICENSE file that should have been included as part of
# this package.
"""
"""
import os
import sys
import subprocess
import argparse
import multiprocessing
import glob
from Bio import SeqIO
import pandas as pd
from Bio.Blast.Applications import NcbiblastnCommandline
from .shared_methods import set_up_logging, combine_contigs
def get_args(test_args=None): # pragma: no cover
parser = argparse.ArgumentParser(prog="ribo score",
description="This does some simple blasting to detect correctness " +
"of riboSeed results")
parser.prog = "ribo score"
parser.add_argument("indir",
help="dir containing a genbank file, assembly files" +
"as fastas. Usually the 'mauve' dir in the riboSeed " +
"results")
parser.add_argument("-o", "--output", dest='output',
help="directory in which to place the output files",
default=None)
parser.add_argument("-l", "--flanking_length",
help="length of flanking regions, in bp; " +
"default: %(default)s",
default=1000, type=int, dest="flanking")
parser.add_argument("-p", "--min_percent", dest="min_percent",
help="minimum percent identity",
default=97, type=int)
parser.add_argument("-f", "--assembly_ext", dest="assembly_ext",
help="extenssion of reference, usually fasta",
default="fasta", type=str)
parser.add_argument("-g", "--ref_ext", dest="ref_ext",
help="extension of reference, usually .gb",
default="gb", type=str)
parser.add_argument("-F", "--blast_Full", dest="blast_full",
help="if true, blast full sequences along with " +
"just the flanking. Interpretation is not " +
"implemented currently as false positives cant " +
"be detected this way",
default=False, action="store_true")
parser.add_argument("-v", "--verbosity", dest='verbosity',
action="store",
default=2, type=int, choices=[1, 2, 3, 4, 5],
help="Logger writes debug to file in output dir; " +
"this sets verbosity level sent to stderr. " +
" 1 = debug(), 2 = info(), 3 = warning(), " +
"4 = error() and 5 = critical(); " +
"default: %(default)s")
# parser.add_argument("-t", "--blast_type",
# help="blastn or tblastx", default="tblastx")
if test_args is None:
args = parser.parse_args(sys.argv[2:])
else:
args = parser.parse_args(test_args)
return(args)
def make_nuc_nuc_recip_blast_cmds(
query_list, output, subject_file=None, logger=None):
"""given a file, make a blast cmd, and return path to output csv
"""
assert logger is not None, "must use logging"
blast_cmds = []
blast_outputs = []
recip_blast_outputs = []
for f in query_list:
        # run forward, nuc against nuc, blast
output_path_tab = str(
os.path.join(output,
os.path.splitext(os.path.basename(f))[0] +
"_vs_ref.tab"))
blast_cline = NcbiblastnCommandline(query=f,
subject=subject_file,
# evalue=.001,
outfmt=6,
#outfmt="'6 qaccver saccver pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen'",
out=output_path_tab)
add_params = str(" -num_threads 1 -num_alignments 50")
blast_command = str(str(blast_cline) + add_params)
blast_cmds.append(blast_command)
blast_outputs.append(output_path_tab)
        # run reverse, nuc against nuc, blast
recip_output_path_tab = os.path.join(
output,
"ref_vs_" + os.path.splitext(os.path.basename(f))[0] + ".tab")
recip_blast_cline = NcbiblastnCommandline(
query=subject_file,
subject=f,
# evalue=.001,
outfmt=6,
#outfmt="'6 qaccver saccver pident length mismatch gapopen qstart qend sstart send evalue bitscore qlen'",
out=recip_output_path_tab)
recip_blast_command = str(str(recip_blast_cline) + add_params)
blast_cmds.append(recip_blast_command)
recip_blast_outputs.append(recip_output_path_tab)
return(blast_cmds, blast_outputs, recip_blast_outputs)
def merge_outfiles(filelist, outfile):
"""
"""
# only grab .tab files, ie, the blast output
filelist = [i for i in filelist if i.split(".")[-1:] == ['tab']]
if len(filelist) == 1:
# print("only one file found! no merging needed")
return(filelist)
else:
# print("merging all the blast results to %s" % outfile)
nfiles = len(filelist)
fout = open(outfile, "a")
# first file:
with open(filelist[0]) as firstf:
for line in firstf:
fout.write(line)
# now the rest:
for num in range(1, nfiles):
with open(filelist[num]) as otherf:
for line in otherf:
fout.write(line)
fout.close()
return(outfile)
def BLAST_tab_to_df(path):
colnames = ["query_id", "subject_id", "identity_perc", "alignment_length",
"mismatches", "gap_opens", "q_start", "q_end", "s_start",
"s_end", "evalue", "bit_score"]
with open(path) as tab:
raw_csv_results = pd.read_csv(
tab, comment="#", sep="\t", names=colnames)
return raw_csv_results
def filter_recip_BLAST_df(df1, df2, min_percent, min_lens, logger=None):
""" results from pd.read_csv with default BLAST output 6 columns
returns a df
"""
assert logger is not None, "must use a logger"
logger.debug("shape of blast results")
logger.debug("shape of recip blast results")
# df1['genome'] = df1.query_id.str.split('_').str.get(0)
# df2['genome'] = df2.subject_id.str.split('_').str.get(0)
df1['genome'] = df1.query_id
df2['genome'] = df2.subject_id
logger.debug(df1.shape)
logger.debug(df2.shape)
# recip structure
filtered = pd.DataFrame(columns=df1.columns)
unq_subject = df1.subject_id.unique()
unq_query = df1.genome.unique()
recip_hits = []
nonrecip_hits = []
for gene in unq_subject:
for genome in unq_query:
logger.debug("Checking %s in %s for reciprocity" % (gene, genome))
tempdf1 = df1.loc[(df1["subject_id"] == gene) &
(df1["genome"] == genome), ]
tempdf2 = df2.loc[(df2["query_id"] == gene) &
(df2["genome"] == genome), ]
if tempdf1.empty or tempdf2.empty:
logger.info("skipping %s in %s", gene, genome)
else:
subset1 = tempdf1.loc[
(tempdf1["identity_perc"] > min_percent)
]
subset2 = tempdf2.loc[
(tempdf2["identity_perc"] > min_percent)
]
logger.debug("grouped df shape: ")
logger.debug(tempdf1.shape)
logger.debug("grouped df2 shape: " )
logger.debug(tempdf2.shape)
if subset1.empty or subset2.empty:
logger.info("No reciprocol hits for %s in %s", gene, genome)
logger.debug(tempdf1)
logger.debug(tempdf2)
nonrecip_hits.append([gene, genome])
else:
if subset1.iloc[0]["query_id"] == subset2.iloc[0]["subject_id"]:
recip_hits.append([gene, genome])
logger.debug("Reciprocol hits for %s in %s!", gene, genome)
if subset1.iloc[0]["alignment_length"] >= \
(min_lens[subset1.iloc[0]["query_id"]] - 0):
filtered = filtered.append(subset1)
logger.info("%s in %s passed min len test!", gene, genome)
else:
pass
else:
nonrecip_hits.append([gene, genome])
logger.debug("No reciprocol hits for %s in %s",
gene, genome)
# logger.debug(subset.shape)
logger.debug("Non-reciprocal genes:")
logger.debug(nonrecip_hits)
logger.debug("Reciprocal genes:")
logger.debug(recip_hits)
logger.debug("filtered shape:")
logger.debug(filtered.shape)
return(filtered)
def checkBlastForMisjoin(df, fasta, ref_lens, BUF, flanking, logger=None):
""" results from pd.read_csv with default BLAST output 6 columns
returns a df
"""
logger.debug("length of references:")
logger.debug(ref_lens)
df['name'] = df.query_id.str.replace("_upstream", "").str.replace("_downstream", "")
# df['name2'] = df.name.str.replace("_downstream", "")
df['query_name'] = df['name'].str.split('flanking').str.get(0)
where = []
for i, row in df.iterrows():
where.append("down" if "downstream" in row['query_id'] else "up")
df['where'] = where
assert logger is not None, "must use a logger"
# print(ref_lens)
print("\n")
queries = df.query_name.unique()
# subjects = df.subject_id.unique()
sdf = df.loc[(df["alignment_length"] > (flanking * 0.9) - BUF)]
naughty_nice_list = []
for query in queries:
logger.debug("checking hits for %s", query)
tempdf = sdf.loc[(df["query_name"] == query)]
for i, row in tempdf.iterrows():
# print("outer row")
subject_start = None
# if both start the same (around 1), we have the first hit
if row["s_start"] - 1 < BUF and abs(row["q_start"] - 1) < BUF:
subject_start = row["subject_id"]
ref_len = ref_lens[row["subject_id"]]
logger.debug("checking %s and %s, len %d",
query, subject_start, ref_len)
# print(tempdf)
foundpair = False
for i, innerrow in tempdf.iterrows():
subject_len = ref_lens[innerrow["subject_id"]]
subject_end = innerrow["subject_id"]
# if hit extends to end of reference
logger.debug("subject len: %s", subject_len)
logger.debug(innerrow)
logger.debug(abs(innerrow["s_end"] - subject_len))
if (abs(innerrow["s_end"] - subject_len)) < BUF:
# if same contig
if subject_start == subject_end:
naughty_nice_list.append(
[fasta, "good", query, subject_start, subject_end])
foundpair = True
else:
naughty_nice_list.append(
[fasta, "bad", query, subject_start, subject_end]
)
foundpair = True
if not foundpair:
naughty_nice_list.append(
[fasta, "?", query, subject_start, "?"])
print("Results for %s:" % fasta)
for line in naughty_nice_list:
print("\t".join(line))
print("\n")
return(naughty_nice_list)
def write_results(df, fasta_name, outfile, logger=None):
#% parse output
assert logger is not None, "must use a logger"
logger.debug("writing out the results")
with open(outfile, "a") as outf:
outf.write("# {0} \n".format(fasta_name))
df.to_csv(outf)
def parseDirContents(dirname, ref_ext, assembly_ext):
"""retursn a tuple (ref, [assembly1, assembly2, etc])
"""
return (glob.glob(dirname + "*" + ref_ext)[0],
glob.glob(dirname + "*" + assembly_ext))
def getScanCmd(ref, outroot, other_args):
""" returns (cmd, path/to/dir/)
"""
if other_args != "":
other_args = " " + other_args # pad with space for easier testing
if ref.endswith(".gb"):
return (None, ref)
resulting_gb = os.path.join(outroot, "scan", "scannedScaffolds.gb")
return (
"ribo scan {0} --min_length 5000 -o {1}{2}".format(
ref,
os.path.join(outroot, "scan"),
other_args
), resulting_gb
)
def getSelectCmd(gb, outroot, other_args):
resulting_clusters = os.path.join(outroot, "select",
"riboSelect_grouped_loci.txt")
if other_args != "":
other_args = " " + other_args # pad with space for easier testing
return ("ribo select {0} -o {1}{2}".format(
gb,
os.path.join(outroot, "select"),
other_args
), resulting_clusters)
def getSnagCmd(scangb, cluster, flank, outroot, other_args=""):
if other_args != "":
other_args = " " + other_args # pad with space for easier testing
return ("ribo snag {0} {1} -l {2} --just_extract -o {3}{4}".format(
scangb,
cluster,
flank,
os.path.join(outroot, "snag"),
other_args
), os.path.join(outroot, "snag"))
def check_scan_select_snag_retruncodes(subreturns, logger):
if subreturns[0].returncode != 0:
logger.error("error with riboScan! Check the riboScan log files")
sys.exit(1)
if subreturns[1].returncode != 0:
logger.error("error with riboSelect! Check the riboSelect log files")
sys.exit(1)
if subreturns[2].returncode != 0:
logger.info("error with riboSnag! This often happens if " +
"the assembly doesnt reconstruct any rDNAs.")
# note the lack of sys exit
def main(args, logger=None):
if args.output is None:
args.output = os.path.dirname(
os.path.join(args.indir, "")
) + "_riboScored"
output_root = os.path.abspath(os.path.expanduser(args.output))
if not os.path.isdir(output_root):
sys.stderr.write("creating output directory %s\n" % output_root)
os.makedirs(output_root)
else:
sys.stderr.write("Output Directory already exists!\n")
sys.exit(1)
log_path = os.path.join(output_root, "riboScore.log")
if logger is None:
logger = set_up_logging(verbosity=args.verbosity,
outfile=log_path,
name=__name__)
logger.debug("All settings used:")
for k, v in sorted(vars(args).items()):
logger.debug("{0}: {1}".format(k, v))
if not os.path.isdir(os.path.join(args.indir, "")) or len(
os.listdir(os.path.join(args.indir, ""))) == 0:
logger.error("input directory doesnt exist or is empty! Exiting...")
sys.exit(1)
gb, fastas = parseDirContents(dirname=os.path.join(args.indir, ""),
ref_ext=args.ref_ext,
assembly_ext=args.assembly_ext)
# snags from reference
bs_dir1 = os.path.join(output_root, "bridgeSeeds_ref")
scancmd1, scangb1 = getScanCmd(ref=gb, outroot=bs_dir1, other_args="--name riboScore")
selectcmd1, cluster1 = getSelectCmd(gb=scangb1, outroot=bs_dir1,
other_args="-s 16S:23S")
snagcmd1, snagdir1 = getSnagCmd(scangb=scangb1, cluster=cluster1,
flank=args.flanking,
outroot=bs_dir1,
other_args="")
logger.info(
"Running riboScan, riboSelect, and riboSnag on reference: %s", gb)
report_list = []
for i in [scancmd1, selectcmd1, snagcmd1]:
if i is None:
continue
logger.debug(i)
subprocess.run(
[i],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=True)
for index, fasta in enumerate(fastas):
logger.debug("processing %s", fasta)
this_root = os.path.join(
args.output, os.path.splitext(os.path.basename(fasta))[0])
bs_dir2 = os.path.join(this_root, "bridgeSeeds_contigs")
os.makedirs(bs_dir2)
# snags from assembly
scancmd2, scangb2 = getScanCmd(ref=fasta, outroot=bs_dir2,
other_args='--name riboScore')
selectcmd2, cluster2 = getSelectCmd(gb=scangb2, outroot=bs_dir2,
other_args="-s 16S:23S")
snagcmd2, snagdir2 = getSnagCmd(scangb=scangb2, cluster=cluster2,
flank=args.flanking,
outroot=bs_dir2)
logger.info(
"Running riboScan, riboSelect, and riboSnag on " +
"%s, assembly %d of %d",
fasta, index + 1, len(fastas))
returncodes = []
for i in [scancmd2, selectcmd2, snagcmd2]:
if i is None:
continue
logger.debug(i)
returncodes.append(subprocess.run(
[i],
shell=sys.platform != "win32",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
check=False)) # we check later due to likely de novo failure
check_scan_select_snag_retruncodes(
subreturns=returncodes, logger=logger)
ref_snags = sorted(glob.glob(
snagdir1 + "/*_riboSnag.fasta"))
if args.blast_full:
full_blast_results = os.path.join(this_root, "BLAST")
os.makedirs(full_blast_results)
combined_full_snags = combine_contigs(
contigs_dir=snagdir2,
pattern="*riboSnag",
contigs_name="combinedSnags",
logger=logger)
commands, paths_to_outputs, paths_to_recip_outputs = \
make_nuc_nuc_recip_blast_cmds(
query_list=ref_snags,
subject_file=combined_full_snags,
output=full_blast_results,
logger=logger)
else:
commands = []
contig_snags = sorted(glob.glob(
os.path.join(snagdir2, "") +
"*_riboSnag.fasta"))
contig_snags_flanking = sorted(glob.glob(
os.path.join(snagdir2, "flanking_regions_output", "") +
"*_riboSnag_flanking_regions.fasta"))
logger.debug(contig_snags)
logger.debug(contig_snags_flanking)
# combine the assembly contigs
if len(contig_snags) == 0:
report_list.append("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".format(
os.path.abspath(os.path.expanduser(args.indir)), # 0
os.path.basename(fasta), # 1
len(ref_snags), # 2
0, # 3
0, # 4
0 # 5
))
continue
combined_flanking_snags = combine_contigs(
contigs_dir=os.path.join(
snagdir2, "flanking_regions_output", ""),
pattern="*riboSnag_flanking_regions",
contigs_name="combinedSnagFlanking",
logger=logger)
ref_snag_dict = {}
contig_snag_dict = {}
for snag in ref_snags:
rec = SeqIO.read(snag, "fasta")
ref_snag_dict[rec.id] = len(rec.seq)
for snag in contig_snags:
rec = SeqIO.read(snag, "fasta")
contig_snag_dict[rec.id] = len(rec.seq)
logger.debug(ref_snag_dict)
logger.debug(contig_snag_dict)
flanking_blast_results = os.path.join(this_root, "BLAST_flanking")
os.makedirs(flanking_blast_results)
f_commands, f_paths_to_outputs, f_paths_to_recip_outputs = \
make_nuc_nuc_recip_blast_cmds(
query_list=ref_snags,
subject_file=combined_flanking_snags,
output=flanking_blast_results,
logger=logger)
# check for existing blast results
pool = multiprocessing.Pool()
logger.debug("Running the following commands in parallel " +
"(this could take a while):")
logger.debug("\n" + "\n".join([x for x in commands + f_commands]))
logger.info("Running BLAST commands")
results = [
pool.apply_async(subprocess.run,
(cmd,),
{"shell": sys.platform != "win32",
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"check": True})
for cmd in commands + f_commands]
pool.close()
pool.join()
reslist = []
reslist.append([r.get() for r in results])
logger.info("Parsing BLAST results")
if args.blast_full:
merged_tab = merge_outfiles(
filelist=paths_to_outputs,
outfile=os.path.join(this_root, "merged_results.tab"))
recip_merged_tab = merge_outfiles(
filelist=paths_to_recip_outputs,
outfile=os.path.join(this_root, "recip_merged_results.tab"))
resultsdf = BLAST_tab_to_df(merged_tab)
recip_resultsdf = BLAST_tab_to_df(recip_merged_tab)
filtered_hits = filter_recip_BLAST_df(
df1=resultsdf,
df2=recip_resultsdf,
min_lens=ref_snag_dict,
min_percent=args.min_percent,
logger=logger)
write_results(
outfile=os.path.join(output_root,
"riboScore_hits_fulllength.txt"),
fasta_name=fasta,
df=filtered_hits, logger=logger)
f_merged_tab = merge_outfiles(
filelist=f_paths_to_outputs,
outfile=os.path.join(
this_root, "merged_flanking_results.tab"))
f_recip_merged_tab = merge_outfiles(
filelist=f_paths_to_recip_outputs,
outfile=os.path.join(
this_root, "recip_merged_flanking_results.tab"))
        # this currently doesn't get used
f_resultsdf = BLAST_tab_to_df(f_merged_tab)
# we use the reciprocal results
f_recip_resultsdf = BLAST_tab_to_df(f_recip_merged_tab)
# 5 columns: [fasta, good/bad/?, query, startseq, end_seq]
flanking_hits = checkBlastForMisjoin(
fasta=fasta,
df=f_recip_resultsdf,
ref_lens=ref_snag_dict,
flanking=args.flanking,
BUF=50, logger=logger)
with open(os.path.join(output_root, "riboScore_hits.txt"), "a") as f:
for line in flanking_hits:
f.write("\t".join(line) + "\n")
good_hits = 0 + sum([1 for x in flanking_hits if x[1] == "good"])
ambig_hits = 0 + sum([1 for x in flanking_hits if x[1] == "?"])
bad_hits = 0 + sum([1 for x in flanking_hits if x[1] == "bad"])
report_list.append("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".format(
os.path.abspath(os.path.expanduser(args.indir)), # 0
os.path.basename(fasta), # 1
len(ref_snags), # 2
good_hits, # 3
ambig_hits, # 4
bad_hits # 5
))
logger.debug("report list:")
logger.debug(report_list)
with open(os.path.join(output_root, "riboScore_report.txt"), "a") as r:
for line in report_list:
r.write(line)
| mit | -2,896,412,737,833,908,000 | 41.045061 | 150 | 0.530214 | false |
great-expectations/great_expectations | great_expectations/expectations/core/expect_column_min_to_be_between.py | 1 | 9139 | from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
handle_strict_min_max,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import ColumnExpectation
class ExpectColumnMinToBeBetween(ColumnExpectation):
"""Expect the column minimum to be between an min and max value
expect_column_min_to_be_between is a \
:func:`column_aggregate_expectation
<great_expectations.execution_engine.MetaExecutionEngine.column_aggregate_expectation>`.
Args:
column (str): \
The column name
min_value (comparable type or None): \
The minimal column minimum allowed.
max_value (comparable type or None): \
The maximal column minimum allowed.
strict_min (boolean):
If True, the minimal column minimum must be strictly larger than min_value, default=False
strict_max (boolean):
If True, the maximal column minimum must be strictly smaller than max_value, default=False
Keyword Args:
parse_strings_as_datetimes (Boolean or None): \
            If True, parse min_value, max_value, and all non-null column values to datetimes before making \
comparisons.
output_strftime_format (str or None): \
            A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"observed_value": (list) The actual column min
}
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound
* If max_value is None, then min_value is treated as a lower bound
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column aggregate expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
}
    # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values
metric_dependencies = ("column.min",)
success_keys = ("min_value", "strict_min", "max_value", "strict_max")
# Default values
default_kwarg_values = {
"min_value": None,
"max_value": None,
"strict_min": None,
"strict_max": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration=configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"min_value",
"max_value",
"parse_strings_as_datetimes",
"row_condition",
"condition_parser",
"strict_min",
"strict_max",
],
)
if (params["min_value"] is None) and (params["max_value"] is None):
template_str = "minimum value may have any numerical value."
else:
at_least_str, at_most_str = handle_strict_min_max(params)
if params["min_value"] is not None and params["max_value"] is not None:
template_str = f"minimum value must be {at_least_str} $min_value and {at_most_str} $max_value."
elif params["min_value"] is None:
template_str = f"minimum value must be {at_most_str} $max_value."
elif params["max_value"] is None:
template_str = f"minimum value must be {at_least_str} $min_value."
if params.get("parse_strings_as_datetimes"):
template_str += " Values should be parsed as datetimes."
if include_column_name:
template_str = "$column " + template_str
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = conditional_template_str + ", then " + template_str
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
@classmethod
@renderer(renderer_type="renderer.descriptive.stats_table.min_row")
def _descriptive_stats_table_min_row_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
assert result, "Must pass in result."
return [
{
"content_block_type": "string_template",
"string_template": {
"template": "Minimum",
"tooltip": {"content": "expect_column_min_to_be_between"},
},
},
"{:.2f}".format(result.result["observed_value"]),
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
return self._validate_metric_value_between(
metric_name="column.min",
configuration=configuration,
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
| apache-2.0 | -5,748,120,654,164,605,000 | 39.617778 | 120 | 0.586497 | false |
bmun/huxley | huxley/core/admin/committee_feedback.py | 1 | 4452 | # Copyright (c) 2011-2021 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import csv
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
from googleapiclient.discovery import build
from google.oauth2 import service_account
from huxley.core.models import CommitteeFeedback
class CommitteeFeedbackAdmin(admin.ModelAdmin):
search_fields = ('committee__name', )
def get_rows(self):
rows = []
rows.append([
'Committee', 'General Rating', 'General Comment', 'Chair 1',
'Chair 1 Rating', 'Chair 1 Comment', 'Chair 2 Name',
'Chair 2 Rating', 'Chair 2 Comment', 'Chair 3 Name',
'Chair 3 Rating', 'Chair 3 Comment', 'Chair 4 Name',
'Chair 4 Rating', 'Chair 4 Comment', 'Chair 5 Name',
'Chair 5 Rating', 'Chair 5 Comment', 'Chair 6 Name',
'Chair 6 Rating', 'Chair 6 Comment', 'Chair 7 Name',
'Chair 7 Rating', 'Chair 7 Comment', 'Chair 8 Name',
'Chair 8 Rating', 'Chair 8 Comment', 'Chair 9 Name',
'Chair 9 Rating', 'Chair 9 Comment', 'Chair 10 Name',
'Chair 10 Rating', 'Chair 10 Comment', 'Perception of Berkeley',
'Money Spent'
])
for feedback in CommitteeFeedback.objects.all().order_by(
'committee__name'):
rows.append([
feedback.committee.name, feedback.rating, feedback.comment,
feedback.chair_1_name, feedback.chair_1_rating,
feedback.chair_1_comment, feedback.chair_2_name,
feedback.chair_2_rating, feedback.chair_2_comment,
feedback.chair_3_name, feedback.chair_3_rating,
feedback.chair_3_comment, feedback.chair_4_name,
feedback.chair_4_rating, feedback.chair_4_comment,
feedback.chair_5_name, feedback.chair_5_rating,
feedback.chair_5_comment, feedback.chair_6_name,
feedback.chair_6_rating, feedback.chair_6_comment,
feedback.chair_7_name, feedback.chair_7_rating,
feedback.chair_7_comment, feedback.chair_8_name,
feedback.chair_8_rating, feedback.chair_8_comment,
feedback.chair_9_name, feedback.chair_9_rating,
feedback.chair_9_comment, feedback.chair_10_name,
feedback.chair_10_rating, feedback.chair_10_comment,
feedback.berkeley_perception, feedback.money_spent
])
return rows
def list(self, request):
'''Return a CSV file containing all committee feedback.'''
feedbacks = HttpResponse(content_type='text/csv')
feedbacks[
'Content-Disposition'] = 'attachment; filename="feedback.csv"'
writer = csv.writer(feedbacks)
for row in self.get_rows():
writer.writerow(row)
return feedbacks
def sheets(self, request):
if settings.SHEET_ID:
SHEET_RANGE = 'Feedback!A1:AI'
# Store credentials
creds = service_account.Credentials.from_service_account_file(
settings.SERVICE_ACCOUNT_FILE, scopes=settings.SCOPES)
data = self.get_rows()
body = {
'values': data,
}
service = build('sheets', 'v4', credentials=creds)
response = service.spreadsheets().values().clear(
spreadsheetId=settings.SHEET_ID,
range=SHEET_RANGE,
).execute()
response = service.spreadsheets().values().update(
spreadsheetId=settings.SHEET_ID,
range=SHEET_RANGE,
valueInputOption='USER_ENTERED',
body=body).execute()
return HttpResponseRedirect(
reverse('admin:core_committeefeedback_changelist'))
def get_urls(self):
return super(CommitteeFeedbackAdmin, self).get_urls() + [
url(r'list',
self.admin_site.admin_view(self.list),
name='core_committeefeedback_list'),
url(
r'sheets',
self.admin_site.admin_view(self.sheets),
name='core_committeefeedback_sheets',
),
]
| bsd-3-clause | -1,210,776,847,596,304,000 | 39.472727 | 77 | 0.590296 | false |
hankcs/udacity-deep-learning | 2_fullyconnected.py | 1 | 12463 | # coding: utf-8
# Deep Learning
# =============
#
# Assignment 2
# ------------
#
# Previously in `1_notmnist.ipynb`, we created a pickle with formatted datasets for training, development and testing on the [notMNIST dataset](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html).
#
# The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.
# In[ ]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# First reload the data we generated in `1_notmnist.ipynb`.
# In[ ]:
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# Reformat into a shape that's more adapted to the models we're going to train:
# - data as a flat matrix,
# - labels as float 1-hot encodings.
# In[ ]:
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
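  # Illustrative note on the broadcast comparison above: np.arange(num_labels) is
  # compared against the column vector labels[:, None], so each row is matched
  # against 0..num_labels-1; e.g. label 2 with num_labels = 4 gives [0., 0., 1., 0.].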
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# We're first going to train a multinomial logistic regression using simple gradient descent.
#
# TensorFlow works like this:
# * First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below:
#
# with graph.as_default():
# ...
#
# * Then you can run the operations on this graph as many times as you want by calling `session.run()`, providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below:
#
# with tf.Session(graph=graph) as session:
# ...
#
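# A minimal sketch of the describe-then-run pattern (illustrative only; it uses
# the same TF 0.x/1.x-era API as the rest of this script and is not part of the
# assignment code):
#
#     toy_graph = tf.Graph()
#     with toy_graph.as_default():
#         doubled = tf.constant(3.0) * 2.0   # describe the computation
#     with tf.Session(graph=toy_graph) as session:
#         print(session.run(doubled))        # run it: prints 6.0
#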
# Let's load all the data into TensorFlow and build the computation graph corresponding to our training:
# In[ ]:
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
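  # In formula form (illustrative note only): for a batch of N examples with
  # one-hot labels y, loss = -(1/N) * sum_i log(softmax(X_i W + b)[class_i]),
  # which is exactly the mean cross-entropy computed by the two lines below.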
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
# Let's run this computation and iterate:
# In[ ]:
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
# Let's now switch to stochastic gradient descent training instead, which is much faster.
#
# The graph will be similar, except that instead of holding all the training data in a constant node, we create a `Placeholder` node which will be fed actual data at every call of `session.run()`.
# In[ ]:
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
# Let's run it:
# In[ ]:
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
# ---
# Problem
# -------
#
# Turn the logistic regression example with SGD into a 1-hidden layer neural network
# with rectified linear units [nn.relu()](https://www.tensorflow.org/versions/r0.7/api_docs/python/nn.html#relu)
# and 1024 hidden nodes. This model should improve your validation / test accuracy.
#
# ---
batch_size = 128
hidden_size = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
W1 = tf.Variable(tf.truncated_normal([image_size * image_size, hidden_size]))
b1 = tf.Variable(tf.zeros([hidden_size]))
W2 = tf.Variable(tf.truncated_normal([hidden_size, num_labels]))
b2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
y1 = tf.nn.relu(tf.matmul(tf_train_dataset, W1) + b1)
logits = tf.matmul(y1, W2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
y1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, W1) + b1)
valid_logits = tf.matmul(y1_valid, W2) + b2
valid_prediction = tf.nn.softmax(valid_logits)
y1_test = tf.nn.relu(tf.matmul(tf_test_dataset, W1) + b1)
test_logits = tf.matmul(y1_test, W2) + b2
test_prediction = tf.nn.softmax(test_logits)
# Let's run it:
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
| gpl-3.0 | 6,008,777,394,889,813,000 | 40.405316 | 243 | 0.672872 | false |
uw-it-aca/uw-restclients | restclients/tests.py | 1 | 6019 | from restclients.test.uwnetid.password import UwPasswordTest
from restclients.test.uwnetid.subscription import EmailForwardingTest
from restclients.test.uwnetid.subscription_60 import KerberosSubsTest
from restclients.test.uwnetid.subscription_233 import Office365EduSubsTest
from restclients.test.uwnetid.subscription import NetidSubscriptionTest
from restclients.test.uwnetid.subscription import NetidPostSubscriptionTest
from restclients.test.util.date_formator import FormatorTest
from restclients.test.util.datetime_convertor import DatetimeConvertorTest
from restclients.test.util.retry import RetryTest
from restclients.test.bridge.models import TestBridgeModel
from restclients.test.bridge.user import TestBridgeUser
from restclients.test.bridge.custom_field import TestBridgeCustomFields
from restclients.test.hfs.idcard import HfsTest
from restclients.test.hrpws.appointee import AppointeeTest
from restclients.test.mailman.basic_list import TestMailmanBasicList
from restclients.test.mailman.instructor_term_list import\
TestMailmanInstructorList
from restclients.test.mailman.course_list import TestMailmanCourseLists
from restclients.test.library.mylibinfo import MyLibInfoTest
from restclients.test.library.currics import CurricsTest
from restclients.test.grad.committee import CommitteeTest
from restclients.test.grad.degree import DegreeTest
from restclients.test.grad.leave import LeaveTest
from restclients.test.grad.petition import PetitionTest
from restclients.test.sws.compatible import SWSTest
from restclients.test.sws.financial import SWSFinance
from restclients.test.sws.notice import SWSNotice
from restclients.test.sws.term import SWSTestTerm
from restclients.test.sws.err404.dao import SWSTestDAO404
from restclients.test.sws.err500.dao import SWSTestDAO500
from restclients.test.sws.invalid_dao import SWSTestInvalidDAO
from restclients.test.sws.file_implementation.dao import SWSTestFileDAO
from restclients.test.sws.schedule_data import SWSTestScheduleData
from restclients.test.sws.enrollment import SWSTestEnrollments
from restclients.test.sws.section import SWSTestSectionData
from restclients.test.sws.section_status import SWSTestSectionStatusData
from restclients.test.sws.independent_study import SWSIndependentStudy
from restclients.test.sws.instructor_no_regid import SWSMissingRegid
from restclients.test.sws.registrations import SWSTestRegistrations
from restclients.test.sws.campus import SWSTestCampus
from restclients.test.sws.college import SWSTestCollege
from restclients.test.sws.department import SWSTestDepartment
from restclients.test.sws.curriculum import SWSTestCurriculum
from restclients.test.sws.graderoster import SWSTestGradeRoster
from restclients.test.sws.dates import SWSTestDates
from restclients.test.pws.person import PWSTestPersonData
from restclients.test.pws.entity import PWSTestEntityData
from restclients.test.pws.card import IdCardTestCard
from restclients.test.pws.photo import IdCardTestPhoto
from restclients.test.pws.err404.dao import PWSTestDAO404
from restclients.test.pws.err404.pws import PWSTest404
from restclients.test.pws.err500.dao import PWSTestDAO500
from restclients.test.pws.err500.pws import PWSTest500
from restclients.test.pws.invalid_dao import PWSTestInvalidDAO
from restclients.test.pws.file_implementation.dao import PWSTestFileDAO
from restclients.test.kws.key import KWSTestKeyData
from restclients.test.gws.group import GWSGroupBasics
from restclients.test.gws.course_group import GWSCourseGroupBasics
from restclients.test.gws.search import GWSGroupSearch
from restclients.test.cache.none import NoCacheTest
from restclients.test.cache.time import TimeCacheTest
from restclients.test.cache.etag import ETagCacheTest
from restclients.test.cache.memcached import MemcachedCacheTest
from restclients.test.book.by_schedule import BookstoreScheduleTest
from restclients.test.amazon_sqs.queues import SQSQueue
from restclients.test.sms.send import SMS
from restclients.test.sms.invalid_phone_number import SMSInvalidNumbers
from restclients.test.nws.subscription import NWSTestSubscription
from restclients.test.nws.channel import NWSTestChannel
from restclients.test.nws.endpoint import NWSTestEndpoint
from restclients.test.nws.person import NWSTestPerson
from restclients.test.canvas.enrollments import CanvasTestEnrollment
from restclients.test.canvas.accounts import CanvasTestAccounts
from restclients.test.canvas.admins import CanvasTestAdmins
from restclients.test.canvas.roles import CanvasTestRoles
from restclients.test.canvas.courses import CanvasTestCourses
from restclients.test.canvas.sections import CanvasTestSections
from restclients.test.canvas.bad_sis_ids import CanvasBadSISIDs
from restclients.test.canvas.terms import CanvasTestTerms
from restclients.test.canvas.users import CanvasTestUsers
from restclients.test.canvas.submissions import CanvasTestSubmissions
from restclients.test.canvas.assignments import CanvasTestAssignments
from restclients.test.canvas.quizzes import CanvasTestQuizzes
from restclients.test.canvas.external_tools import CanvasTestExternalTools
from restclients.test.catalyst.gradebook import CatalystTestGradebook
from restclients.test.trumba.accounts import TrumbaTestAccounts
from restclients.test.trumba.calendar import TestCalendarParse
from restclients.test.trumba.calendars import TrumbaTestCalendars
from restclients.test.gws.trumba_group import TestGwsTrumbaGroup
from restclients.test.r25.events import R25TestEvents
from restclients.test.r25.spaces import R25TestSpaces
from restclients.test.myplan import MyPlanTestData
from restclients.test.o365.user import O365TestUser
from restclients.test.o365.license import O365TestLicense
from restclients.test.thread import ThreadsTest
from restclients.test.view import ViewTest
from restclients.test.delay import DegradedTestCase
from restclients.test.dao_implementation.mock import TestMock
from restclients.test.iasystem.evaluation import IASystemTest
from restclients.test.upass import UPassTest
| apache-2.0 | 3,110,429,502,172,440,000 | 48.743802 | 75 | 0.875395 | false |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/groupby/test_categorical.py | 1 | 39673 | from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import PY37
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas.util.testing as tm
from pandas.util.testing import assert_equal, assert_frame_equal, assert_series_equal
def cartesian_product_for_groupers(result, args, names):
""" Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper """
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index).sort_index()
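# Rough usage sketch (illustrative only): for groupers
# Categorical(["a", "b"], categories=["a", "b", "c"]) and [1, 2], the helper
# reindexes an aggregated frame onto the full 3 x 2 product of categories and
# values, so unobserved categories show up as all-NaN rows in the expected
# output of the non-observed groupby tests below.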
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. Doe"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
assert_frame_equal(result, expected)
# GH#21636 flaky on py37; may be related to older numpy, see discussion
# https://github.com/MacPython/pandas-wheels/pull/64
@pytest.mark.xfail(PY37, reason="Flaky, GH-27902", strict=False)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC")
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(expected, [cat1, cat2], list("AB"))
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = pd.Series([1, 2, 3])
df = pd.DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = pd.Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
+ "for (ordered={}, observed={}, sort={})\n"
+ "Result:\n{}"
).format(ordered, observed, sort, result)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take_nd(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
with pytest.raises(ValueError):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True)
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False)
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False)
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = pd.Categorical(["first", "second", "third", "fourth"], ordered=True)
df = pd.DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = pd.DataFrame(
{"payload": [-2, -1], "col": pd.Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_sort():
# http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby # noqa: E501
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use same data as test_groupby_sort_categorical, which category is
# corresponding to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
assert_frame_equal(result_sort, df.groupby(col, sort=True, observed=False).first())
# when categories is ordered, group is ordered by category's order
assert_frame_equal(result_sort, df.groupby(col, sort=False, observed=False).first())
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
assert_frame_equal(result_sort, df.groupby(col, sort=True, observed=False).first())
assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("abcbabcba")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", dict(dtype="category")), ("apply", dict())]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: OrderedDict([("min", x.min()), ("max", x.max())])
)
assert_series_equal(result, expected)
| apache-2.0 | 5,703,122,923,592,596,000 | 32.422915 | 111 | 0.56149 | false |
Rhoana/membrane_cnn | assess_offset_smooth_average_ws.py | 1 | 4006 | import mahotas
import scipy.ndimage
import scipy.misc
import numpy as np
import gzip
import cPickle
import glob
import os
import h5py
import partition_comparison
# The names used below (arange, Inf, figsize, imshow, plot, cm, xlabel, ...)
# assume an interactive IPython "%pylab" session; import them explicitly so
# the script can also run standalone.
from pylab import *
from IPython.core.pylabtools import figsize
import matplotlib.pyplot as plt
#param_path = 'D:/dev/Rhoana/membrane_cnn/results/good3/'
param_path = 'D:/dev/Rhoana/membrane_cnn/results/stumpin/'
param_files = glob.glob(param_path + "*.h5")
target_boundaries = mahotas.imread(param_path + 'boundaries.png') > 0
offset_max = 32
target_boundaries = target_boundaries[offset_max:-offset_max,offset_max:-offset_max]
target_segs = np.uint32(mahotas.label(target_boundaries)[0])
param_files = [x for x in param_files if x.find('.ot.h5') == -1]
average_result = np.zeros(target_boundaries.shape, dtype=np.float32)
nresults = 0
blur_radius = 3;
y,x = np.ogrid[-blur_radius:blur_radius+1, -blur_radius:blur_radius+1]
disc = x*x + y*y <= blur_radius*blur_radius
for param_file in param_files:
if param_file.find('.ot.h5') != -1:
continue
print param_file
#net_output_file = param_file.replace('.h5','\\0005_classify_output_layer6_0.tif')
net_output_file = param_file.replace('.h5','\\0100_classify_output_layer6_0.tif')
net_output = mahotas.imread(net_output_file)
net_output = np.float32(net_output) / np.max(net_output)
offset_file = param_file.replace('.h5', '.sm.ot.h5')
h5off = h5py.File(offset_file, 'r')
best_offset = h5off['/best_offset'][...]
best_sigma = h5off['/best_sigma'][...]
h5off.close()
xoffset, yoffset = best_offset
offset_output = scipy.ndimage.filters.gaussian_filter(net_output, float(best_sigma))
offset_output = np.roll(offset_output, xoffset, axis=0)
offset_output = np.roll(offset_output, yoffset, axis=1)
#Crop
offset_output = offset_output[offset_max:-offset_max,offset_max:-offset_max]
average_result += offset_output
nresults += 1
average_result = average_result / nresults
sigma_range = arange(0, 3, 0.5)
thresh_range = arange(0.05,0.7,0.02)
sigma_range = [0]
#thresh_range = [0.3]
all_voi_results = []
for smooth_sigma in sigma_range:
best_score = Inf
best_sigma = 0
best_thresh = 0
best_result = None
smooth_output = scipy.ndimage.filters.gaussian_filter(average_result, smooth_sigma)
max_smooth = 2 ** 16 - 1
smooth_output = np.uint16((1 - smooth_output) * max_smooth)
thresh_voi_results = []
for thresh in thresh_range:
below_thresh = smooth_output < np.uint16(max_smooth * thresh)
#below_thresh = mahotas.morph.close(below_thresh.astype(np.bool), disc)
#below_thresh = mahotas.morph.open(below_thresh.astype(np.bool), disc)
seeds,nseeds = mahotas.label(below_thresh)
if nseeds == 0:
continue
ws = np.uint32(mahotas.cwatershed(smooth_output, seeds))
voi_score = partition_comparison.variation_of_information(target_segs.ravel(), ws.ravel())
thresh_voi_results.append(voi_score)
print 's={0:0.2f}, t={1:0.2f}, voi_score={2:0.4f}.'.format(smooth_sigma, thresh, voi_score)
dx, dy = np.gradient(ws)
result = np.logical_or(dx!=0, dy!=0)
figsize(20,20)
imshow(result, cmap=cm.gray)
plt.show()
if voi_score < best_score:
best_score = voi_score
best_sigma = smooth_sigma
best_thresh = thresh
dx, dy = np.gradient(ws)
best_result = np.logical_or(dx!=0, dy!=0)
all_voi_results.append(thresh_voi_results)
# figsize(20,20)
# imshow(best_result, cmap=cm.gray)
# plt.show()
print 'Best VoI score of {0} with {3} segments for sigma {1}, thresh {2}.'.format(best_score, best_sigma, best_thresh, nseeds)
plot_list = []
for voi_results in all_voi_results:
handle = plot(thresh_range, voi_results)[0]
plot_list.append(handle)
xlabel('Threshold')
ylabel('VoI Score')
legend(plot_list, [str(x) for x in sigma_range])
plt.show
figsize(20,20);
imshow(average_result,cmap=cm.gray)
# figsize(20,20);
# imshow(best_result,cmap=cm.gray)
| bsd-3-clause | -5,099,422,390,217,684,000 | 26.627586 | 130 | 0.656515 | false |
Aziiri-dev/kivychat | chatbox.py | 1 | 3631 | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from tabbox import TabBox
from utils import TextBoxLabel
# ============================================================================
class MessageTextInput(TextInput):
"""Adapted version of TextInput that handles SHIFT-ENTER and ENTER
for multi-line input and sending a message."""
def __init__(self, *args, **kwargs):
self.register_event_type('on_enter')
super(MessageTextInput, self).__init__(*args, **kwargs)
def keyboard_on_key_down(self, window, keycode, text, modifiers):
super(MessageTextInput, self).keyboard_on_key_down(window, keycode,
text, modifiers)
if keycode[1] == 'enter' and not modifiers:
self.dispatch('on_enter')
def on_enter(self, *args):
pass
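    # 'on_enter' is a custom event: it must be registered in __init__ (see
    # register_event_type above) and needs this default no-op handler so that
    # dispatch('on_enter') is valid; callers subscribe with
    # widget.bind(on_enter=callback), as MessageBox does below.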
class MessageBox(BoxLayout):
def __init__(self, userid, *args, **kwargs):
self.userid = userid
super(MessageBox, self).__init__(*args, **kwargs)
# create a grid for the scroll view to contain things
self.layout = GridLayout(cols=1, padding=(10, 15), spacing=8,
size_hint=(1, None))
self.layout.bind(minimum_height=self.layout.setter('height'))
self.ids.scroller.add_widget(self.layout)
self.ids.message_input.bind(on_enter=self.send_message)
def send_message(self, instance):
text = self.ids.message_input.text.rstrip('\r\n')
if text:
app = App.get_running_app()
app.root_box.chat_client.send_chat(self.userid, text)
self.add_message(text)
self.ids.message_input.text = ''
def add_message(self, text, msg_from=None):
if not msg_from:
msg_from = 'me'
from_color = 'ff0000'
else:
from_color = '0000ff'
text = '[color=%s]%s: [/color]%s' % (from_color, msg_from, text)
label = TextBoxLabel(text=text)
self.layout.add_widget(label)
self.ids.scroller.scroll_y = 0
class ChatBox(TabBox):
def __init__(self, *args, **kwargs):
super(ChatBox, self).__init__(*args, **kwargs)
self.chats = {}
def add_chat(self, friend, switch_to=True):
if friend.userid not in self.chats:
mbox = MessageBox(friend.userid)
tab = self.add_tab(friend.userid, friend.name)
tab.bind(on_activate=self.tab_activated)
self.chats[friend.userid] = {
'friend':friend,
'name':friend.name,
'message_box':mbox,
}
friend.bind(name=self.name_changed)
container = self.get_content_widget(friend.userid)
container.add_widget(mbox)
if switch_to:
self.switch_tab(friend.userid)
def remove_tab(self, userid):
super(ChatBox, self).remove_tab(userid)
del self.chats[userid]
if len(self.tabs) == 0:
app = App.get_running_app()
app.root_box.menu_remove_chat()
def tab_activated(self, instance, *args):
# clear the message counter for the friend that owns this tab
self.chats[instance.name]['friend'].message_count = 0
def name_changed(self, instance, *args):
tab = self.tabs[instance.userid]['tab']
tab.ids.tab_label.text = instance.name
def chat_received(self, friend, msg):
self.add_chat(friend, switch_to=False)
message_box = self.chats[friend.userid]['message_box']
message_box.add_message(msg, friend.base_name)
| mit | -6,658,644,810,960,035,000 | 33.254717 | 78 | 0.593776 | false |
ProgVal/PyCorewar | setup.py | 1 | 3375 | #!/usr/bin/env python
#
# Copyright (C) 2006 Jens Gutzeit <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Usage: python setup.py install
from distutils import core
from distutils.core import Extension
import sys
# General information about PyCorewar.
NAME = 'Corewar'
VERSION = '0.2.0'
DESCRIPTION = 'A fast MARS (Memory Array Redcode Simulator) for debugging '\
'and benchmarking'
AUTHOR_NAME = 'Jens Gutzeit'
AUTHOR_EMAIL = '[email protected]'
HOMEPAGE = 'http://corewars.jgutzeit.de/pycorewar/index.en.html'
DOWNLOAD = 'http://corewars.jgutzeit.de/pycorewar/download/'\
           'PyCorewar-%s.tar.bz2' % VERSION
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Topic :: Simulation',
'Programming Language :: C',
'Programming Language :: Python']
PLATFORMS = 'Python 2.6 and later.'
# FIXME: extra compiler arguments
# EXTRA_COMPILE_ARGS = ['-O6', '-funroll-all-loops']
EXTRA_COMPILE_ARGS = []
# Check Python version
if sys.version_info < (2, 6):
raise RuntimeError('PyCorewar requires at least Python 2.6 to build.')
core.setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR_NAME,
author_email=AUTHOR_EMAIL,
url=HOMEPAGE,
download_url=DOWNLOAD,
classifiers=CLASSIFIERS,
platforms=PLATFORMS,
packages=['Corewar', ],
package_dir={'Corewar': 'corewar'},
py_modules=['Corewar.Debugging', ],
ext_modules=[Extension('Corewar.Redcode',
include_dirs=['src/'],
sources=['src/Redcodemodule.c',],
extra_compile_args=EXTRA_COMPILE_ARGS),
Extension('Corewar._Debugging',
include_dirs=['src/'],
sources=['src/ProcessQueue.c',
'src/Core.c',
'src/_Debuggingmodule.c'],
extra_compile_args=EXTRA_COMPILE_ARGS),
Extension('Corewar.Benchmarking',
include_dirs=['src/'],
sources=['src/BenchWarrior.c',
'src/BenchPositioning.c',
'src/BenchMARS88.c',
'src/BenchMARS94nop.c',
'src/Benchmarkingmodule.c'],
extra_compile_args=EXTRA_COMPILE_ARGS)
],
)
| gpl-2.0 | 3,067,631,760,320,872 | 38.705882 | 77 | 0.589037 | false |
EmadMokhtar/Django | django/http/request.py | 11 | 21619 | import copy
import re
from io import BytesIO
from itertools import chain
from urllib.parse import quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost, ImproperlyConfigured, RequestDataTooBig,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping, ImmutableList, MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, limited_parse_qsl
RAISE_ERROR = object()
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$")
class UnreadablePostError(OSError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
FILES, etc..
"""
pass
class HttpRequest:
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self.content_type = None
self.content_params = None
def __repr__(self):
if self.method is None or not self.get_full_path():
return '<%s>' % self.__class__.__name__
return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path())
@cached_property
def headers(self):
return HttpHeaders(self.META)
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
allowed hosts protection, so may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = self.get_port()
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = ['localhost', '127.0.0.1', '[::1]']
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += " The domain name provided is not valid according to RFC 1034/1035."
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META:
port = self.META['HTTP_X_FORWARDED_PORT']
else:
port = self.META['SERVER_PORT']
return str(port)
def get_full_path(self, force_append_slash=False):
return self._get_full_path(self.path, force_append_slash)
def get_full_path_info(self, force_append_slash=False):
return self._get_full_path(self.path_info, force_append_slash)
def _get_full_path(self, path, force_append_slash):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s%s' % (
escape_uri_path(path),
'/' if force_append_slash and not path.endswith('/') else '',
('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempt to return a signed cookie. If the signature fails or the
cookie has expired, raise an exception, unless the `default` argument
is provided, in which case return that value.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def get_raw_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so may return insecure URI.
"""
return '{scheme}://{host}{path}'.format(
scheme=self.scheme,
host=self._get_raw_host(),
path=self.get_full_path(),
)
def build_absolute_uri(self, location=None):
"""
Build an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, build the absolute URI
using request.get_full_path(). If the location is absolute, convert it
to an RFC 3987 compliant URI and return it. If location is relative or
is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = '//%s' % self.get_full_path()
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
# Handle the simple, most common case. If the location is absolute
# and a scheme or host (netloc) isn't provided, skip an expensive
# urljoin() as long as no path segments are '.' or '..'.
if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and
'/./' not in bits.path and '/../' not in bits.path):
# If location starts with '//' but has no netloc, reuse the
# schema and netloc from the current request. Strip the double
# slashes and continue as if it wasn't specified.
if location.startswith('//'):
location = location[2:]
location = self._current_scheme_host + location
else:
# Join the constructed URL with the provided location, which
# allows the provided location to apply query strings to the
# base path.
location = urljoin(self._current_scheme_host + self.path, location)
return iri_to_uri(location)
@cached_property
def _current_scheme_host(self):
return '{}://{}'.format(self.scheme, self.get_host())
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Return 'http' by
default.
"""
return 'http'
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
)
if self.META.get(header) == value:
return 'https'
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, 'GET'):
del self.GET
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
# Limit the maximum request data size that will be handled in-memory.
if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and
int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE):
raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.')
try:
self._body = self.read()
except OSError as e:
raise UnreadablePostError(*e.args) from e
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.content_type == 'multipart/form-data':
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
self._mark_post_parse_error()
raise
elif self.content_type == 'application/x-www-form-urlencoded':
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict()
def close(self):
if hasattr(self, '_files'):
for f in chain.from_iterable(l[1] for l in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def __iter__(self):
return iter(self.readline, b'')
def readlines(self):
return list(self)
class HttpHeaders(CaseInsensitiveMapping):
HTTP_PREFIX = 'HTTP_'
# PEP 333 gives two headers which aren't prepended with HTTP_.
UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'}
def __init__(self, environ):
headers = {}
for header, value in environ.items():
name = self.parse_header_name(header)
if name:
headers[name] = value
super().__init__(headers)
@classmethod
def parse_header_name(cls, header):
if header.startswith(cls.HTTP_PREFIX):
header = header[len(cls.HTTP_PREFIX):]
elif header not in cls.UNPREFIXED_HEADERS:
return None
return header.replace('_', '-').title()
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to str.
"""
# These are both reset in __init__, but is specified here at the class
# level so that unpickling will have valid values
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super().__init__()
self.encoding = encoding or settings.DEFAULT_CHARSET
query_string = query_string or ''
parse_qsl_kwargs = {
'keep_blank_values': True,
'fields_limit': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
'encoding': self.encoding,
}
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(self.encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode('iso-8859-1')
for key, value in limited_parse_qsl(query_string, **parse_qsl_kwargs):
self.appendlist(key, value)
self._mutable = mutable
@classmethod
def fromkeys(cls, iterable, value='', mutable=False, encoding=None):
"""
Return a new QueryDict with keys (may be repeated) from an iterable and
values from value.
"""
q = cls('', mutable=True, encoding=encoding)
for key in iterable:
q.appendlist(key, value)
if not mutable:
q._mutable = False
return q
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super().__delitem__(key)
def __copy__(self):
result = self.__class__('', mutable=True, encoding=self.encoding)
for key, value in self.lists():
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__('', mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in self.lists():
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super().setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super().setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super().pop(key, *args)
def popitem(self):
self._assert_mutable()
return super().popitem()
def clear(self):
self._assert_mutable()
super().clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super().setdefault(key, default)
def copy(self):
"""Return a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Return an encoded string of all query string arguments.
`safe` specifies characters which don't require quoting, for example::
>>> q = QueryDict(mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = safe.encode(self.encoding)
def encode(k, v):
return '%s=%s' % ((quote(k, safe), quote(v, safe)))
else:
def encode(k, v):
return urlencode({k: v})
for k, list_ in self.lists():
output.extend(
encode(k.encode(self.encoding), str(v).encode(self.encoding))
for v in list_
)
return '&'.join(output)
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_str() for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
"""
Convert bytes objects to strings, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Return any non-bytes objects without change.
"""
if isinstance(s, bytes):
return str(s, encoding, 'replace')
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lowercased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return '', ''
if host[-1] == ']':
# It's an IPv6 address without a port.
return host, ''
bits = host.rsplit(':', 1)
domain, port = bits if len(bits) == 2 else (bits[0], '')
# Remove a trailing dot (if present) from the domain.
domain = domain[:-1] if domain.endswith('.') else domain
return domain, port
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lowercased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
return any(pattern == '*' or is_same_domain(host, pattern) for pattern in allowed_hosts)
| mit | 8,745,942,704,095,478,000 | 35.456998 | 110 | 0.596882 | false |
boundary/ZenPacks.boundary.EventAdapter | setup.py | 1 | 2248 | NAME = "ZenPacks.boundary.EventAdapter"
VERSION = "1.0.0"
AUTHOR = "Boundary"
LICENSE = "Apache v2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.boundary']
PACKAGES = ['ZenPacks', 'ZenPacks.boundary', 'ZenPacks.boundary.EventAdapter']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ">=4.2"
PREV_ZENPACK_NAME = ""
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name=NAME,
version=VERSION,
author=AUTHOR,
license=LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers=COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
    # installed then a zenpack of this name, if installed, will be upgraded.
prevZenPackName=PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages=NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages=find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data=True,
# Tell setuptools what non-python files should also be included
# with the binary egg.
#package_data = {}
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to add/delete the names it manages at the beginning of this
# list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires=INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points={
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe=False,
) | apache-2.0 | 9,127,007,901,335,010,000 | 35.274194 | 78 | 0.714858 | false |
jayme-github/headphones | headphones/__init__.py | 1 | 22431 | # This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
# NZBGet support added by CurlyMo <[email protected]> as a part of
# XBian - XBMC on the Raspberry Pi
import sys
import subprocess
import threading
import webbrowser
import sqlite3
import datetime
import os
import cherrypy
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
from headphones import versioncheck, logger
import headphones.config
# (append new extras to the end)
POSSIBLE_EXTRAS = [
"single",
"ep",
"compilation",
"soundtrack",
"live",
"remix",
"spokenword",
"audiobook",
"other",
"dj-mix",
"mixtape/street",
"broadcast",
"interview",
"demo"
]
PROG_DIR = None
FULL_PATH = None
ARGS = None
SIGNAL = None
SYS_PLATFORM = None
SYS_ENCODING = None
QUIET = False
VERBOSE = False
DAEMON = False
CREATEPID = False
PIDFILE = None
SCHED = BackgroundScheduler()
SCHED_LOCK = threading.Lock()
INIT_LOCK = threading.Lock()
_INITIALIZED = False
started = False
DATA_DIR = None
CONFIG = None
DB_FILE = None
LOG_LIST = []
INSTALL_TYPE = None
CURRENT_VERSION = None
LATEST_VERSION = None
COMMITS_BEHIND = None
LOSSY_MEDIA_FORMATS = ["mp3", "aac", "ogg", "ape", "m4a", "asf", "wma"]
LOSSLESS_MEDIA_FORMATS = ["flac"]
MEDIA_FORMATS = LOSSY_MEDIA_FORMATS + LOSSLESS_MEDIA_FORMATS
MIRRORLIST = ["musicbrainz.org", "headphones", "custom"]
UMASK = None
def initialize(config_file):
with INIT_LOCK:
global CONFIG
global _INITIALIZED
global CURRENT_VERSION
global LATEST_VERSION
global UMASK
CONFIG = headphones.config.Config(config_file)
assert CONFIG is not None
if _INITIALIZED:
return False
if CONFIG.HTTP_PORT < 21 or CONFIG.HTTP_PORT > 65535:
headphones.logger.warn(
'HTTP_PORT out of bounds: 21 < %s < 65535', CONFIG.HTTP_PORT)
CONFIG.HTTP_PORT = 8181
if CONFIG.HTTPS_CERT == '':
CONFIG.HTTPS_CERT = os.path.join(DATA_DIR, 'server.crt')
if CONFIG.HTTPS_KEY == '':
CONFIG.HTTPS_KEY = os.path.join(DATA_DIR, 'server.key')
if not CONFIG.LOG_DIR:
CONFIG.LOG_DIR = os.path.join(DATA_DIR, 'logs')
if not os.path.exists(CONFIG.LOG_DIR):
try:
os.makedirs(CONFIG.LOG_DIR)
except OSError:
CONFIG.LOG_DIR = None
if not QUIET:
sys.stderr.write("Unable to create the log directory. " \
"Logging to screen only.\n")
# Start the logger, disable console if needed
logger.initLogger(console=not QUIET, log_dir=CONFIG.LOG_DIR,
verbose=VERBOSE)
if not CONFIG.CACHE_DIR:
# Put the cache dir in the data dir for now
CONFIG.CACHE_DIR = os.path.join(DATA_DIR, 'cache')
if not os.path.exists(CONFIG.CACHE_DIR):
try:
os.makedirs(CONFIG.CACHE_DIR)
except OSError as e:
logger.error("Could not create cache dir '%s': %s", DATA_DIR, e)
# Sanity check for search interval. Set it to at least 6 hours
if CONFIG.SEARCH_INTERVAL and CONFIG.SEARCH_INTERVAL < 360:
logger.info("Search interval too low. Resetting to 6 hour minimum.")
CONFIG.SEARCH_INTERVAL = 360
# Initialize the database
logger.info('Checking to see if the database has all tables....')
try:
dbcheck()
except Exception as e:
logger.error("Can't connect to the database: %s", e)
# Get the currently installed version. Returns None, 'win32' or the git
# hash.
CURRENT_VERSION, CONFIG.GIT_BRANCH = versioncheck.getVersion()
# Write current version to a file, so we know which version did work.
        # This allows one to restore to that version. The idea is that if we
# arrive here, most parts of Headphones seem to work.
if CURRENT_VERSION:
version_lock_file = os.path.join(DATA_DIR, "version.lock")
try:
with open(version_lock_file, "w") as fp:
fp.write(CURRENT_VERSION)
except IOError as e:
logger.error("Unable to write current version to file '%s': %s",
version_lock_file, e)
# Check for new versions
if CONFIG.CHECK_GITHUB and CONFIG.CHECK_GITHUB_ON_STARTUP:
try:
LATEST_VERSION = versioncheck.checkGithub()
except:
logger.exception("Unhandled exception")
LATEST_VERSION = CURRENT_VERSION
else:
LATEST_VERSION = CURRENT_VERSION
# Store the original umask
UMASK = os.umask(0)
os.umask(UMASK)
_INITIALIZED = True
return True
def daemonize():
if threading.activeCount() != 1:
logger.warn(
'There are %r active threads. Daemonizing may cause'
' strange behavior.',
threading.enumerate())
sys.stdout.flush()
sys.stderr.flush()
# Do first fork
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
sys.exit(0)
except OSError, e:
raise RuntimeError("1st fork failed: %s [%d]", e.strerror, e.errno)
os.setsid()
# Make sure I can read my own files and shut out others
prev = os.umask(0) # @UndefinedVariable - only available in UNIX
os.umask(prev and int('077', 8))
# Make the child a session-leader by detaching from the terminal
try:
pid = os.fork() # @UndefinedVariable - only available in UNIX
if pid != 0:
sys.exit(0)
except OSError, e:
raise RuntimeError("2nd fork failed: %s [%d]", e.strerror, e.errno)
dev_null = file('/dev/null', 'r')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
si = open('/dev/null', "r")
so = open('/dev/null', "a+")
se = open('/dev/null', "a+")
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
pid = os.getpid()
logger.info('Daemonized to PID: %d', pid)
if CREATEPID:
logger.info("Writing PID %d to %s", pid, PIDFILE)
with file(PIDFILE, 'w') as fp:
fp.write("%s\n" % pid)
def launch_browser(host, port, root):
if host == '0.0.0.0':
host = 'localhost'
if CONFIG.ENABLE_HTTPS:
protocol = 'https'
else:
protocol = 'http'
try:
webbrowser.open('%s://%s:%i%s' % (protocol, host, port, root))
except Exception as e:
logger.error('Could not launch browser: %s', e)
def initialize_scheduler():
"""
Start the scheduled background tasks. Re-schedule if interval settings changed.
"""
from headphones import updater, searcher, librarysync, postprocessor, \
torrentfinished
with SCHED_LOCK:
# Check if scheduler should be started
start_jobs = not len(SCHED.get_jobs())
# Regular jobs
minutes = CONFIG.SEARCH_INTERVAL
schedule_job(searcher.searchforalbum, 'Search for Wanted', hours=0, minutes=minutes)
minutes = CONFIG.DOWNLOAD_SCAN_INTERVAL
schedule_job(postprocessor.checkFolder, 'Download Scan', hours=0, minutes=minutes)
hours = CONFIG.LIBRARYSCAN_INTERVAL
schedule_job(librarysync.libraryScan, 'Library Scan', hours=hours, minutes=0)
hours = CONFIG.UPDATE_DB_INTERVAL
schedule_job(updater.dbUpdate, 'MusicBrainz Update', hours=hours, minutes=0)
# Update check
if CONFIG.CHECK_GITHUB:
if CONFIG.CHECK_GITHUB_INTERVAL:
minutes = CONFIG.CHECK_GITHUB_INTERVAL
else:
minutes = 0
schedule_job(versioncheck.checkGithub, 'Check GitHub for updates', hours=0,
minutes=minutes)
# Remove Torrent + data if Post Processed and finished Seeding
minutes = CONFIG.TORRENT_REMOVAL_INTERVAL
schedule_job(torrentfinished.checkTorrentFinished, 'Torrent removal check', hours=0,
minutes=minutes)
# Start scheduler
if start_jobs and len(SCHED.get_jobs()):
try:
SCHED.start()
except Exception as e:
logger.info(e)
# Debug
# SCHED.print_jobs()
def schedule_job(function, name, hours=0, minutes=0):
"""
Start scheduled job if starting or restarting headphones.
Reschedule job if Interval Settings have changed.
    Remove job if Interval Settings changed to 0
"""
job = SCHED.get_job(name)
if job:
if hours == 0 and minutes == 0:
SCHED.remove_job(name)
logger.info("Removed background task: %s", name)
elif job.trigger.interval != datetime.timedelta(hours=hours, minutes=minutes):
SCHED.reschedule_job(name, trigger=IntervalTrigger(
hours=hours, minutes=minutes))
logger.info("Re-scheduled background task: %s", name)
elif hours > 0 or minutes > 0:
SCHED.add_job(function, id=name, trigger=IntervalTrigger(
hours=hours, minutes=minutes))
logger.info("Scheduled background task: %s", name)
def start():
global started
if _INITIALIZED:
initialize_scheduler()
started = True
def sig_handler(signum=None, frame=None):
if signum is not None:
logger.info("Signal %i caught, saving and exiting...", signum)
shutdown()
def dbcheck():
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS artists (ArtistID TEXT UNIQUE, ArtistName TEXT, ArtistSortName TEXT, DateAdded TEXT, Status TEXT, IncludeExtras INTEGER, LatestAlbum TEXT, ReleaseDate TEXT, AlbumID TEXT, HaveTracks INTEGER, TotalTracks INTEGER, LastUpdated TEXT, ArtworkURL TEXT, ThumbURL TEXT, Extras TEXT, Type TEXT, MetaCritic TEXT)')
    # ReleaseFormat here means CD, Digital, Vinyl, etc. If using the default
# Headphones hybrid release, ReleaseID will equal AlbumID (AlbumID is
# releasegroup id)
c.execute(
'CREATE TABLE IF NOT EXISTS albums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, DateAdded TEXT, AlbumID TEXT UNIQUE, Status TEXT, Type TEXT, ArtworkURL TEXT, ThumbURL TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT, SearchTerm TEXT, CriticScore TEXT, UserScore TEXT)')
# Format here means mp3, flac, etc.
c.execute(
'CREATE TABLE IF NOT EXISTS tracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS allalbums (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, ReleaseDate TEXT, AlbumID TEXT, Type TEXT, ReleaseID TEXT, ReleaseCountry TEXT, ReleaseFormat TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS alltracks (ArtistID TEXT, ArtistName TEXT, AlbumTitle TEXT, AlbumASIN TEXT, AlbumID TEXT, TrackTitle TEXT, TrackDuration, TrackID TEXT, TrackNumber INTEGER, Location TEXT, BitRate INTEGER, CleanName TEXT, Format TEXT, ReleaseID TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS snatched (AlbumID TEXT, Title TEXT, Size INTEGER, URL TEXT, DateAdded TEXT, Status TEXT, FolderName TEXT, Kind TEXT)')
# Matched is a temporary value used to see if there was a match found in
# alltracks
c.execute(
'CREATE TABLE IF NOT EXISTS have (ArtistName TEXT, AlbumTitle TEXT, TrackNumber TEXT, TrackTitle TEXT, TrackLength TEXT, BitRate TEXT, Genre TEXT, Date TEXT, TrackID TEXT, Location TEXT, CleanName TEXT, Format TEXT, Matched TEXT)')
c.execute(
'CREATE TABLE IF NOT EXISTS lastfmcloud (ArtistName TEXT, ArtistID TEXT, Count INTEGER)')
c.execute(
'CREATE TABLE IF NOT EXISTS descriptions (ArtistID TEXT, ReleaseGroupID TEXT, ReleaseID TEXT, Summary TEXT, Content TEXT, LastUpdated TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS blacklist (ArtistID TEXT UNIQUE)')
c.execute('CREATE TABLE IF NOT EXISTS newartists (ArtistName TEXT UNIQUE)')
c.execute(
'CREATE TABLE IF NOT EXISTS releases (ReleaseID TEXT, ReleaseGroupID TEXT, UNIQUE(ReleaseID, ReleaseGroupID))')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_albumid ON tracks(AlbumID ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS album_artistid_reldate ON albums(ArtistID ASC, ReleaseDate DESC)')
# Below creates indices to speed up Active Artist updating
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_relid ON alltracks(ReleaseID ASC, TrackID ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS allalbums_relid ON allalbums(ReleaseID ASC)')
c.execute('CREATE INDEX IF NOT EXISTS have_location ON have(Location ASC)')
# Below creates indices to speed up library scanning & matching
c.execute(
'CREATE INDEX IF NOT EXISTS have_Metadata ON have(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS have_CleanName ON have(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_Metadata ON tracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_CleanName ON tracks(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_Metadata ON alltracks(ArtistName ASC, AlbumTitle ASC, TrackTitle ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_CleanName ON alltracks(CleanName ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS tracks_Location ON tracks(Location ASC)')
c.execute(
'CREATE INDEX IF NOT EXISTS alltracks_Location ON alltracks(Location ASC)')
try:
c.execute('SELECT IncludeExtras from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN IncludeExtras INTEGER DEFAULT 0')
try:
c.execute('SELECT LatestAlbum from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN LatestAlbum TEXT')
try:
c.execute('SELECT ReleaseDate from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN ReleaseDate TEXT')
try:
c.execute('SELECT AlbumID from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN AlbumID TEXT')
try:
c.execute('SELECT HaveTracks from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN HaveTracks INTEGER DEFAULT 0')
try:
c.execute('SELECT TotalTracks from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN TotalTracks INTEGER DEFAULT 0')
try:
c.execute('SELECT Type from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN Type TEXT DEFAULT "Album"')
try:
c.execute('SELECT TrackNumber from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN TrackNumber INTEGER')
try:
c.execute('SELECT FolderName from snatched')
except sqlite3.OperationalError:
c.execute('ALTER TABLE snatched ADD COLUMN FolderName TEXT')
try:
c.execute('SELECT Location from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN Location TEXT')
try:
c.execute('SELECT Location from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN Location TEXT')
try:
c.execute('SELECT BitRate from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN BitRate INTEGER')
try:
c.execute('SELECT CleanName from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN CleanName TEXT')
try:
c.execute('SELECT CleanName from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN CleanName TEXT')
# Add the Format column
try:
c.execute('SELECT Format from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN Format TEXT DEFAULT NULL')
try:
c.execute('SELECT Format from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN Format TEXT DEFAULT NULL')
try:
c.execute('SELECT LastUpdated from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN LastUpdated TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtworkURL from artists')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE artists ADD COLUMN ArtworkURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtworkURL from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ArtworkURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ThumbURL from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN ThumbURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ThumbURL from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ThumbURL TEXT DEFAULT NULL')
try:
c.execute('SELECT ArtistID from descriptions')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE descriptions ADD COLUMN ArtistID TEXT DEFAULT NULL')
try:
c.execute('SELECT LastUpdated from descriptions')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE descriptions ADD COLUMN LastUpdated TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseID from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN ReleaseID TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseFormat from albums')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE albums ADD COLUMN ReleaseFormat TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseCountry from albums')
except sqlite3.OperationalError:
c.execute(
'ALTER TABLE albums ADD COLUMN ReleaseCountry TEXT DEFAULT NULL')
try:
c.execute('SELECT ReleaseID from tracks')
except sqlite3.OperationalError:
c.execute('ALTER TABLE tracks ADD COLUMN ReleaseID TEXT DEFAULT NULL')
try:
c.execute('SELECT Matched from have')
except sqlite3.OperationalError:
c.execute('ALTER TABLE have ADD COLUMN Matched TEXT DEFAULT NULL')
try:
c.execute('SELECT Extras from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN Extras TEXT DEFAULT NULL')
# Need to update some stuff when people are upgrading and have 'include
# extras' set globally/for an artist
if CONFIG.INCLUDE_EXTRAS:
CONFIG.EXTRAS = "1,2,3,4,5,6,7,8"
logger.info("Copying over current artist IncludeExtras information")
artists = c.execute(
'SELECT ArtistID, IncludeExtras from artists').fetchall()
for artist in artists:
if artist[1]:
c.execute(
'UPDATE artists SET Extras=? WHERE ArtistID=?', ("1,2,3,4,5,6,7,8", artist[0]))
try:
c.execute('SELECT Kind from snatched')
except sqlite3.OperationalError:
c.execute('ALTER TABLE snatched ADD COLUMN Kind TEXT DEFAULT NULL')
try:
c.execute('SELECT SearchTerm from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN SearchTerm TEXT DEFAULT NULL')
try:
c.execute('SELECT CriticScore from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN CriticScore TEXT DEFAULT NULL')
try:
c.execute('SELECT UserScore from albums')
except sqlite3.OperationalError:
c.execute('ALTER TABLE albums ADD COLUMN UserScore TEXT DEFAULT NULL')
try:
c.execute('SELECT Type from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN Type TEXT DEFAULT NULL')
try:
c.execute('SELECT MetaCritic from artists')
except sqlite3.OperationalError:
c.execute('ALTER TABLE artists ADD COLUMN MetaCritic TEXT DEFAULT NULL')
conn.commit()
c.close()
def shutdown(restart=False, update=False):
cherrypy.engine.exit()
SCHED.shutdown(wait=False)
CONFIG.write()
if not restart and not update:
logger.info('Headphones is shutting down...')
if update:
logger.info('Headphones is updating...')
try:
versioncheck.update()
except Exception as e:
logger.warn('Headphones failed to update: %s. Restarting.', e)
if CREATEPID:
logger.info('Removing pidfile %s', PIDFILE)
os.remove(PIDFILE)
if restart:
logger.info('Headphones is restarting...')
popen_list = [sys.executable, FULL_PATH]
popen_list += ARGS
if '--nolaunch' not in popen_list:
popen_list += ['--nolaunch']
logger.info('Restarting Headphones with %s', popen_list)
subprocess.Popen(popen_list, cwd=os.getcwd())
os._exit(0)
| gpl-3.0 | -4,735,171,405,905,738,000 | 34.158307 | 340 | 0.652044 | false |
mardiros/apium | test.py | 1 | 1324 | import sys
import traceback
import asyncio
from apium.registry import get_driver
from apium.config import Configurator
from apium.proxy import apium
@asyncio.coroutine
def routine(future, config):
try:
Configurator.from_yaml(config)
yield from get_driver().connect_broker()
get_driver().attach_signals()
result = yield from apium.apium.task.sample.add(1, 2)
print("1 + 2 =", result)
result = yield from apium.dotted.multiply(2, 16)
print("2 * 16 = ", result)
result = yield from apium.apium.task.sample.divide(8, 2)
print("8 / 2 = ", result)
result = yield from apium.noop(1, task_options={'timeout': 2})
print ("wait for", result, "seconds")
result = yield from apium.aionoop(2, task_options={'timeout': 1})
print (result)
except Exception as exc:
traceback.print_exc()
finally:
try:
yield from get_driver().disconnect_broker()
except Exception as exc:
traceback.print_exc()
future.set_result(None)
def main(argv=sys.argv):
future = asyncio.Future()
loop = asyncio.get_event_loop()
    # Creating the Task schedules the coroutine; run_until_complete() below
    # drives the loop until the routine signals completion via `future`.
    asyncio.Task(routine(future, argv[1]))
loop.run_until_complete(future)
loop.stop()
if __name__ == '__main__':
main()
| bsd-3-clause | -5,135,659,628,212,368,000 | 25.48 | 73 | 0.619335 | false |
cogitare-ai/cogitare | tests/test_data/test_dataholder.py | 1 | 6616 | from tests.common import TestCase
import mock
from functools import reduce
import numpy as np
import pytest
import torch
from cogitare.data import AbsDataHolder, TensorHolder, NumpyHolder, CallableHolder, AutoHolder
_data = torch.rand((100, 32))
def get_data(idx):
return _data[idx]
class _DataHolderAbs(object):
def test_create(self):
self.holder(self.data, **self.kwargs)
with pytest.raises(ValueError) as info:
self.holder(self.data, mode='asd')
self.assertIn('"mode" must be one of:', str(info.value))
def test_repr(self):
dh = self.holder(self.data, **self.kwargs)
out = repr(dh)
self.assertIn('100x1 samples', out)
dh = self.holder(self.data, batch_size=5, **self.kwargs)
out = repr(dh)
self.assertIn('20x5 samples', out)
def test_getitem(self):
dh = self.holder(self.data, batch_size=5, **self.kwargs)
for i in range(10):
self.assertEqual(dh[i], self._data[i])
def test_on_sample_loaded(self):
def f(x):
return x * 2
dh = self.holder(self.data, on_sample_loaded=f, **self.kwargs)
for i in range(10):
self.assertEqual(dh[i], self._data[i] * 2)
def test_len(self):
dh = self.holder(self.data, batch_size=9, **self.kwargs)
self.assertEqual(12, len(dh))
dh = self.holder(self.data, batch_size=9, drop_last=True, **self.kwargs)
self.assertEqual(11, len(dh))
def test_split(self):
dh = self.holder(self.data, batch_size=9, **self.kwargs)
self.assertEqual(12, len(dh))
dh1, dh2 = dh.split(0.6)
self.assertEqual(dh1.total_samples, 60)
self.assertEqual(dh2.total_samples, 40)
self.assertEqual(len(dh1), 7)
self.assertEqual(len(dh2), 5)
self.assertEqual(len(np.intersect1d(dh1.indices, dh2.indices)), 0)
def test_split_chunks(self):
dh = self.holder(self.data, batch_size=10, **self.kwargs)
self.assertEqual(10, len(dh))
holders = dh.split_chunks(10)
for holder in holders:
self.assertEqual(holder.total_samples, 10)
self.assertEqual(len(holder), 1)
indices = [holders[i].indices for i in range(10)]
self.assertEqual(len(reduce(np.intersect1d, indices)), 0)
def test_shuffle(self):
dh = self.holder(self.data, batch_size=10, **self.kwargs)
dh.shuffle = mock.MagicMock(return_value=None)
next(dh)
assert dh.shuffle.called
dh = self.holder(self.data, batch_size=10, shuffle=False, **self.kwargs)
dh.shuffle = mock.MagicMock(return_value=None)
next(dh)
assert not dh.shuffle.called
def test_batch(self):
dh = self.holder(self.data, batch_size=10, **self.kwargs)
self.assertEqual(dh._current_batch, 0)
next(dh)
self.assertEqual(dh._current_batch, 1)
next(dh)
self.assertEqual(dh._current_batch, 2)
dh.reset()
self.assertEqual(dh._current_batch, 0)
next(dh)
self.assertEqual(dh._current_batch, 1)
dh.batch_size = 3
next(dh)
self.assertEqual(dh._current_batch, 1)
self.assertEqual(dh.batch_size, dh._batch_size)
def test_set_total_samples(self):
dh = self.holder(self.data, batch_size=10, **self.kwargs)
dh.total_samples = 90
self.assertEqual(len(dh), 9)
dh.total_samples = 100
self.assertEqual(len(dh), 10)
if self.holder != CallableHolder:
with pytest.raises(ValueError) as info:
dh.total_samples = 110
self.assertIn('The value must be lesser or equal to the', str(info.value))
with pytest.raises(ValueError) as info:
dh.total_samples = 0
self.assertIn('number of samples must be greater or equal to 1', str(info.value))
def test_on_batch_loaded(self):
def f(batch):
return batch[2]
dh = self.holder(self.data, shuffle=False, batch_size=10, on_batch_loaded=f, **self.kwargs)
batch = next(dh)
self.assertEqual(batch, self._data[2])
def test_iter_batches(self):
for mode in ['threaded', 'multiprocessing', 'sequential']:
dh = self.holder(self.data, batch_size=10, mode=mode, **self.kwargs)
for i in range(5):
for batch in dh:
self.assertEqual(len(batch), 10)
dh = self.holder(self.data, batch_size=8, drop_last=True, **self.kwargs)
self.assertEqual(len(dh), 12)
for batch in dh:
self.assertEqual(len(batch), 8)
def test_iter_single(self):
dh = self.holder(self.data, batch_size=1, **self.kwargs)
for batch in dh:
self.assertIsInstance(batch, list)
self.assertEqual(len(batch), 1)
dh = self.holder(self.data, batch_size=1, single=True, **self.kwargs)
for batch in dh:
assert torch.is_tensor(batch)
class TestAbsInterface(TestCase):
def test_get_sample(self):
class A(AbsDataHolder):
def get_sample(self, key):
return super(A, self).get_sample(key)
a = A(None)
a.get_sample(0)
class TestTensorHolder(TestCase, _DataHolderAbs):
data = torch.rand((100, 32))
_data = data
holder = TensorHolder
kwargs = {}
class TestNumpyHolder(TestCase, _DataHolderAbs):
@property
def holder(self):
return NumpyHolder
data = np.random.rand(100, 32)
_data = torch.from_numpy(data)
kwargs = {}
class TestCallableHolder(TestCase, _DataHolderAbs):
@property
def holder(self):
return CallableHolder
kwargs = {'total_samples': 100}
_data = _data
@property
def data(self):
return get_data
def test_not_total_samples(self):
dh = CallableHolder(self.data)
with pytest.raises(ValueError) as info:
dh.total_samples
self.assertIn('"total_samples" not defined', str(info.value))
class TestAutoHolder(TestCase):
def test_tensor(self):
dh = AutoHolder(_data)
self.assertIsInstance(dh, TensorHolder)
def test_numpy(self):
dh = AutoHolder(np.random.rand(3, 3))
self.assertIsInstance(dh, TensorHolder)
def test_callable(self):
dh = AutoHolder(get_data)
self.assertIsInstance(dh, CallableHolder)
def test_unknown(self):
with pytest.raises(ValueError) as info:
AutoHolder('asd')
self.assertIn('Unable to infer data type', str(info.value))
| mit | -8,549,881,287,331,141,000 | 28.274336 | 99 | 0.606106 | false |
teythoon/Insekta | insekta/network/models.py | 1 | 2172 | import random
from django.db import models, transaction, IntegrityError
from django.conf import settings
from insekta.common.dblock import dblock
from insekta.network.utils import iterate_nets, int_to_ip
LOCK_NETWORK_ADDRESS = 947295
class NetworkError(Exception):
pass
class AddressManager(models.Manager):
def get_free(self):
"""Get a free address and mark it as in use."""
try:
with dblock(LOCK_NETWORK_ADDRESS):
address = self.get_query_set().filter(in_use=False)[0]
address.take_address()
return address
except IndexError:
raise NetworkError('No more free addresses.')
@transaction.commit_manually
def fill_pool(self):
"""Insert all addresses into the pool."""
def random_16():
return ''.join(random.choice('0123456789abcdef') for _i in (0, 1))
new_addresses = []
oui = getattr(settings, 'VM_MAC_OUI', '52:54:00')
if not hasattr(settings, 'VM_IP_BLOCK'):
raise NetworkError('Please set VM_IP_BLOCK in settings.py')
if not hasattr(settings, 'VM_NET_SIZE'):
raise NetworkError('Please set VM_NET_SIZE in settings.py')
vm_nets = iterate_nets(settings.VM_IP_BLOCK, settings.VM_NET_SIZE)
for net_ip_int in vm_nets:
mac = ':'.join((oui, random_16(), random_16(), random_16()))
ip = int_to_ip(net_ip_int + 2)
try:
new_addresses.append(Address.objects.create(mac=mac, ip=ip))
except IntegrityError:
transaction.rollback()
else:
transaction.commit()
return new_addresses
class Address(models.Model):
ip = models.IPAddressField(unique=True)
mac = models.CharField(max_length=17, unique=True)
in_use = models.BooleanField(default=False)
objects = AddressManager()
def __unicode__(self):
return '{0} with IP {1}'.format(self.mac, self.ip)
def take_address(self):
self.in_use = True
self.save()
def return_address(self):
self.in_use = False
self.save()
| mit | 4,215,938,950,256,456,700 | 30.478261 | 78 | 0.601289 | false |
nijel/weblate | weblate/checks/tests/test_format_checks.py | 1 | 41257 | #
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Tests for quality checks."""
from django.test import SimpleTestCase
from weblate.checks.format import (
CFormatCheck,
CSharpFormatCheck,
ESTemplateLiteralsCheck,
I18NextInterpolationCheck,
JavaFormatCheck,
JavaMessageFormatCheck,
LuaFormatCheck,
MultipleUnnamedFormatsCheck,
PercentPlaceholdersCheck,
PerlFormatCheck,
PHPFormatCheck,
PythonBraceFormatCheck,
PythonFormatCheck,
SchemeFormatCheck,
VueFormattingCheck,
)
from weblate.checks.models import Check
from weblate.checks.qt import QtFormatCheck, QtPluralCheck
from weblate.checks.ruby import RubyFormatCheck
from weblate.checks.tests.test_checks import CheckTestCase, MockUnit
from weblate.lang.models import Language
from weblate.trans.models import Component, Translation, Unit
from weblate.trans.tests.test_views import FixtureTestCase
from weblate.trans.util import join_plural
class PythonFormatCheckTest(CheckTestCase):
check = PythonFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"python-format",
"%sstring%d",
[(0, 2, "%s"), (8, 10, "%d")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(self.check.check_format("%s string", "%s string", False, None))
def test_space_format(self):
self.assertTrue(
self.check.check_format("%d % string", "%d % other", False, None)
)
def test_percent_format(self):
self.assertFalse(
self.check.check_format("%d%% string", "%d%% string", False, None)
)
self.assertTrue(
self.check.check_format("12%% string", "12% string", False, None)
)
self.assertTrue(self.check.check_format("Save 12%%.", "Save 12%.", False, None))
self.assertFalse(
self.check.check_format("Save 12%%.", "Save 12 percent.", False, None)
)
def test_named_format(self):
self.assertFalse(
self.check.check_format("%(name)s string", "%(name)s string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%s string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(
self.check.check_format("%(name)s string", "string", False, None)
)
def test_missing_named_format_ignore(self):
self.assertFalse(
self.check.check_format("%(name)s string", "string", True, None)
)
def test_wrong_format(self):
self.assertTrue(self.check.check_format("%s string", "%c string", False, None))
def test_reordered_format(self):
self.assertTrue(
self.check.check_format("%s %d string", "%d %s string", False, None)
)
def test_wrong_named_format(self):
self.assertTrue(
self.check.check_format("%(name)s string", "%(jmeno)s string", False, None)
)
def test_reordered_named_format(self):
self.assertFalse(
self.check.check_format(
"%(name)s %(foo)s string", "%(foo)s %(name)s string", False, None
)
)
def test_reordered_named_format_long(self):
self.assertFalse(
self.check.check_format(
"%(count)d strings into %(languages)d languages %(percent)d%%",
"%(languages)d dil içinde %(count)d satır %%%(percent)d",
False,
None,
)
)
def test_feedback(self):
self.assertEqual(
self.check.check_format("%(count)d", "%(languages)d", False, None),
{"missing": ["(count)d"], "extra": ["(languages)d"]},
)
self.assertEqual(
self.check.check_format("%(count)d", "count", False, None),
{"missing": ["(count)d"], "extra": []},
)
self.assertEqual(
self.check.check_format(
"%(count)d", "%(count)d %(languages)d", False, None
),
{"missing": [], "extra": ["(languages)d"]},
)
self.assertEqual(
self.check.check_format("%d", "%s", False, None),
{"missing": ["d"], "extra": ["s"]},
)
self.assertEqual(
self.check.check_format("%d", "ds", False, None),
{"missing": ["d"], "extra": []},
)
self.assertEqual(
self.check.check_format("%d", "%d %s", False, None),
{"missing": [], "extra": ["s"]},
)
self.assertEqual(
self.check.check_format("%d %d", "%d", False, None),
{"missing": ["d"], "extra": []},
)
def test_description(self):
unit = Unit(
source="%(count)d",
target="%(languages)d",
extra_flags="python-format",
)
check = Check(unit=unit)
self.assertEqual(
self.check.get_description(check),
"Following format strings are missing: %(count)d<br />"
"Following format strings are extra: %(languages)d",
)
class PHPFormatCheckTest(CheckTestCase):
check = PHPFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"php-format",
"%sstring%d",
[(0, 2, "%s"), (8, 10, "%d")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(self.check.check_format("%s string", "%s string", False, None))
def test_named_format(self):
self.assertFalse(
self.check.check_format("%1$s string", "%1$s string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%s string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(self.check.check_format("%1$s string", "string", False, None))
def test_missing_named_format_ignore(self):
self.assertFalse(self.check.check_format("%1$s string", "string", True, None))
def test_wrong_format(self):
self.assertTrue(self.check.check_format("%s string", "%c string", False, None))
def test_double_format(self):
self.assertTrue(
self.check.check_format("%s string", "%s%s string", False, None)
)
def test_reorder_format(self):
self.assertFalse(
self.check.check_format("%1$s %2$s string", "%2$s %1$s string", False, None)
)
def test_wrong_named_format(self):
self.assertTrue(
self.check.check_format("%1$s string", "%s string", False, None)
)
def test_wrong_percent_format(self):
self.assertTrue(
self.check.check_format("%s%% (0.1%%)", "%s%% (0.1%x)", False, None)
)
def test_missing_percent_format(self):
self.assertFalse(
self.check.check_format("%s%% %%", "%s%% percent", False, None)
)
def test_space_format(self):
self.assertTrue(
self.check.check_format("%d % string", "%d % other", False, None)
)
class SchemeFormatCheckTest(CheckTestCase):
check = SchemeFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"scheme-format",
"~sstring~d",
[(0, 2, "~s"), (8, 10, "~d")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(self.check.check_format("~s string", "~s string", False, None))
def test_named_format(self):
self.assertFalse(
self.check.check_format("~0@*~s string", "~0@*~s string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("~s string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(self.check.check_format("~1@*~s string", "string", False, None))
def test_missing_named_format_ignore(self):
self.assertFalse(self.check.check_format("~1@*~s string", "string", True, None))
def test_wrong_format(self):
self.assertTrue(self.check.check_format("~s string", "~c string", False, None))
def test_double_format(self):
self.assertTrue(
self.check.check_format("~s string", "~s~s string", False, None)
)
def test_reorder_format(self):
self.assertFalse(
self.check.check_format(
"~1@*~s ~2@*~s string", "~2@*~s ~1@*~s string", False, None
)
)
def test_wrong_named_format(self):
self.assertTrue(
self.check.check_format("~1@*~s string", "~s string", False, None)
)
def test_wrong_tilde_format(self):
self.assertTrue(
self.check.check_format("~s~~ (0.1~~)", "~s~~ (0.1~x)", False, None)
)
def test_missing_tilde_format(self):
self.assertFalse(self.check.check_format("~s~~ ~~", "~s~~ tilde", False, None))
class CFormatCheckTest(CheckTestCase):
check = CFormatCheck()
flag = "c-format"
def setUp(self):
super().setUp()
self.test_highlight = (self.flag, "%sstring%d", [(0, 2, "%s"), (8, 10, "%d")])
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(self.check.check_format("%s string", "%s string", False, None))
def test_named_format(self):
self.assertFalse(
self.check.check_format("%10s string", "%10s string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%s string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(self.check.check_format("%10s string", "string", False, None))
def test_missing_named_format_ignore(self):
self.assertFalse(self.check.check_format("%10s string", "string", True, None))
def test_wrong_format(self):
self.assertTrue(self.check.check_format("%s string", "%c string", False, None))
def test_wrong_named_format(self):
self.assertTrue(
self.check.check_format("%10s string", "%20s string", False, None)
)
def test_reorder_format(self):
self.assertFalse(
self.check.check_format("%1$s %2$s string", "%2$s %1$s string", False, None)
)
def test_locale_delimiter(self):
self.assertFalse(
self.check.check_format("lines: %6.3f", "radky: %'6.3f", False, None)
)
def test_ld_format(self):
self.assertFalse(
self.check.check_format(
"%ld bytes (free %ld bytes, used %ld bytes)",
"%l octets (%l octets libres, %l octets utilisés)",
True,
None,
)
)
def test_parenthesis(self):
self.assertFalse(self.check.check_format("(%.0lf%%)", "(%%%.0lf)", False, None))
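# The Lua and Perl checks accept the same printf-style placeholders, so their
# suites below inherit every case from CFormatCheckTest and only swap the
# check class and flag name.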
class LuaFormatCheckTest(CFormatCheckTest):
check = LuaFormatCheck()
flag = "lua-format"
class PerlFormatCheckTest(CFormatCheckTest):
check = PerlFormatCheck()
flag = "perl-format"
class PythonBraceFormatCheckTest(CheckTestCase):
check = PythonBraceFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"python-brace-format",
"{0}string{1}",
[(0, 3, "{0}"), (9, 12, "{1}")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_position_format(self):
self.assertFalse(
self.check.check_format("{} string {}", "{} string {}", False, None)
)
def test_wrong_position_format(self):
self.assertTrue(
self.check.check_format("{} string", "{} string {}", False, None)
)
def test_named_format(self):
self.assertFalse(
self.check.check_format("{s1} string {s2}", "{s1} string {s2}", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("{} string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(self.check.check_format("{s1} string", "string", False, None))
def test_missing_named_format_ignore(self):
self.assertFalse(self.check.check_format("{s} string", "string", True, None))
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("{s} string", "{c} string", False, None)
)
def test_escaping(self):
self.assertFalse(self.check.check_format("{{ string }}", "string", False, None))
def test_attribute_format(self):
self.assertFalse(
self.check.check_format("{s.foo} string", "{s.foo} string", False, None)
)
def test_wrong_attribute_format(self):
self.assertTrue(
self.check.check_format("{s.foo} string", "{s.bar} string", False, None)
)
class CSharpFormatCheckTest(CheckTestCase):
check = CSharpFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"c-sharp-format",
"{0}string{1}",
[(0, 3, "{0}"), (9, 12, "{1}")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_escaping_no_position(self):
self.assertFalse(self.check.check_format("{{ string }}", "string", False, None))
def test_simple_format(self):
self.assertFalse(
self.check.check_format("{0} strins", "{0} string", False, None)
)
def test_format_with_width(self):
self.assertFalse(
self.check.check_format("{0,1} strins", "{0,1} string", False, None)
)
def test_format_with_flag(self):
self.assertFalse(
self.check.check_format("{0:C2} strins", "{0:C2} string", False, None)
)
def test_full_format(self):
self.assertFalse(
self.check.check_format("{0,1:N0} strins", "{0,1:N0} string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("{0} strins", "string", False, None))
def test_missing_width_format(self):
self.assertTrue(self.check.check_format("{0,1} strins", "string", False, None))
def test_missing_flag_format(self):
self.assertTrue(self.check.check_format("{0:C1} strins", "string", False, None))
def test_missing_full_format(self):
self.assertTrue(
self.check.check_format("{0,1:C3} strins", "string", False, None)
)
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("{0} string", "{1} string", False, None)
)
def test_missing_named_format_ignore(self):
self.assertFalse(self.check.check_format("{0} string", "string", True, None))
def test_escaping_with_position(self):
self.assertFalse(self.check.check_format("{{ 0 }}", "string", False, None))
def test_wrong_attribute_format(self):
self.assertTrue(
self.check.check_format("{0} string", "{1} string", False, None)
)
def test_reordered_format(self):
self.assertFalse(
self.check.check_format("{0} string {1}", "{1} string {0}", False, None)
)
class JavaFormatCheckTest(CheckTestCase):
check = JavaFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"java-format",
"%1s string %2s",
[(0, 3, "%1s"), (11, 14, "%2s")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_escaping(self):
self.assertFalse(self.check.check_format("%% s %%", "string", False, None))
def test_format(self):
self.assertFalse(self.check.check_format("%s string", "%s string", False, None))
def test_time_format(self):
self.assertFalse(
self.check.check_format("%1$tH strins", "%1$tH string", False, None)
)
def test_wrong_position_format(self):
self.assertTrue(
self.check.check_format("%s string", "%s string %s", False, None)
)
def test_named_format(self):
self.assertFalse(
self.check.check_format("%1s string %2s", "%1s string %2s", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%1s string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(self.check.check_format("%1$05d string", "string", False, None))
def test_wrong_argument_format(self):
self.assertTrue(
self.check.check_format("%1s string", "%2s string", False, None)
)
def test_wrong_format(self):
self.assertTrue(self.check.check_format("%s strins", "%d string", False, None))
def test_missing_named_format_ignore(self):
self.assertFalse(self.check.check_format("%1s string", "string", True, None))
def test_reordered_format(self):
self.assertTrue(
self.check.check_format("%1s string %2d", "%2d string %1s", False, None)
)
class JavaMessageFormatCheckTest(CheckTestCase):
check = JavaMessageFormatCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"java-messageformat",
"{0}string{1}",
[(0, 3, "{0}"), (9, 12, "{1}")],
)
self.unit = MockUnit(source="source")
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, self.unit))
def test_escaping_no_position(self):
self.assertFalse(
self.check.check_format("{{ string }}", "string", False, self.unit)
)
def test_simple_format(self):
self.assertFalse(
self.check.check_format("{0} strins", "{0} string", False, self.unit)
)
def test_format_with_width(self):
self.assertFalse(
self.check.check_format("{0,1} strins", "{0,1} string", False, self.unit)
)
def test_format_with_flag(self):
self.assertFalse(
self.check.check_format("{0:C2} strins", "{0:C2} string", False, self.unit)
)
def test_full_format(self):
self.assertFalse(
self.check.check_format(
"{0,1:N0} strins", "{0,1:N0} string", False, self.unit
)
)
def test_missing_format(self):
self.assertTrue(
self.check.check_format("{0} strins", "string", False, self.unit)
)
def test_missing_type_format(self):
self.assertTrue(
self.check.check_format("{0,number} strins", "string", False, self.unit)
)
def test_missing_flag_format(self):
self.assertTrue(
self.check.check_format("{0} strins", "string", False, self.unit)
)
def test_missing_full_format(self):
self.assertTrue(
self.check.check_format(
"{0,number,integer} strins", "string", False, self.unit
)
)
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("{0} string", "{1} string", False, self.unit)
)
def test_missing_named_format_ignore(self):
self.assertFalse(
self.check.check_format("{0} string", "string", True, self.unit)
)
def test_escaping_with_position(self):
self.assertFalse(self.check.check_format("{{ 0 }}", "string", False, self.unit))
def test_wrong_attribute_format(self):
self.assertTrue(
self.check.check_format("{0} string", "{1} string", False, self.unit)
)
def test_reordered_format(self):
self.assertFalse(
self.check.check_format(
"{0} string {1}", "{1} string {0}", False, self.unit
)
)
def test_skip(self):
unit = MockUnit(source="source")
self.assertTrue(self.check.should_skip(unit))
unit = MockUnit(source="source", flags="java-messageformat")
self.assertFalse(self.check.should_skip(unit))
unit = MockUnit(source="source", flags="auto-java-messageformat")
self.assertTrue(self.check.should_skip(unit))
unit = MockUnit(source="{0}", flags="auto-java-messageformat")
self.assertFalse(self.check.should_skip(unit))
def test_quotes(self):
self.assertFalse(
self.check.check_format(
"{0} string {1}", "'{1}' strin''g '{0}'", False, self.unit
)
)
self.assertTrue(
self.check.check_format(
"{0} string {1}", "'{1}' strin''g '{0}", False, self.unit
)
)
self.assertTrue(
self.check.check_format(
"{0} string {1}", "'{1}' strin'g '{0}'", False, self.unit
)
)
def test_description(self):
unit = Unit(
source="{0}''s brush is {1} centimeters tall",
target="{0}'s brush is {1} centimeters tall",
extra_flags="java-messageformat",
translation=Translation(
component=Component(
file_format="auto",
source_language=Language("en"),
),
language=Language("cs"),
),
)
check = Check(unit=unit)
self.assertEqual(
self.check.get_description(check),
"You need to pair up an apostrophe with another one.",
)
class QtFormatCheckTest(CheckTestCase):
check = QtFormatCheck()
flag = "qt-format"
def setUp(self):
super().setUp()
self.test_highlight = (self.flag, "%1string%2", [(0, 2, "%1"), (8, 10, "%2")])
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_simple_format(self):
self.assertFalse(self.check.check_format("%1 strins", "%1 string", False, None))
def test_missing_format(self):
self.assertTrue(self.check.check_format("%1 strins", "string", False, None))
def test_wrong_format(self):
self.assertTrue(self.check.check_format("%1 string", "%2 string", False, None))
def test_reordered_format(self):
self.assertFalse(
self.check.check_format("%1 string %2", "%2 string %1", False, None)
)
def test_reused_format(self):
self.assertFalse(
self.check.check_format("%1 string %1", "%1 string %1", False, None)
)
class QtPluralCheckTest(CheckTestCase):
check = QtPluralCheck()
flag = "qt-plural-format"
def setUp(self):
super().setUp()
self.test_highlight = (self.flag, "%Lnstring", [(0, 3, "%Ln")])
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_plural_format(self):
self.assertFalse(
self.check.check_format("%n string(s)", "%n string", False, None)
)
def test_plural_localized_format(self):
self.assertFalse(
self.check.check_format("%Ln string(s)", "%Ln string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%n string(s)", "string", False, None))
class RubyFormatCheckTest(CheckTestCase):
check = RubyFormatCheck()
flag = "ruby-format"
def test_check_highlight(self):
self.test_highlight = (self.flag, "%dstring%s", [(0, 2, "%d"), (8, 10, "%s")])
super().test_check_highlight()
def test_check_highlight_named(self):
self.test_highlight = (
self.flag,
"%<int>dstring%<str>s",
[(0, 7, "%<int>d"), (13, 20, "%<str>s")],
)
super().test_check_highlight()
def test_check_highlight_named_template(self):
self.test_highlight = (
self.flag,
"%{int}string%{str}",
[(0, 6, "%{int}"), (12, 18, "%{str}")],
)
super().test_check_highlight()
def test_check_highlight_complex_named_template(self):
self.test_highlight = (
self.flag,
"%8.8{foo}string%+08.2<float>fstring",
[(0, 9, "%8.8{foo}"), (15, 29, "%+08.2<float>f")],
)
super().test_check_highlight()
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(self.check.check_format("%s string", "%s string", False, None))
def test_space_format(self):
self.assertTrue(
self.check.check_format("%d % string", "%d % other", False, None)
)
def test_percent_format(self):
self.assertFalse(
self.check.check_format("%d%% string", "%d%% string", False, None)
)
def test_named_format(self):
self.assertFalse(
self.check.check_format("%<name>s string", "%<name>s string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%s string", "string", False, None))
def test_missing_named_format(self):
self.assertTrue(
self.check.check_format("%<name>s string", "string", False, None)
)
def test_missing_named_format_ignore(self):
self.assertFalse(
self.check.check_format("%<name>s string", "string", True, None)
)
def test_wrong_format(self):
self.assertTrue(self.check.check_format("%s string", "%c string", False, None))
def test_reordered_format(self):
self.assertTrue(
self.check.check_format("%s %d string", "%d %s string", False, None)
)
def test_wrong_named_format(self):
self.assertTrue(
self.check.check_format("%<name>s string", "%<jmeno>s string", False, None)
)
def test_reordered_named_format(self):
self.assertFalse(
self.check.check_format(
"%<name>s %<foo>s string",
"%<foo>s %<name>s string",
False,
None,
)
)
def test_reordered_named_format_long(self):
self.assertFalse(
self.check.check_format(
"%<count>d strings into %<languages>d languages %<percent>d%%",
"%<languages>d dil içinde %<count>d satır %%%<percent>d",
False,
None,
)
)
def test_formatting_named_format(self):
self.assertFalse(
self.check.check_format(
"%+08.2<foo>f string", "%+08.2<foo>f string", False, None
)
)
def test_missing_named_template_format(self):
self.assertTrue(
self.check.check_format("%{name} string", "string", False, None)
)
def test_missing_named_template_format_ignore(self):
self.assertFalse(
self.check.check_format("%{name} string", "string", True, None)
)
def test_wrong_named_template_format(self):
self.assertTrue(
self.check.check_format("%{name} string", "%{jmeno} string", False, None)
)
def test_reordered_named_template_format(self):
self.assertFalse(
self.check.check_format(
"%{name} %{foo} string",
"%{foo} %{name} string",
False,
None,
)
)
def test_formatting_named_template_format(self):
self.assertFalse(
self.check.check_format("%8.8{foo} string", "%8.8{foo} string", False, None)
)
def test_reordered_named_template_format_long(self):
self.assertFalse(
self.check.check_format(
"%{count} strings into %{languages} languages %{percent}%%",
"%{languages} dil içinde %{count} satır %%%{percent}",
False,
None,
)
)
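# The plural tests below exercise check_target_unit() over whole plural sets:
# a placeholder may legitimately be missing from plural forms that only ever
# describe a single value in the target language.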
class PluralTest(FixtureTestCase):
check = PythonFormatCheck()
def do_check(self, sources, targets, translation):
return self.check.check_target_unit(
sources,
targets,
Unit(
translation=translation,
source=join_plural(sources),
target=join_plural(targets),
),
)
def test_arabic(self):
arabic = Language.objects.get(code="ar")
translation = Translation(language=arabic, plural=arabic.plural)
# Singular, correct format string
self.assertFalse(self.do_check(["hello %s"], ["hell %s"], translation))
# Singular, missing format string
self.assertTrue(self.do_check(["hello %s"], ["hell"], translation))
# Plural, correct format string
self.assertFalse(self.do_check(["hello %s"] * 2, ["hell %s"] * 6, translation))
# Plural, missing format string
self.assertTrue(self.do_check(["hello %s"] * 2, ["hell"] * 6, translation))
# Plural, correct format string (missing on single value plurals)
self.assertFalse(
self.do_check(
["hello %s"] * 2, ["hell"] * 3 + ["hello %s"] * 3, translation
)
)
# Plural, missing format string on multi value plural
self.assertTrue(
self.do_check(
["hello %s"] * 2, ["hell"] * 4 + ["hello %s"] * 2, translation
)
)
def test_non_format_singular(self):
czech = Language.objects.get(code="cs")
translation = Translation(language=czech, plural=czech.plural)
self.assertFalse(
self.do_check(
["One apple", "%d apples"],
["%d jablko", "%d jablka", "%d jablek"],
translation,
)
)
self.assertFalse(
self.do_check(
["One apple", "%d apples"],
["Jedno jablko", "%d jablka", "%d jablek"],
translation,
)
)
self.assertTrue(
self.do_check(
["One apple", "%d apples"],
["Jedno jablko", "jablka", "%d jablek"],
translation,
)
)
def test_non_format_singular_named(self):
language = Language.objects.get(code="cs")
translation = Translation(language=language, plural=language.plural)
self.assertFalse(
self.do_check(
["One apple", "%(count)s apples"],
["%(count)s jablko", "%(count)s jablka", "%(count)s jablek"],
translation,
)
)
self.assertFalse(
self.do_check(
["One apple", "%(count)s apples"],
["Jedno jablko", "%(count)s jablka", "%(count)s jablek"],
translation,
)
)
self.assertTrue(
self.do_check(
["One apple", "%(count)s apples"],
["Jedno jablko", "jablka", "%(count)s jablek"],
translation,
)
)
def test_non_format_singular_named_be(self):
language = Language.objects.get(code="be")
translation = Translation(language=language, plural=language.plural)
self.assertTrue(
self.do_check(
["One apple", "%(count)s apples"],
["Jedno jablko", "%(count)s jablka", "%(count)s jablek"],
translation,
)
)
def test_non_format_singular_named_kab(self):
language = Language.objects.get(code="kab")
translation = Translation(language=language, plural=language.plural)
self.assertFalse(
self.do_check(
["One apple", "%(count)s apples"],
["Jedno jablko", "%(count)s jablka", "%(count)s jablek"],
translation,
)
)
def test_french_singular(self):
language = Language.objects.get(code="fr")
translation = Translation(language=language, plural=language.plural)
self.assertFalse(
self.do_check(
["One apple", "%(count)s apples"],
["Jedno jablko", "%(count)s jablek"],
translation,
)
)
self.assertFalse(
self.do_check(
["%(count)s apple", "%(count)s apples"],
["%(count)s jablko", "%(count)s jablek"],
translation,
)
)
self.assertFalse(
self.do_check(
["One apple", "%(count)s apples"],
["%(count)s jablko", "%(count)s jablek"],
translation,
)
)
self.assertFalse(
self.do_check(
["%(count)s apple", "%(count)s apples"],
["Jedno jablko", "%(count)s jablek"],
translation,
)
)
class I18NextInterpolationCheckTest(CheckTestCase):
check = I18NextInterpolationCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"i18next-interpolation",
"{{foo}} string {{bar}}",
[(0, 7, "{{foo}}"), (15, 22, "{{bar}}")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(
self.check.check_format("{{foo}} string", "{{foo}} string", False, None)
)
self.assertFalse(
self.check.check_format("{{ foo }} string", "{{ foo }} string", False, None)
)
self.assertFalse(
self.check.check_format("{{ foo }} string", "{{foo}} string", False, None)
)
def test_nesting(self):
self.assertFalse(
self.check.check_format("$t(bar) string", "$t(bar) other", False, None)
)
self.assertFalse(
self.check.check_format("$t( bar ) string", "$t( bar ) other", False, None)
)
self.assertFalse(
self.check.check_format("$t( bar ) string", "$t(bar) other", False, None)
)
def test_missing_format(self):
self.assertTrue(
self.check.check_format("{{foo}} string", "string", False, None)
)
def test_missing_nesting(self):
self.assertTrue(self.check.check_format("$t(bar) string", "other", False, None))
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("{{foo}} string", "{{bar}} string", False, None)
)
class ESTemplateLiteralsCheckTest(CheckTestCase):
check = ESTemplateLiteralsCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"es-format",
"${foo} string ${bar}",
[(0, 6, "${foo}"), (14, 20, "${bar}")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(
self.check.check_format("${foo} string", "${foo} string", False, None)
)
self.assertFalse(
self.check.check_format("${ foo } string", "${ foo } string", False, None)
)
self.assertFalse(
self.check.check_format("${ foo } string", "${foo} string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("${foo} string", "string", False, None))
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("${foo} string", "${bar} string", False, None)
)
def test_description(self):
unit = Unit(
source="${foo}",
target="${bar}",
extra_flags="es-format",
)
check = Check(unit=unit)
self.assertEqual(
self.check.get_description(check),
"Following format strings are missing: ${foo}<br />"
"Following format strings are extra: ${bar}",
)
class PercentPlaceholdersCheckTest(CheckTestCase):
check = PercentPlaceholdersCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"percent-placeholders",
"%foo% string %bar%",
[(0, 5, "%foo%"), (13, 18, "%bar%")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(
self.check.check_format("%foo% string", "%foo% string", False, None)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%foo% string", "string", False, None))
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("%foo% string", "%bar% string", False, None)
)
class VueFormattingCheckTest(CheckTestCase):
check = VueFormattingCheck()
def setUp(self):
super().setUp()
self.test_highlight = (
"vue-format",
"{foo} string %{bar}",
[(0, 5, "{foo}"), (13, 19, "%{bar}")],
)
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False, None))
def test_format(self):
self.assertFalse(
self.check.check_format("%{foo} string", "%{foo} string", False, None)
)
self.assertFalse(
self.check.check_format("{foo} string", "{foo} string", False, None)
)
self.assertFalse(
self.check.check_format(
"@.lower:message.homeAddress string",
"@.lower:message.homeAddress string",
False,
None,
)
)
self.assertFalse(
self.check.check_format(
"@:message.the_world string",
"@:message.the_world string",
False,
None,
)
)
self.assertFalse(
self.check.check_format(
"@:(message.dio) string",
"@:(message.dio) string",
False,
None,
)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("%{foo} string", "string", False, None))
self.assertTrue(self.check.check_format("{foo} string", "string", False, None))
self.assertTrue(
self.check.check_format(
"@.lower:message.homeAddress string",
"string",
False,
None,
)
)
self.assertTrue(
self.check.check_format("@:message.the_world string", "string", False, None)
)
self.assertTrue(
self.check.check_format("@:(message.dio) string", "string", False, None)
)
def test_wrong_format(self):
self.assertTrue(
self.check.check_format("%{foo} string", "%{bar} string", False, None)
)
self.assertTrue(
self.check.check_format("{foo} string", "{bar} string", False, None)
)
class MultipleUnnamedFormatsCheckTestCase(SimpleTestCase):
check = MultipleUnnamedFormatsCheck()
def test_none_flag(self):
self.assertFalse(self.check.check_source(["text"], MockUnit()))
def test_none_format(self):
self.assertFalse(self.check.check_source(["text"], MockUnit(flags="c-format")))
def test_good(self):
self.assertFalse(
self.check.check_source(["%1$s %2$s"], MockUnit(flags="c-format"))
)
def test_bad_c(self):
self.assertTrue(self.check.check_source(["%s %s"], MockUnit(flags="c-format")))
def test_bad_python(self):
self.assertTrue(
self.check.check_source(["{} {}"], MockUnit(flags="python-brace-format"))
)
def test_good_multi_format(self):
self.assertFalse(
self.check.check_source(
["Test %s"], MockUnit(flags="c-format,python-format")
)
)
| gpl-3.0 | -3,113,829,845,770,306,000 | 31.477953 | 88 | 0.553349 | false |
datapackages/tabulator-py | tests/formats/test_datapackage.py | 1 | 2251 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import json
import pytest
from mock import Mock
from tabulator import Stream
from tabulator.parsers.datapackage import DataPackageParser
# Stream
def test_stream_datapackage():
with Stream('data/datapackage.json', resource=0, headers=1) as stream:
assert stream.headers == ['id', 'name']
assert stream.read(keyed=True) == [
{'id': 1, 'name': 'english'},
{'id': 2, 'name': '中国人'}]
def test_second_resource():
with Stream('data/datapackage.json', resource=1, headers=1) as stream:
assert stream.headers == ['id', 'name']
assert stream.read(keyed=True) == [
{'id': 1, 'name': '中国人'},
{'id': 2, 'name': 'english'}
]
def test_named_resource():
curdir = os.getcwd()
try:
os.chdir('data/')
with Stream('datapackage.json', resource='number-two', headers=1) as stream:
assert stream.headers == ['id', 'name']
assert stream.read(keyed=True) == [
{'id': 1, 'name': '中国人'},
{'id': 2, 'name': 'english'},
]
finally:
os.chdir(curdir)
# Parser
def test_datapackage_parser():
source = 'data/datapackage.json'
parser = DataPackageParser(None)
assert parser.closed is True
parser.open(source)
assert parser.closed is False
assert list(parser.extended_rows) == [
(1, ['id', 'name'], [1, 'english']),
(2, ['id', 'name'], [2, '中国人']),
]
assert len(list(parser.extended_rows)) == 0
parser.reset()
assert len(list(parser.extended_rows)) == 2
parser.close()
assert parser.closed
def test_datapackage_list():
    curdir = os.getcwd()
try:
os.chdir('data/')
stream = json.load(open('datapackage.json'))
parser = DataPackageParser(None)
parser.open(stream)
assert list(parser.extended_rows) == [
(1, ['id', 'name'], [1, 'english']),
(2, ['id', 'name'], [2, '中国人'])
]
finally:
os.chdir(curdir)
| mit | 4,696,167,510,624,650,000 | 24.528736 | 84 | 0.568663 | false |
AndyKrivovjas/notes | app/wsgi.py | 1 | 1418 | """
WSGI config for example project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "example.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | 6,965,944,710,248,095,000 | 43.3125 | 79 | 0.792666 | false |
coreymcdermott/artbot | artbot_scraper/spiders/verge_gallery.py | 1 | 1562 | # -*- coding: utf-8 -*-
import re
from dateutil import parser
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from artbot_scraper.items import EventItem
from pytz import timezone
class VergeGallerySpider(CrawlSpider):
name = 'Verge Gallery'
allowed_domains = ['verge-gallery.net']
start_urls = ['https://verge-gallery.net/category/current/']
rules = (Rule(LinkExtractor(allow=('/\d+\/\d+\/\d+'), deny=('eoi', '2017-program', 'twitter', 'facebook')), callback='parse_exhibition'),)
def parse_exhibition(self, response):
item = EventItem()
item['url'] = response.url
item['venue'] = self.name
item['title'] = response.xpath('//h2[contains(@class, "entry-title")]/text()').extract_first().strip()
item['description'] = response.xpath('//hr//following-sibling::p[1]//text()').extract_first().strip()
item['image'] = response.xpath('//figure[contains(@class, "featured-image")]//img/@src').extract_first().strip()
season = response.xpath('//h2[contains(@class, "entry-title")]/text()').extract_first()
match = re.search('(?P<start>(?<=\:\:\s)\w+\s+\d+)[\s\-\–]*(?P<end>\w+\s+\d+)', season)
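        # The season heading is expected to look like "TITLE :: March 3 - April 7":
        # the lookbehind anchors on ":: " and the two groups capture the start
        # and end "Month Day" tokens.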
if (match):
tz = timezone('Australia/Sydney')
item['start'] = tz.localize(parser.parse(match.group('start')))
item['end'] = tz.localize(parser.parse(match.group('end')))
yield item
| mit | -6,156,051,172,381,962,000 | 49.322581 | 152 | 0.58141 | false |
romses/LXC-Web-Panel | tests/browser.py | 1 | 4844 | import subprocess
import mechanize
import cookielib
import unittest
import shutil
import os
from flask import Flask
from flask.ext.testing import LiveServerTestCase
from lwp.app import app
from lwp.utils import connect_db
class TestWebBrowser(LiveServerTestCase):
"""
    These tests are made using a stateful programmatic web browser and use
    the cookie jar and standard login form to operate on the lwp.
"""
@classmethod
def setUpClass(cls):
# cleanup
shutil.copyfile('lwp.db.base', '/tmp/db.sql')
shutil.rmtree('/tmp/lxc', ignore_errors=True)
cj = cookielib.LWPCookieJar()
cls.br = mechanize.Browser()
cls.br.set_cookiejar(cj)
def create_app(self):
app.config['DATABASE'] = '/tmp/db.sql'
return app
def test_00_login(self):
"""
login with the standard admin/admin
"""
self.br.open(self.get_server_url() + "/login")
resp = self.br.response()
assert self.br.viewing_html()
# select login form and fill it
self.br.select_form(name="form-signin")
self.br['username'] = "admin"
self.br['password'] = "admin"
resp = self.br.submit()
assert '/home' in resp.geturl()
def test_01_home_render(self):
"""
we are now logged in, create a container and check that
it is displayed in home page, the stopped badge is displayed
"""
subprocess.check_output('lxc-create -n mocktest_00_lxc', shell=True)
self.br.open(self.get_server_url() + "/home")
resp = self.br.response().read()
assert self.br.viewing_html()
assert 'mocktest_00_lxc' in resp
assert 'Stopped' in resp
def test_02_start_container(self):
"""
the container exists, start it using /action and check badge on home
"""
self.br.open(self.get_server_url() + "/action?action=start&name=mocktest_00_lxc")
self.br.open(self.get_server_url() + "/home")
resp = self.br.response().read()
assert self.br.viewing_html()
assert 'mocktest_00_lxc' in resp
assert 'Running' in resp
def test_03_freeze_container(self):
"""
freeze the container using /action and check badge on home
"""
self.br.open(self.get_server_url() + "/action?action=freeze&name=mocktest_00_lxc")
self.br.open(self.get_server_url() + "/home")
resp = self.br.response().read()
assert self.br.viewing_html()
assert 'mocktest_00_lxc' in resp
assert 'Frozen' in resp
def test_04_unfreeze_container(self):
"""
unfreeze container using /action and check badge on home
"""
self.br.open(self.get_server_url() + "/action?action=unfreeze&name=mocktest_00_lxc")
self.br.open(self.get_server_url() + "/home")
resp = self.br.response().read()
assert self.br.viewing_html()
assert 'mocktest_00_lxc' in resp
assert 'Running' in resp
def test_05_stop_container(self):
"""
try to stop it
"""
self.br.open(self.get_server_url() + "/action?action=stop&name=mocktest_00_lxc")
self.br.open(self.get_server_url() + "/home")
resp = self.br.response().read()
assert self.br.viewing_html()
assert 'mocktest_00_lxc' in resp
assert 'Stopped' in resp
def test_06_refresh_info(self):
"""
the _refresh_info should return json object with host info
"""
self.br.open(self.get_server_url() + '/_refresh_info')
j_data = self.br.response().read()
assert 'cpu' in j_data
assert 'disk' in j_data
assert 'uptime' in j_data
def test_07_create_container(self):
"""
try to create "test_created_container"
"""
self.br.open(self.get_server_url() + "/home")
# select create-container form and fill it
self.br.select_form(name="create_container")
self.br['name'] = "test_created_container"
resp = self.br.submit()
assert '/home' in resp.geturl()
assert 'mocktest_00_lxc' in resp.read()
def test_08_create_token(self):
"""
        try to create the API token "mechanize_token"
"""
self.br.open(self.get_server_url() + "/lwp/tokens")
# select create-container form and fill it
self.br.select_form(name="lwp_token")
self.br['token'] = "mechanize_token"
self.br['description'] = "my_token_desc"
resp = self.br.submit()
body = resp.read()
assert '/lwp/tokens' in resp.geturl()
assert 'mechanize_token' in body
assert 'my_token_desc' in body
if __name__ == '__main__':
unittest.main()
| mit | 4,193,830,475,493,360,000 | 29.275 | 92 | 0.586705 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/usbcreator/backends/udisks/backend.py | 1 | 15116 | import dbus
import logging
from dbus.mainloop.glib import DBusGMainLoop
from usbcreator.backends.base import Backend
from usbcreator.misc import *
DISKS_IFACE = 'org.freedesktop.UDisks'
DEVICE_IFACE = 'org.freedesktop.UDisks.Device'
PROPS_IFACE = 'org.freedesktop.DBus.Properties'
import time
class UDisksBackend(Backend):
def __init__(self, allow_system_internal=False, bus=None, show_all=False):
Backend.__init__(self)
self.mounted_source = ''
self.formatting = []
self.show_all = show_all
self.allow_system_internal = allow_system_internal
logging.debug('UDisksBackend')
DBusGMainLoop(set_as_default=True)
if bus:
self.bus = bus
else:
self.bus = dbus.SystemBus()
udisks_obj = self.bus.get_object(DISKS_IFACE,
'/org/freedesktop/UDisks')
self.udisks = dbus.Interface(udisks_obj, DISKS_IFACE)
self.helper = self.bus.get_object('com.ubuntu.USBCreator',
'/com/ubuntu/USBCreator')
self.helper = dbus.Interface(self.helper, 'com.ubuntu.USBCreator')
# Adapted from udisk's test harness.
# This is why the entire backend needs to be its own thread.
def retry_mount(self, device):
'''Try to mount until it does not fail with "Busy".'''
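        # udisks can report a freshly plugged device as busy while it is still
        # probing it, so retry FilesystemMount (up to ~10 attempts, 0.3s apart)
        # instead of failing on the first Busy error.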
timeout = 10
dev_obj = self.bus.get_object(DISKS_IFACE, device)
props = dbus.Interface(dev_obj, dbus.PROPERTIES_IFACE)
device_i = dbus.Interface(dev_obj, DEVICE_IFACE)
while timeout >= 0:
if props.Get(device, 'device-is-mounted'):
break
try:
device_i.FilesystemMount('', [])
except dbus.DBusException, e:
if e._dbus_error_name != 'org.freedesktop.UDisks.Error.Busy':
raise
logging.debug('Busy.')
time.sleep(0.3)
timeout -= 1
# Device detection and processing functions.
def detect_devices(self):
        '''Start looking for new devices to add. Devices added will be sent to
        the frontend using the source/target added callbacks. Devices will only
        be added as they arrive if a main loop is present.'''
logging.debug('detect_devices')
self.bus.add_signal_receiver(self._device_added,
'DeviceAdded',
DISKS_IFACE,
DISKS_IFACE,
'/org/freedesktop/UDisks')
self.bus.add_signal_receiver(self._device_changed,
'DeviceChanged',
DISKS_IFACE,
DISKS_IFACE,
'/org/freedesktop/UDisks')
self.bus.add_signal_receiver(self._device_removed,
'DeviceRemoved',
DISKS_IFACE,
DISKS_IFACE,
'/org/freedesktop/UDisks')
def handle_reply(res):
for r in res:
self._device_added(r)
def handle_error(err):
logging.error('Unable to enumerate devices: %s' % str(err))
self.udisks.EnumerateDevices(reply_handler=handle_reply,
error_handler=handle_error)
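    # Hypothetical usage sketch (the callback names are the attributes this
    # class invokes; how a frontend wires them up is defined elsewhere):
    #   backend = UDisksBackend()
    #   backend.source_added_cb = on_source_added
    #   backend.target_added_cb = on_target_added
    #   backend.detect_devices()  # devices then arrive via the GLib main loop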
def _device_added(self, device):
logging.debug('device_added: %s' % device)
udisks_obj = self.bus.get_object(DISKS_IFACE, device)
d = dbus.Interface(udisks_obj, 'org.freedesktop.DBus.Properties')
if d.Get(device, 'device-is-optical-disc'):
self._add_cd(device)
if (self.allow_system_internal or
not d.Get(device, 'device-is-system-internal')):
if d.Get(device, 'device-is-partition'):
self._add_partition(device)
elif d.Get(device, 'device-is-drive'):
if not d.Get(device, 'device-is-optical-disc'):
self._add_drive(device)
def _device_changed(self, device):
udisks_obj = self.bus.get_object(DISKS_IFACE, device)
d = dbus.Interface(udisks_obj, 'org.freedesktop.DBus.Properties')
logging.debug('device change %s' % str(device))
        # As this will happen in the same event, the frontend won't change
        # (though it needs to make sure the list is sorted, otherwise it will).
self._device_removed(device)
self._device_added(device)
def _add_cd(self, device):
logging.debug('cd added: %s' % device)
dk = self.bus.get_object(DISKS_IFACE, device)
def get(prop):
return dk.Get(device, prop, dbus_interface=PROPS_IFACE)
label = get('id-label')
if not get('device-is-mounted'):
try:
mp = dk.FilesystemMount('', [], dbus_interface=DEVICE_IFACE)
except dbus.DBusException, e:
logging.exception('Could not mount the device:')
return
mount = get('device-mount-paths')[0]
device_file = get('device-file')
total, free = fs_size(mount)
self.sources[device] = {
'device' : device_file,
'size' : total,
'label' : label,
'type' : SOURCE_CD,
}
if callable(self.source_added_cb):
self.source_added_cb(device)
def _add_partition(self, device):
logging.debug('partition added: %s' % device)
dk = self.bus.get_object(DISKS_IFACE, device)
def get(prop):
return dk.Get(device, prop, dbus_interface=PROPS_IFACE)
model = get('DriveModel')
vendor = get('DriveVendor')
fstype = get('id-type')
logging.debug('id-type: %s' % fstype)
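        # Only FAT-formatted partitions can be used as-is; anything else is
        # offered to the user as needing a format first.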
if fstype == 'vfat':
status = CAN_USE
else:
status = NEED_FORMAT
label = get('id-label')
logging.debug('id-label: %s' % label)
parent = get('partition-slave')
if fstype == 'vfat' and not get('device-is-mounted'):
parent_i = self.bus.get_object(DISKS_IFACE, parent)
parent_f = parent_i.Get(parent, 'device-file', dbus_interface=PROPS_IFACE)
if device not in self.formatting and parent not in self.formatting:
try:
self.retry_mount(device)
except:
logging.exception('Could not mount the device:')
return
mount = get('device-mount-paths') or ''
if mount:
mount = mount[0]
total, free = fs_size(mount)
else:
# FIXME evand 2009-09-11: This is going to have weird side effects.
# If the device cannot be mounted, but is a vfat filesystem, that
# is. Is this really the right approach?
total = get('partition-size')
free = -1
logging.debug('mount: %s' % mount)
device_file = get('device-file')
if total > 0:
self.targets[unicode(device)] = {
'vendor' : vendor,
'model' : model,
'label' : unicode(label),
'free' : free,
'device' : unicode(device_file),
'capacity' : total,
'status' : status,
'mountpoint' : mount,
'persist' : 0,
'parent' : unicode(parent),
'formatting' : False,
}
self._update_free(unicode(device))
if self.show_all:
if callable(self.target_added_cb):
self.target_added_cb(device)
else:
if status != NEED_FORMAT:
if unicode(parent) in self.targets:
if callable(self.target_removed_cb):
self.target_removed_cb(parent)
if callable(self.target_added_cb):
self.target_added_cb(device)
else:
logging.debug('not adding device: 0 byte partition.')
def _add_drive(self, device):
logging.debug('disk added: %s' % device)
dk = self.bus.get_object(DISKS_IFACE, device)
def get(prop):
return dk.Get(device, prop, dbus_interface=PROPS_IFACE)
model = get('DriveModel')
vendor = get('DriveVendor')
device_file = get('device-file')
size = get('device-size')
if size > 0:
self.targets[unicode(device)] = {
'vendor' : vendor,
'model' : model,
'label' : '',
'free' : -1,
'device' : unicode(device_file),
'capacity' : size,
'status' : NEED_FORMAT,
'mountpoint' : None,
'persist' : 0,
'parent' : None,
'formatting' : False,
}
if callable(self.target_added_cb):
if self.show_all:
self.target_added_cb(device)
else:
children = [x for x in self.targets
if self.targets[x]['parent'] == unicode(device) and
self.targets[x]['status'] != NEED_FORMAT]
if not children:
self.target_added_cb(device)
else:
logging.debug('not adding device: 0 byte disk.')
def _device_removed(self, device):
logging.debug('Device has been removed from the system: %s' % device)
if device in self.sources:
if callable(self.source_removed_cb):
self.source_removed_cb(device)
self.sources.pop(device)
elif device in self.targets:
if callable(self.target_removed_cb):
self.target_removed_cb(device)
self.targets.pop(device)
# Device manipulation functions.
def _is_casper_cd(self, filename):
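        # A usable Ubuntu live CD/ISO carries a /.disk/info file describing the
        # release; isoinfo pulls it out of the image without mounting it.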
cmd = ['isoinfo', '-J', '-i', filename, '-x', '/.disk/info']
try:
output = popen(cmd, stderr=None)
if output:
return output
except USBCreatorProcessException:
# TODO evand 2009-07-26: Error dialog.
logging.error('Could not extract .disk/info.')
return None
def open(self, udi):
mp = self.targets[udi]['mountpoint']
if not mp:
try:
dk = self.bus.get_object(DISKS_IFACE, udi)
mp = dk.FilesystemMount('', [], dbus_interface=DEVICE_IFACE)
except dbus.DBusException:
logging.exception('Could not mount the device:')
return ''
try:
popen(['mount', '-o', 'remount,rw', mp])
except USBCreatorProcessException:
logging.exception('Could not mount the device:')
return ''
return mp
def format_done(self, dev=None):
if dev in self.targets:
p = self.targets[dev]['parent']
if p and p in self.targets:
dev = p
self.targets[dev]['formatting'] = False
self.formatting.remove(dev)
def format_failed(self, message, dev=None):
self.format_done(dev)
self.format_failed_cb(message)
def format(self, device):
try:
dk = self.bus.get_object(DISKS_IFACE, device)
dev = dk.Get(device, 'device-file', dbus_interface=PROPS_IFACE)
if dk.Get(dev, 'device-is-partition', dbus_interface=PROPS_IFACE):
dev = dk.Get(dev, 'partition-slave', dbus_interface=PROPS_IFACE)
dk = self.bus.get_object(DISKS_IFACE, dev)
dev = dk.Get(device, 'device-file', dbus_interface=PROPS_IFACE)
p = self.targets[device]['parent']
if p and p in self.targets:
self.formatting.append(p)
self.targets[p]['formatting'] = True
else:
self.formatting.append(device)
self.targets[device]['formatting'] = True
self.helper.Format(dev, self.allow_system_internal,
# There must be a better way...
reply_handler=lambda: self.format_done(device),
error_handler=lambda x: self.format_failed(x, device))
except dbus.DBusException:
# Could not talk to usb-creator-helper or devkit.
logging.exception('Could not format the device:')
def install(self, source, target, persist, allow_system_internal=False):
# TODO evand 2009-07-31: Lock source and target.
logging.debug('install source: %s' % source)
logging.debug('install target: %s' % target)
logging.debug('install persistence: %d' % persist)
# There's no going back now...
self.bus.remove_signal_receiver(self._device_added,
'DeviceAdded',
DISKS_IFACE,
DISKS_IFACE,
'/org/freedesktop/UDisks')
self.bus.remove_signal_receiver(self._device_changed,
'DeviceChanged',
DISKS_IFACE,
DISKS_IFACE,
'/org/freedesktop/UDisks')
self.bus.remove_signal_receiver(self._device_removed,
'DeviceRemoved',
DISKS_IFACE,
DISKS_IFACE,
'/org/freedesktop/UDisks')
stype = self.sources[source]['type']
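        # How the source is prepared depends on its type: physical CDs are
        # mounted through udisks, ISO files are mounted via the privileged
        # helper, and raw disk images skip the filesystem mount entirely (the
        # base class writes them to the device).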
if stype == SOURCE_CD:
dk = self.bus.get_object(DISKS_IFACE, source)
def get(prop):
return dk.Get(source, prop, dbus_interface=PROPS_IFACE)
if not get('device-is-mounted'):
source = dk.FilesystemMount('', [], dbus_interface=DEVICE_IFACE)
else:
source = get('device-mount-paths')[0]
elif stype == SOURCE_ISO:
isofile = self.sources[source]['device']
source = self.helper.MountISO(isofile)
self.mounted_source = source
dk = self.bus.get_object(DISKS_IFACE, target)
def get(prop):
return dk.Get(target, prop, dbus_interface=PROPS_IFACE)
dev = get('device-file')
if stype == SOURCE_IMG:
target = None
self.helper.Unmount(target)
else:
if not get('device-is-mounted'):
target = dk.FilesystemMount('', [], dbus_interface=DEVICE_IFACE)
else:
target = get('device-mount-paths')[0]
self.helper.RemountRW(dev)
Backend.install(self, source, target, persist, device=dev,
allow_system_internal=allow_system_internal)
def cancel_install(self):
Backend.cancel_install(self)
self.unmount()
def unmount(self):
try:
if self.mounted_source:
self.helper.UnmountFile(self.mounted_source)
except:
# TODO let the user know via the frontend.
logging.exception('Could not unmount the source ISO.')
def shutdown(self):
try:
self.helper.Shutdown()
except dbus.DBusException:
logging.exception('Could not shut down the dbus service.')
| gpl-3.0 | -237,449,567,481,498,020 | 39.309333 | 86 | 0.538833 | false |
jesusaurus/openstack-tests | nova/tests/10ssh.py | 1 | 3384 | # Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import subprocess as sp
import multiprocessing as mp
from time import sleep
from datetime import datetime
import csv
logger = logging.getLogger('nova_test.ssh')
queue = mp.Queue()
times = mp.Queue()
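# 'queue' carries exceptions out of the worker processes so run() can re-raise
# them in the parent; 'times' collects the per-host ssh timing results.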
def ssh(user, host):
tryLimit = 5
count = 0
result = {}
result['host'] = host
result[host] = {}
result[host]['ssh_open'] = datetime.now()
backoff = 1
factor = 2
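    # Exponential backoff between attempts: sleep factor ** backoff seconds
    # (2, 4, 8, ... with the defaults above) after each failed try.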
while count < tryLimit:
try:
proc = sp.Popen(['ssh',
'-o StrictHostKeyChecking=no',
'-o UserKnownHostsFile=/dev/null',
'{0}@{1}'.format(user,host),
'/bin/true'],
stdout=sp.PIPE,
stderr=sp.PIPE)
(out, err) = proc.communicate()
            if proc.returncode == 0:
result[host]['ssh_close'] = datetime.now()
result[host]['ssh_total'] = result[host]['ssh_close'] - result[host]['ssh_open']
times.put(result)
if out not in [None, ""]:
logger.debug(out.strip())
logger.info("Successful ssh to {0}".format(host))
break
else:
logger.info("Unsuccessful ssh to {0}".format(host))
if out not in [None, ""]:
logger.info(out.strip())
if err not in [None, ""]:
logger.warn(err.strip())
except Exception as e:
queue.put(e)
logger.debug("Sleeping for {0}".format(factor ** backoff))
sleep(factor ** backoff)
backoff += 1
count += 1
if 'ssh_total' not in result[host]:
msg = 'Could not ssh to {0}'.format(host)
logger.error(msg)
queue.put(Exception(msg))
def run(servers, **kwargs):
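    # One worker process is spawned per server IP; exceptions raised in the
    # workers are re-raised here once every worker has been joined.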
logger.info('Entering ssh test')
ips = [ servers[x]['ip'] for x in servers.keys() ]
procs = {}
for ip in ips:
procs[ip] = mp.Process(target=ssh, args=('ubuntu', ip))
procs[ip].start()
for ip in ips:
procs[ip].join()
if not queue.empty():
logger.error('At least one exception raised, reraising.')
raise queue.get()
while not times.empty():
time = times.get()
for server in servers.keys():
if servers[server]['ip'] == time['host']:
servers[server]['time'].update(time[time['host']])
with open('ssh.csv', 'w+b') as f:
output = csv.writer(f)
output.writerow([ i for i in range(len(servers.keys())) ])
output.writerow([ servers[x]['time']['ssh_total'].seconds / 60.0 for x in servers.keys() ])
| apache-2.0 | -2,958,073,583,366,884,000 | 32.84 | 99 | 0.5526 | false |
bistromath/gr-air-modes | python/html_template.py | 1 | 6101 | #!/usr/bin/env python
#HTML template for Mode S map display
#Nick Foster, 2013
def html_template(my_apikey, my_position, json_file):
if my_position is None:
my_position = [37, -122]
return """
<html>
<head>
<title>ADS-B Aircraft Map</title>
<meta name="viewport" content="initial-scale=1.0, user-scalable=no" />
<meta http-equiv="content-type" content="text/html;charset=utf-8" />
<style type="text/css">
.labels {
color: blue;
background-color: white;
font-family: "Lucida Grande", "Arial", sans-serif;
font-size: 13px;
font-weight: bold;
text-align: center;
width: 70px;
border: none;
white-space: nowrap;
}
</style>
<script type="text/javascript" src="http://maps.google.com/maps/api/js?key=%s">
</script>
<script type="text/javascript" src="https://raw.githubusercontent.com/googlemaps/v3-utility-library/master/markerwithlabel/src/markerwithlabel.js">
</script>
<script type="text/javascript">
var map;
var markers = [];
var defaultLocation = new google.maps.LatLng(%f, %f);
var defaultZoomLevel = 9;
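      // The aircraft list is polled once a second: requestJSONP() injects a
      // <script> tag pointing at the JSON file (cache-busted with a random
      // query string) and the response invokes jsonp_callback() below.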
function requestJSONP() {
var script = document.createElement("script");
script.src = "%s?" + Math.random();
script.params = Math.random();
document.getElementsByTagName('head')[0].appendChild(script);
};
var planeMarker;
var planes = [];
function clearMarkers() {
for (var i = 0; i < planes.length; i++) {
planes[i].setMap(null);
}
planes = [];
};
function jsonp_callback(results) { // from JSONP
airplanes = {};
for (var i = 0; i < results.length; i++) {
airplanes[results[i].icao] = {
center: new google.maps.LatLng(results[i].lat, results[i].lon),
heading: results[i].hdg,
altitude: results[i].alt,
type: results[i].type,
ident: results[i].ident,
speed: results[i].speed,
vertical: results[i].vertical,
highlight: results[i].highlight
};
}
// clearMarkers();
refreshIcons();
}
function refreshIcons() {
//prune the list
for(var i = 0; i < planes.length; i++) {
icao = planes[i].get("icao")
if(!(icao in airplanes)) {
planes[i].setMap(null)
planes.splice(i, 1);
};
};
for (var airplane in airplanes) {
if (airplanes[airplane].highlight != 0) {
icon_file = "http://www.nerdnetworks.org/~bistromath/airplane_sprite_highlight.png";
} else {
icon_file = "http://www.nerdnetworks.org/~bistromath/airplane_sprite.png";
};
var plane_icon = {
url: icon_file,
size: new google.maps.Size(128,128),
origin: new google.maps.Point(parseInt(airplanes[airplane].heading/10)*128,0),
anchor: new google.maps.Point(64,64),
//scaledSize: new google.maps.Size(4608,126)
};
if (airplanes[airplane].ident.length != 8) {
identstr = airplane;
} else {
identstr = airplanes[airplane].ident;
};
var planeOptions = {
map: map,
position: airplanes[airplane].center,
icao: airplane,
icon: plane_icon,
labelContent: identstr,
labelAnchor: new google.maps.Point(35, -32),
labelClass: "labels",
labelStyle: {opacity: 0.75}
};
var i = 0;
for(i; i<planes.length; i++) {
if(planes[i].get("icao") == airplane) {
planes[i].setPosition(airplanes[airplane].center);
if(planes[i].get("icon") != plane_icon) {
planes[i].setIcon(plane_icon); //handles highlight and heading
};
if(planes[i].get("labelContent") != identstr) {
planes[i].set("labelContent", identstr);
};
break;
};
};
if(i == planes.length) {
planeMarker = new MarkerWithLabel(planeOptions);
planes.push(planeMarker);
};
};
};
function initialize()
{
var myOptions =
{
zoom: defaultZoomLevel,
center: defaultLocation,
disableDefaultUI: true,
mapTypeId: google.maps.MapTypeId.TERRAIN
};
map = new google.maps.Map(document.getElementById("map_canvas"), myOptions);
requestJSONP();
setInterval("requestJSONP()", 1000);
};
</script>
</head>
<body onload="initialize()">
<div id="map_canvas" style="width:100%%; height:100%%">
</div>
</body>
</html>""" % (my_apikey, my_position[0], my_position[1], json_file)
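# Illustrative usage (the API key, coordinates and file names below are
# placeholder values, not taken from this project):
#   page = html_template("YOUR-MAPS-API-KEY", [37.62, -122.38], "modes.json")
#   open("modes_map.html", "w").write(page)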
| gpl-3.0 | -5,583,018,547,611,028,000 | 38.36129 | 155 | 0.431733 | false |
petrutlucian94/nova_dev | nova/virt/hyperv/volumeops.py | 1 | 14466 | # Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import ctypes
import hashlib
import os
import time
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova import paths
from nova import utils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyper_volumeops_opts = [
cfg.IntOpt('volume_attach_retry_count',
default=10,
help='The number of times to retry to attach a volume'),
cfg.IntOpt('volume_attach_retry_interval',
default=5,
help='Interval between volume attachment attempts, in seconds'),
cfg.IntOpt('mounted_disk_query_retry_count',
default=10,
help='The number of times to retry checking for a disk mounted '
'via iSCSI.'),
cfg.IntOpt('mounted_disk_query_retry_interval',
default=5,
help='Interval between checks for a mounted iSCSI '
'disk, in seconds.'),
cfg.StrOpt('smbfs_mount_point_base',
default='C:\OpenStack\Instances\_mnt',
help='Directory where smbfs links are created to SMB shares'),
]
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts, 'hyperv')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
class VolumeOps(object):
"""
Management class for Volume-related tasks
"""
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
self._initiator = None
self._default_root_device = 'vda'
def ebs_root_in_block_devices(self, block_device_info):
return self._volutils.volume_in_mapping(self._default_root_device,
block_device_info)
def attach_volumes(self, block_device_info, instance_name, ebs_root):
mapping = driver.block_device_info_get_mapping(block_device_info)
if ebs_root:
self.attach_volume(mapping[0]['connection_info'],
instance_name, True)
mapping = mapping[1:]
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
def login_storage_targets(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self._login_storage_target(vol['connection_info'])
def _login_storage_target(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
# Check if we already logged in
if self._volutils.get_device_number_for_target(target_iqn, target_lun):
LOG.debug(_("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s"),
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
else:
LOG.debug(_("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s"),
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
self._volutils.login_storage_target(target_lun, target_iqn,
target_portal)
# Wait for the target to be mounted
self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""
Attach a volume to the SCSI controller or to the IDE controller if
ebs_root is True
"""
target_iqn = None
LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s"),
{'connection_info': connection_info,
'instance_name': instance_name})
try:
self._login_storage_target(connection_info)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
if ebs_root:
#Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
#Attaching to the first slot
slot = 0
else:
#Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
mounted_disk_path)
except Exception as exn:
LOG.exception(_('Attach volume failed: %s'), exn)
if target_iqn:
self._volutils.logout_storage_target(target_iqn)
raise vmutils.HyperVException(_('Unable to attach volume '
'to instance %s') % instance_name)
def _get_free_controller_slot(self, scsi_controller_path):
        #Slots start from 0, so the number of attached disks gives us the free slot
return self._vmutils.get_attached_disks_count(scsi_controller_path)
def detach_volumes(self, block_device_info, instance_name):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self.detach_volume(vol['connection_info'], instance_name)
def logout_storage_target(self, target_iqn):
LOG.debug(_("Logging off storage target %s"), target_iqn)
self._volutils.logout_storage_target(target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Detach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s"),
{'connection_info': connection_info,
'instance_name': instance_name})
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
#Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
LOG.debug(_("Detaching physical disk from instance: %s"),
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
self.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
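        # The connector dict is what gets handed to Cinder when initializing a
        # volume connection, e.g. {'ip': '10.0.0.2', 'host': 'hyperv-01',
        # 'initiator': 'iqn.1991-05.com.microsoft:hyperv-01'} (values shown
        # here are illustrative).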
return {
'ip': CONF.my_ip,
'host': CONF.host,
'initiator': self._initiator,
}
def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
wait_for_device=False):
# The WMI query in get_device_number_for_target can incorrectly
# return no data when the system is under load. This issue can
# be avoided by adding a retry.
for i in xrange(CONF.hyperv.mounted_disk_query_retry_count):
device_number = self._volutils.get_device_number_for_target(
target_iqn, target_lun)
if device_number is None:
attempt = i + 1
LOG.debug(_('Attempt %d to get device_number '
'from get_device_number_for_target failed. '
'Retrying...') % attempt)
time.sleep(CONF.hyperv.mounted_disk_query_retry_interval)
else:
break
if device_number is None:
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s') % target_iqn)
LOG.debug(_('Device number: %(device_number)s, '
'target lun: %(target_lun)s'),
{'device_number': device_number, 'target_lun': target_lun})
#Finding Mounted disk drive
for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
if mounted_disk_path or not wait_for_device:
break
time.sleep(CONF.hyperv.volume_attach_retry_interval)
if not mounted_disk_path:
raise exception.NotFound(_('Unable to find a mounted disk '
'for target_iqn: %s') % target_iqn)
return mounted_disk_path
def disconnect_volume(self, physical_drive_path):
#Get the session_id of the ISCSI connection
session_id = self._volutils.get_session_id_from_mounted_disk(
physical_drive_path)
#Logging out the target
self._volutils.execute_log_out(session_id)
def get_target_from_disk_path(self, physical_drive_path):
return self._volutils.get_target_from_disk_path(physical_drive_path)
class HyperVSMBFSVolumeOps(VolumeOps):
def __init__(self):
super(HyperVSMBFSVolumeOps, self).__init__()
self.mount_base = CONF.hyperv.smbfs_mount_point_base
self.pathutils = utilsfactory.get_pathutils()
@staticmethod
def get_hash_str(base_str):
"""returns string that represents hash of base_str (in hex format)."""
return hashlib.md5(base_str).hexdigest()
def get_local_disk_path(self, connection_info):
export = connection_info['data']['export']
disk_name = connection_info['data']['name']
export_hash = self.get_hash_str(export)
disk_path = os.path.join(self.mount_base, export_hash, disk_name)
return disk_path
def _mount_smbfs(self, export_path, options=None):
smb_opts = {}
if options:
username = options.get('username')
password = options.get('password')
if username != 'guest':
smb_opts['UserName'] = username
smb_opts['Password'] = password
self.pathutils.mount_smb(export_path, smb_opts)
def _ensure_mounted(self, export_path, options=None):
if not os.path.isdir(self.mount_base):
os.makedirs(self.mount_base)
export_hash = self.get_hash_str(export_path)
norm_path = export_path.replace('/', '\\')
self._mount_smbfs(norm_path, options)
link_path = os.path.join(self.mount_base, export_hash)
if os.path.exists(link_path):
if not self.pathutils.is_symlink(link_path):
raise vmutils.HyperVException(_("Link path already exists "
"and its not a symlink"))
else:
self.pathutils.create_sym_link(
link_path, norm_path, target_is_dir=True)
def parse_options(self, option_str):
opts_dict = {}
opts_list = []
if option_str:
for i in option_str.split():
if i == '-o':
continue
for j in i.split(','):
tmp_opt = j.split('=')
if len(tmp_opt) > 1:
opts_dict[tmp_opt[0]] = tmp_opt[1]
else:
opts_list.append(tmp_opt[0])
return opts_list, opts_dict
def attach_volume(self, connection_info, instance_name):
opts_str = connection_info['data'].get('options')
opts = self.parse_options(opts_str)
export = connection_info['data']['export']
self._ensure_mounted(export, opts[1])
disk_path = self.get_local_disk_path(connection_info)
try:
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
self._vmutils.attach_ide_drive(instance_name,
disk_path,
ctrller_path,
slot,
is_scsi=True)
except Exception as exn:
LOG.exception(_('Attach volume failed: %s'), exn)
raise vmutils.HyperVException(_('Unable to attach volume '
'to instance %s') % instance_name)
def detach_volume(self, connection_info, instance_name):
LOG.debug(_("Detach_volume: %(connection_info)s "
"from %(instance_name)s"),
{'connection_info': connection_info,
'instance_name': instance_name})
mounted_disk_path = self.get_local_disk_path(connection_info)
self._vmutils.detach_vhd_disk(
instance_name, mounted_disk_path)
| apache-2.0 | -8,743,473,869,114,289,000 | 40.688761 | 79 | 0.564427 | false |
okfse/froide | froide/account/tests.py | 1 | 26629 | import re
import datetime
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from django.utils.six import text_type as str
from django.test import TestCase
from django.contrib.admin.sites import AdminSite
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.core import mail
from django.contrib.messages.storage import default_storage
from froide.publicbody.models import PublicBody
from froide.foirequest.models import FoiRequest, FoiMessage
from froide.foirequest.tests import factories
from .models import AccountManager
from .utils import merge_accounts
from .admin import UserAdmin
User = get_user_model()
class AccountTest(TestCase):
def setUp(self):
factories.make_world()
def test_account_page(self):
ok = self.client.login(username='sw', password='wrong')
self.assertFalse(ok)
ok = self.client.login(username='sw', password='froide')
self.assertTrue(ok)
response = self.client.get(reverse('account-show'))
self.assertEqual(response.status_code, 200)
def test_login_page(self):
self.client.logout()
response = self.client.get(reverse('account-show'))
self.assertEqual(response.status_code, 302)
self.client.get(reverse('account-login'))
response = self.client.post(reverse('account-login'),
{"email": "[email protected]",
"password": "foobar"})
self.assertEqual(response.status_code, 400)
response = self.client.post(reverse('account-login'),
{"email": "[email protected]",
"password": "dummy"})
self.assertEqual(response.status_code, 400)
response = self.client.post(reverse('account-login'),
{"email": "[email protected]",
"password": "froide"})
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('account-show'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('account-login'),
{"email": "[email protected]",
"password": "froide"})
# already logged in, login again gives 302
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('account-show'), response.url)
response = self.client.get(reverse('account-logout'))
self.assertEqual(response.status_code, 405)
response = self.client.post(reverse('account-logout'))
self.assertEqual(response.status_code, 302)
response = self.client.get(reverse('account-login') + "?simple")
self.assertIn("simple_base.html", map(lambda x: x.name,
response.templates))
response = self.client.post(reverse('account-login') + "?simple",
{"email": "[email protected]",
"password": "froide"})
self.assertTrue(response.status_code, 302)
self.assertIn("simple", response.url)
user = User.objects.get(email="[email protected]")
user.is_active = False
user.save()
self.client.logout()
response = self.client.post(reverse('account-login'),
{"email": "[email protected]",
"password": "froide"})
# inactive users can't login
self.assertEqual(response.status_code, 400)
response = self.client.get(reverse('account-show'))
self.assertEqual(response.status_code, 302)
def test_signup(self):
mail.outbox = []
post = {"first_name": "Horst",
"last_name": "Porst",
"organization": "Porst AG",
"terms": "on",
"user_email": "horst.porst"}
self.client.login(username='sw', password='froide')
response = self.client.post(reverse('account-signup'), post)
self.assertTrue(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
self.client.logout()
response = self.client.post(reverse('account-signup'), post)
self.assertEqual(response.status_code, 400)
post['user_email'] = '[email protected]'
post['address'] = 'MyOwnPrivateStree 5\n31415 Pi-Ville'
response = self.client.post(reverse('account-signup'), post)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email=post['user_email'])
self.assertEqual(user.first_name, post['first_name'])
self.assertEqual(user.last_name, post['last_name'])
self.assertEqual(user.address, post['address'])
self.assertEqual(user.organization, post['organization'])
self.assertEqual(mail.outbox[0].to[0], post['user_email'])
# sign up with email that is not confirmed
response = self.client.post(reverse('account-signup'), post)
self.assertTrue(response.status_code, 400)
# sign up with email that is confirmed
message = mail.outbox[0]
match = re.search('/%d/(\w+)/' % user.pk, message.body)
response = self.client.get(reverse('account-confirm',
kwargs={'user_id': user.pk,
'secret': match.group(1)}))
self.assertEqual(response.status_code, 302)
self.client.logout()
user = User.objects.get(id=user.pk)
self.assertTrue(user.is_active)
response = self.client.post(reverse('account-signup'), post)
self.assertTrue(response.status_code, 400)
def test_overlong_name_signup(self):
post = {
"first_name": "Horst" * 6 + 'a',
"last_name": "Porst" * 6,
"terms": "on",
"user_email": '[email protected]',
"address": 'MyOwnPrivateStree 5\n31415 Pi-Ville'
}
self.client.logout()
response = self.client.post(reverse('account-signup'), post)
self.assertEqual(response.status_code, 400)
post['first_name'] = post['first_name'][:-1]
response = self.client.post(reverse('account-signup'), post)
self.assertEqual(response.status_code, 302)
def test_signup_same_name(self):
self.client.logout()
post = {
"first_name": "Horst",
"last_name": "Porst",
"terms": "on",
"user_email": '[email protected]',
"address": 'MyOwnPrivateStree 5\n31415 Pi-Ville'
}
response = self.client.post(reverse('account-signup'), post)
self.assertEqual(response.status_code, 302)
post['user_email'] = '[email protected]'
response = self.client.post(reverse('account-signup'), post)
self.assertEqual(response.status_code, 302)
user = User.objects.get(email='[email protected]')
self.assertEqual(user.username, 'h.porst_1')
def test_confirmation_process(self):
self.client.logout()
user, password = AccountManager.create_user(first_name=u"Stefan",
last_name=u"Wehrmeyer", user_email="[email protected]",
address=u"SomeRandomAddress\n11234 Bern", private=True)
AccountManager(user).send_confirmation_mail(password=password)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
match = re.search('/%d/(\w+)/' % user.pk, message.body)
response = self.client.get(reverse('account-confirm',
kwargs={'user_id': user.pk,
'secret': match.group(1)}))
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('account-show'), response.url)
response = self.client.get(response.url)
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('account-show'))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('account-confirm',
kwargs={'user_id': user.pk,
'secret': 'a' * 32}))
self.assertEqual(response.status_code, 302)
self.client.logout()
response = self.client.get(reverse('account-confirm',
kwargs={'user_id': user.pk,
'secret': match.group(1)}))
# user is already active, link does not exist
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, reverse('account-login'))
# deactivate user
user = User.objects.get(pk=user.pk)
user.is_active = False
# set last_login back artificially so it's not the same
# as in secret link
user.last_login = user.last_login - datetime.timedelta(seconds=10)
user.save()
response = self.client.get(reverse('account-confirm',
kwargs={'user_id': user.pk,
'secret': match.group(1)}))
# user is inactive, but link was already used
self.assertEqual(response.status_code, 302)
self.assertIn(reverse('account-login'), response.url)
def test_next_link_login(self):
mes = FoiMessage.objects.all()[0]
url = mes.get_absolute_url()
enc_url = url.replace('#', '%23') # FIX: fake uri encode
response = self.client.get(reverse('account-login') + '?next=%s' % enc_url)
# occurences in hidden inputs of login, signup and forgotten password
self.assertTrue(response.content.decode('utf-8').count(url), 3)
response = self.client.post(reverse('account-login'),
{"email": "[email protected]",
'next': url,
"password": "froide"})
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.endswith(url))
def test_next_link_signup(self):
self.client.logout()
mail.outbox = []
mes = FoiMessage.objects.all()[0]
url = mes.get_absolute_url()
post = {
"first_name": "Horst",
"last_name": "Porst",
"terms": "on",
'user_email': '[email protected]',
'address': 'MyOwnPrivateStree 5\n31415 Pi-Ville',
'next': url
}
response = self.client.post(reverse('account-signup'), post)
self.assertTrue(response.status_code, 302)
user = User.objects.get(email=post['user_email'])
message = mail.outbox[0]
match = re.search('/%d/(\w+)/' % user.pk, message.body)
response = self.client.get(reverse('account-confirm',
kwargs={'user_id': user.pk,
'secret': match.group(1)}))
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.endswith(url))
def test_change_password(self):
response = self.client.get(reverse('account-change_password'))
self.assertEqual(response.status_code, 405)
data = {"new_password1": "froide1",
"new_password2": "froide2"}
response = self.client.post(reverse('account-change_password'), data)
self.assertEqual(response.status_code, 403)
ok = self.client.login(username='sw', password='froide')
response = self.client.post(reverse('account-change_password'), data)
self.assertEqual(response.status_code, 400)
data["new_password2"] = "froide1"
response = self.client.post(reverse('account-change_password'), data)
self.assertEqual(response.status_code, 302)
self.client.logout()
ok = self.client.login(username='sw', password='froide')
self.assertFalse(ok)
ok = self.client.login(username='sw', password='froide1')
self.assertTrue(ok)
def test_send_reset_password_link(self):
mail.outbox = []
response = self.client.get(reverse('account-send_reset_password_link'))
self.assertEqual(response.status_code, 405)
ok = self.client.login(username='sw', password='froide')
data = {"email": "[email protected]"}
response = self.client.post(reverse('account-send_reset_password_link'))
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
self.client.logout()
response = self.client.post(reverse('account-send_reset_password_link'), data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
data['email'] = '[email protected]'
response = self.client.post(reverse('account-send_reset_password_link'), data)
self.assertEqual(response.status_code, 302)
message = mail.outbox[0]
match = re.search('/account/reset/([^/]+)/([^/]+)/', message.body)
uidb64 = match.group(1)
token = match.group(2)
response = self.client.get(reverse('account-password_reset_confirm',
kwargs={"uidb64": uidb64, "token": "2y1-d0b8c8b186fdc63ccc6"}))
self.assertEqual(response.status_code, 200)
self.assertFalse(response.context['validlink'])
response = self.client.get(reverse('account-password_reset_confirm',
kwargs={"uidb64": uidb64, "token": token}))
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['validlink'])
data = {"new_password1": "froide4",
"new_password2": "froide4"}
response = self.client.post(reverse('account-password_reset_confirm',
kwargs={"uidb64": uidb64, "token": token}), data)
self.assertEqual(response.status_code, 302)
# we are already logged in after redirect
# due to extra magic in wrapping view
response = self.client.get(reverse('account-show'))
self.assertEqual(response.status_code, 200)
self.client.logout()
ok = self.client.login(username='sw', password='froide4')
self.assertTrue(ok)
def test_next_password_reset(self):
mail.outbox = []
mes = FoiMessage.objects.all()[0]
url = mes.get_absolute_url()
data = {
'email': '[email protected]',
'next': url
}
response = self.client.post(reverse('account-send_reset_password_link'), data)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.endswith(url))
message = mail.outbox[0]
match = re.search('/account/reset/([^/]+)/([^/]+)/', message.body)
uidb64 = match.group(1)
token = match.group(2)
response = self.client.get(reverse('account-password_reset_confirm',
kwargs={"uidb64": uidb64, "token": token}))
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['validlink'])
data = {"new_password1": "froide4",
"new_password2": "froide4"}
response = self.client.post(reverse('account-password_reset_confirm',
kwargs={"uidb64": uidb64, "token": token}), data)
self.assertEqual(response.status_code, 302)
self.assertTrue(response.url.endswith(url))
def test_private_name(self):
user = User.objects.get(username="dummy")
user.private = True
user.save()
self.client.login(username='dummy', password='froide')
pb = PublicBody.objects.all()[0]
post = {"subject": "Request - Private name",
"body": "This is a test body",
"public": "on",
"law": pb.default_law.pk}
response = self.client.post(reverse('foirequest-submit_request',
kwargs={"public_body": pb.slug}), post)
self.assertEqual(response.status_code, 302)
req = FoiRequest.objects.filter(user=user, public_body=pb).order_by("-id")[0]
self.client.logout() # log out to remove Account link
response = self.client.get(reverse('foirequest-show',
kwargs={"slug": req.slug}))
self.assertEqual(response.status_code, 200)
self.assertNotIn(user.get_full_name().encode("utf-8"),
response.content)
self.assertNotIn(user.last_name.encode("utf-8"),
response.content)
self.assertEqual('', user.get_absolute_url())
def test_change_address(self):
data = {}
response = self.client.post(reverse('account-change_address'), data)
self.assertEqual(response.status_code, 403)
ok = self.client.login(username='sw', password='froide')
self.assertTrue(ok)
response = self.client.post(reverse('account-change_address'), data)
self.assertEqual(response.status_code, 400)
data["address"] = ""
response = self.client.post(reverse('account-change_address'), data)
self.assertEqual(response.status_code, 400)
data["address"] = "Some Value"
response = self.client.post(reverse('account-change_address'), data)
self.assertEqual(response.status_code, 302)
user = User.objects.get(username='sw')
self.assertEqual(user.address, data['address'])
def test_go(self):
user = User.objects.get(username='dummy')
other_user = User.objects.get(username='sw')
# test url is not cached and does not cause 404
test_url = reverse('foirequest-make_request')
# Try logging in via link: success
autologin = user.get_autologin_url(test_url)
response = self.client.get(autologin)
self.assertEqual(response.status_code, 302)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'], user)
self.assertTrue(response.context['user'].is_authenticated())
self.client.logout()
# Try logging in via link: other user is authenticated
ok = self.client.login(username='sw', password='froide')
self.assertTrue(ok)
autologin = user.get_autologin_url(test_url)
response = self.client.get(autologin)
self.assertEqual(response.status_code, 302)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['user'], other_user)
self.assertTrue(response.context['user'].is_authenticated())
self.client.logout()
# Try logging in via link: user not active
autologin = user.get_autologin_url(test_url)
user.is_active = False
user.save()
response = self.client.get(autologin)
self.assertEqual(response.status_code, 404)
response = self.client.get(test_url)
self.assertTrue(response.context['user'].is_anonymous())
# Try logging in via link: wrong user id
autologin = reverse('account-go', kwargs=dict(
user_id='80000', secret='a' * 32, url=test_url
))
response = self.client.get(autologin)
self.assertEqual(response.status_code, 404)
response = self.client.get(test_url)
self.assertTrue(response.context['user'].is_anonymous())
user.is_active = True
user.save()
# Try logging in via link: wrong secret
autologin = reverse('account-go', kwargs=dict(
user_id=str(user.id), secret='a' * 32, url=test_url
))
response = self.client.get(autologin)
self.assertEqual(response.status_code, 302)
response = self.client.get(test_url)
self.assertTrue(response.context['user'].is_anonymous())
def test_profile_page(self):
user = User.objects.get(username='sw')
response = self.client.get(reverse('account-profile',
kwargs={'slug': user.username}))
self.assertEqual(response.status_code, 200)
user2 = factories.UserFactory.create()
user2.private = True
user2.save()
response = self.client.get(reverse('account-profile',
kwargs={'slug': user2.username}))
self.assertEqual(response.status_code, 404)
def test_change_email(self):
mail.outbox = []
new_email = '[email protected]'
user = User.objects.get(username='sw')
response = self.client.post(reverse('account-change_email'),
{
'email': 'not-email',
}
)
self.assertEqual(response.status_code, 403)
self.assertEqual(len(mail.outbox), 0)
self.client.login(username='sw', password='froide')
response = self.client.post(reverse('account-change_email'),
{
'email': 'not-email',
}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
response = self.client.post(reverse('account-change_email'),
{
'email': user.email
}
)
self.assertEqual(response.status_code, 400)
response = self.client.post(reverse('account-change_email'),
{
'email': new_email,
}
)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=user.pk)
self.assertNotEqual(user.email, new_email)
self.assertEqual(len(mail.outbox), 1)
url_kwargs = {
"user_id": user.pk,
"secret": 'f' * 32,
"email": new_email
}
url = '%s?%s' % (
reverse('account-change_email'),
urlencode(url_kwargs)
)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=user.pk)
self.assertNotEqual(user.email, new_email)
email = mail.outbox[0]
self.assertEqual(email.to[0], new_email)
match = re.search(r'https?\://[^/]+(/.*)', email.body)
url = match.group(1)
bad_url = url.replace('user_id=%d' % user.pk, 'user_id=999999')
response = self.client.get(bad_url)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=user.pk)
self.assertNotEqual(user.email, new_email)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.email, new_email)
def test_account_delete(self):
response = self.client.get(reverse('account-settings'))
self.assertEqual(response.status_code, 302)
response = self.client.post(reverse('account-delete_account'),
{
'password': 'froide',
'confirmation': 'Freedom of Information Act'
}
)
self.assertEqual(response.status_code, 403)
user = User.objects.get(username='sw')
self.client.login(username='sw', password='froide')
response = self.client.get(reverse('account-settings'))
self.assertEqual(response.status_code, 200)
response = self.client.post(reverse('account-delete_account'),
{
'password': 'bad-password',
'confirmation': 'Freedom of Information Act'
}
)
self.assertEqual(response.status_code, 400)
response = self.client.post(reverse('account-delete_account'),
{
'password': 'froide',
'confirmation': 'Strange Information Act'
}
)
self.assertEqual(response.status_code, 400)
response = self.client.post(reverse('account-delete_account'),
{
'password': 'froide',
'confirmation': 'Freedom of Information Act'
}
)
self.assertEqual(response.status_code, 302)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.first_name, '')
self.assertEqual(user.last_name, '')
self.assertEqual(user.email, '')
self.assertEqual(user.username, 'u%s' % user.pk)
self.assertEqual(user.address, '')
self.assertEqual(user.organization, '')
self.assertEqual(user.organization_url, '')
self.assertTrue(user.private)
def test_merge_account(self):
from froide.foirequestfollower.models import FoiRequestFollower
from froide.foirequestfollower.tests import FoiRequestFollowerFactory
new_user = factories.UserFactory.create()
new_req = factories.FoiRequestFactory.create()
req = FoiRequest.objects.all()[0]
old_user = req.user
FoiRequestFollowerFactory.create(
user=new_user,
request=new_req
)
FoiRequestFollowerFactory.create(
user=old_user,
request=new_req
)
mes = req.messages
self.assertEqual(mes[0].sender_user, old_user)
merge_accounts(old_user, new_user)
self.assertEqual(1,
FoiRequestFollower.objects.filter(request=new_req).count())
req = FoiRequest.objects.get(pk=req.pk)
mes = req.messages
self.assertEqual(req.user, new_user)
self.assertEqual(mes[0].sender_user, new_user)
def test_send_mass_mail(self):
from froide.account.management.commands.send_mass_mail import Command
user_count = User.objects.all().count()
mail.outbox = []
command = Command()
subject, content = 'Test', 'Testing-Content'
list(command.send_mail(subject, content))
self.assertEqual(len(mail.outbox), user_count)
class AdminActionTest(TestCase):
def setUp(self):
self.site = factories.make_world()
self.admin_site = AdminSite()
self.user_admin = UserAdmin(User, self.admin_site)
self.factory = RequestFactory()
self.user = User.objects.get(username='sw')
self.user.is_superuser = True
def test_send_mail(self):
users = User.objects.all()
req = self.factory.post('/', {})
req.user = self.user
result = self.user_admin.send_mail(req, users)
self.assertEqual(result.status_code, 200)
req = self.factory.post('/', {
'subject': 'Test',
'body': '^{name}|{first_name}|{last_name}|'
})
req.user = self.user
req._messages = default_storage(req)
mail.outbox = []
result = self.user_admin.send_mail(req, users)
self.assertIsNone(result)
self.assertEqual(len(mail.outbox), users.count())
message = mail.outbox[0]
user = users[0]
self.assertIn('|%s|' % user.first_name, message.body)
self.assertIn('|%s|' % user.last_name, message.body)
self.assertIn('^%s|' % user.get_full_name(), message.body)
| mit | 6,879,809,669,334,446,000 | 41.06793 | 86 | 0.606144 | false |
openslack/openslack-crawler | scripts/stackoverflow.py | 1 | 2266 | import datetime
import time, pymongo
from elasticsearch import Elasticsearch
from django.utils.html import strip_tags
es = Elasticsearch([{'host': "192.168.0.107"}])
conn = pymongo.MongoClient("192.168.0.107", 27017)
stackoverflowdb = conn.stackoverflow
def save_es(items):
index = "it"
_type = "stackoverflow_questions"
from elasticsearch.helpers import bulk
es.indices.create(index=index, body={
'settings': {
'number_of_shards': 4,
'number_of_replicas': 0,
},
"mappings": {
_type: {
"_all": {
"analyzer": "ik_smart",
"search_analyzer": "ik_smart",
"term_vector": "no",
"store": "false"
},
"properties": {
'body': {
'type': 'string',
'analyzer': 'ik_smart'
},
'title': {
'type': 'string',
'analyzer': 'ik_smart'
},
}
}
}
},ignore=400)
bulk_items=[]
i=0
for item in items:
now = datetime.datetime.now() - datetime.timedelta(hours=8)
body = {"@timestamp": now}
_id = str(item["_id"])
del item["_id"]
body.update(item)
if not es.exists(index=index, doc_type=_type, id=_id):
es.index(index=index, doc_type=_type, body=body, ignore=400, timestamp=now, id=_id, request_timeout=30)
i += 1
print i
# bulk_items.append({'_type': 'stackoverflow',
# '_id': _id,
# '_source': body,
# })
# if i == 10000:
# success, _ = bulk(es, bulk_items, index='it', raise_on_error=True, request_timeout=1800)
# print('Performed %d actions' % success)
# bulk_items = []
# i = 0
# es.indices.refresh(index=index)
if __name__ == '__main__':
# es.indices.delete("it",ignore=400)
items = stackoverflowdb.question.find({},{"body_markdown":0})
save_es(items)
| apache-2.0 | 6,205,382,843,209,666,000 | 32.820896 | 115 | 0.44925 | false |
JulyKikuAkita/PythonPrac | cs15211/SubarrayProductLessThanK.py | 1 | 3568 | __source__ = 'https://leetcode.com/problems/subarray-product-less-than-k/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 713. Subarray Product Less Than K
#
# Your are given an array of positive integers nums.
#
# Count and print the number of (contiguous) subarrays
# where the product of all the elements in the subarray is less than k.
#
# Example 1:
# Input: nums = [10, 5, 2, 6], k = 100
# Output: 8
# Explanation: The 8 subarrays that have product less than 100 are:
# [10], [5], [2], [6], [10, 5], [5, 2], [2, 6], [5, 2, 6].
# Note that [10, 5, 2] is not included as the product of 100 is not strictly less than k.
# Note:
#
# 0 < nums.length <= 50000.
# 0 < nums[i] < 1000.
# 0 <= k < 10^6.
#
import unittest
# 284ms 29.27%
class Solution(object):
def numSubarrayProductLessThanK(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
if k <= 1: return 0
prod = 1
ans = left = 0
for right, val in enumerate(nums):
prod *= val
while prod >= k:
prod /= nums[left]
left += 1
if prod < k:
ans += right - left + 1
return ans
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/subarray-product-less-than-k/solution/
Approach #2: Sliding Window [Accepted]
Time Complexity: O(N), where N is the length of nums. left can only be incremented at most N times.
Space Complexity: O(1), the space used by prod, left, and ans.
# 18ms 38.88%
class Solution {
public int numSubarrayProductLessThanK(int[] nums, int k) {
if ( k <= 1) return 0;
int prod = 1, ans = 0, left = 0;
for (int right = 0; right < nums.length; right++) {
prod *= nums[right];
while (prod >= k) prod /= nums[left++];
ans += right - left + 1;
}
return ans;
}
}
# only for reference
Because \log(\prod_i x_i) = \sum_i \log x_i, we can reduce the problem to subarray sums instead of subarray products.
Algorithm
After this transformation where every value x becomes log(x),
let us take prefix sums prefix[i+1] = nums[0] + nums[1] + ... + nums[i].
Now we are left with the problem of finding, for each i,
the largest j so that nums[i] + ... + nums[j] = prefix[j] - prefix[i] < k.
Because prefix is a monotone increasing array, this can be solved with binary search.
We add the width of the interval [i, j] to our answer, which counts all subarrays [i, k] with k <= j.
Complexity Analysis
Time Complexity: O(NlogN), where N is the length of nums.
Inside our for loop, each binary search operation takes O(logN) time.
Space Complexity: O(N), the space used by prefix.
# 89ms 0.98%
class Solution {
public int numSubarrayProductLessThanK(int[] nums, int k) {
if (k == 0) return 0;
double logk = Math.log(k);
double[] prefix = new double[nums.length + 1];
for (int i = 0; i < nums.length; i++) {
prefix[i+1] = prefix[i] + Math.log(nums[i]);
}
int ans = 0;
for (int i = 0; i < prefix.length; i++) {
int lo = i + 1, hi = prefix.length;
while (lo < hi) {
int mi = lo + (hi - lo) / 2;
if (prefix[mi] < prefix[i] + logk - 1e-9) lo = mi + 1;
else hi = mi;
}
ans += lo - i - 1;
}
return ans;
}
}
'''
| apache-2.0 | 8,405,352,198,940,700,000 | 30.575221 | 117 | 0.57343 | false |
crsmithdev/arrow | arrow/locales.py | 1 | 115090 | """Provides internationalization for arrow in over 60 languages and dialects."""
import sys
from math import trunc
from typing import (
Any,
ClassVar,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
if sys.version_info < (3, 8): # pragma: no cover
from typing_extensions import Literal
else:
from typing import Literal # pragma: no cover
TimeFrameLiteral = Literal[
"now",
"second",
"seconds",
"minute",
"minutes",
"hour",
"hours",
"day",
"days",
"week",
"weeks",
"month",
"months",
"year",
"years",
"2-hours",
"2-days",
"2-weeks",
"2-months",
"2-years",
]
_TimeFrameElements = Union[
str, Sequence[str], Mapping[str, str], Mapping[str, Sequence[str]]
]
_locale_map: Dict[str, Type["Locale"]] = dict()
def get_locale(name: str) -> "Locale":
"""Returns an appropriate :class:`Locale <arrow.locales.Locale>`
corresponding to an input locale name.
:param name: the name of the locale.
"""
normalized_locale_name = name.lower().replace("_", "-")
locale_cls = _locale_map.get(normalized_locale_name)
if locale_cls is None:
raise ValueError(f"Unsupported locale {normalized_locale_name!r}.")
return locale_cls()
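# Illustrative usage (a sketch, not executed at import time): names are
# normalized to a lowercase, dash-separated form before lookup, so "en_US",
# "EN-US" and "en-us" all resolve to the same EnglishLocale, while unknown
# names raise ValueError.
#
#     >>> get_locale("en_US").describe("hours", -2)
#     '2 hours ago'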
def get_locale_by_class_name(name: str) -> "Locale":
"""Returns an appropriate :class:`Locale <arrow.locales.Locale>`
    corresponding to a locale class name.
:param name: the name of the locale class.
"""
locale_cls: Optional[Type[Locale]] = globals().get(name)
if locale_cls is None:
raise ValueError(f"Unsupported locale {name!r}.")
return locale_cls()
class Locale:
"""Represents locale-specific data and functionality."""
names: ClassVar[List[str]] = []
timeframes: ClassVar[Mapping[TimeFrameLiteral, _TimeFrameElements]] = {
"now": "",
"second": "",
"seconds": "",
"minute": "",
"minutes": "",
"hour": "",
"hours": "",
"day": "",
"days": "",
"week": "",
"weeks": "",
"month": "",
"months": "",
"year": "",
"years": "",
}
meridians: ClassVar[Dict[str, str]] = {"am": "", "pm": "", "AM": "", "PM": ""}
past: ClassVar[str]
future: ClassVar[str]
and_word: ClassVar[Optional[str]] = None
month_names: ClassVar[List[str]] = []
month_abbreviations: ClassVar[List[str]] = []
day_names: ClassVar[List[str]] = []
day_abbreviations: ClassVar[List[str]] = []
ordinal_day_re: ClassVar[str] = r"(\d+)"
_month_name_to_ordinal: Optional[Dict[str, int]]
def __init_subclass__(cls, **kwargs: Any) -> None:
for locale_name in cls.names:
if locale_name in _locale_map:
raise LookupError(f"Duplicated locale name: {locale_name}")
_locale_map[locale_name.lower().replace("_", "-")] = cls
def __init__(self) -> None:
self._month_name_to_ordinal = None
def describe(
self,
timeframe: TimeFrameLiteral,
delta: Union[float, int] = 0,
only_distance: bool = False,
) -> str:
"""Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
        :param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords
"""
humanized = self._format_timeframe(timeframe, delta)
if not only_distance:
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
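    # A rough sketch of what describe() produces, using the EnglishLocale
    # defined further below (other locales substitute their own timeframes
    # and past/future wording):
    #
    #     >>> EnglishLocale().describe("hours", -2)
    #     '2 hours ago'
    #     >>> EnglishLocale().describe("hour", 1, only_distance=True)
    #     'an hour'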
def describe_multi(
self,
timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]],
only_distance: bool = False,
) -> str:
"""Describes a delta within multiple timeframes in plain language.
:param timeframes: a list of string, quantity pairs each representing a timeframe and delta.
        :param only_distance: return only the distance, e.g. "2 hours and 11 seconds", without "in" or "ago" keywords
"""
parts = [
self._format_timeframe(timeframe, delta) for timeframe, delta in timeframes
]
if self.and_word:
parts.insert(-1, self.and_word)
humanized = " ".join(parts)
if not only_distance:
humanized = self._format_relative(humanized, *timeframes[-1])
return humanized
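    # Sketch: describe_multi() joins the per-timeframe fragments with and_word
    # (when the locale defines one) and applies the past/future wording once,
    # based on the sign of the last delta. With the EnglishLocale below:
    #
    #     >>> EnglishLocale().describe_multi([("hours", 2), ("minutes", 30)])
    #     'in 2 hours and 30 minutes'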
def day_name(self, day: int) -> str:
"""Returns the day name for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
"""
return self.day_names[day]
def day_abbreviation(self, day: int) -> str:
"""Returns the day abbreviation for a specified day of the week.
:param day: the ``int`` day of the week (1-7).
"""
return self.day_abbreviations[day]
def month_name(self, month: int) -> str:
"""Returns the month name for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
"""
return self.month_names[month]
def month_abbreviation(self, month: int) -> str:
"""Returns the month abbreviation for a specified month of the year.
:param month: the ``int`` month of the year (1-12).
"""
return self.month_abbreviations[month]
def month_number(self, name: str) -> Optional[int]:
"""Returns the month number for a month specified by name or abbreviation.
:param name: the month name or abbreviation.
"""
if self._month_name_to_ordinal is None:
self._month_name_to_ordinal = self._name_to_ordinal(self.month_names)
self._month_name_to_ordinal.update(
self._name_to_ordinal(self.month_abbreviations)
)
return self._month_name_to_ordinal.get(name)
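    # Note: the lookup table built above is keyed on lowercased names, so
    # "january" and "jan" resolve to 1 while "January" does not match;
    # callers presumably lowercase the name before calling this method.
    #
    #     >>> EnglishLocale().month_number("january")
    #     1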
def year_full(self, year: int) -> str:
"""Returns the year for specific locale if available
:param year: the ``int`` year (4-digit)
"""
return f"{year:04d}"
def year_abbreviation(self, year: int) -> str:
"""Returns the year for specific locale if available
:param year: the ``int`` year (4-digit)
"""
return f"{year:04d}"[2:]
def meridian(self, hour: int, token: Any) -> Optional[str]:
"""Returns the meridian indicator for a specified hour and format token.
:param hour: the ``int`` hour of the day.
:param token: the format token.
"""
if token == "a":
return self.meridians["am"] if hour < 12 else self.meridians["pm"]
if token == "A":
return self.meridians["AM"] if hour < 12 else self.meridians["PM"]
return None
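    # Sketch, using the EnglishLocale meridians defined below:
    #
    #     >>> EnglishLocale().meridian(9, "A")
    #     'AM'
    #     >>> EnglishLocale().meridian(15, "a")
    #     'pm'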
def ordinal_number(self, n: int) -> str:
"""Returns the ordinal format of a given integer
:param n: an integer
"""
return self._ordinal_number(n)
def _ordinal_number(self, n: int) -> str:
return f"{n}"
def _name_to_ordinal(self, lst: Sequence[str]) -> Dict[str, int]:
return {elem.lower(): i for i, elem in enumerate(lst[1:], 1)}
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
# TODO: remove cast
return cast(str, self.timeframes[timeframe]).format(trunc(abs(delta)))
def _format_relative(
self,
humanized: str,
timeframe: TimeFrameLiteral,
delta: Union[float, int],
) -> str:
if timeframe == "now":
return humanized
direction = self.past if delta < 0 else self.future
return direction.format(humanized)
class EnglishLocale(Locale):
names = [
"en",
"en-us",
"en-gb",
"en-au",
"en-be",
"en-jp",
"en-za",
"en-ca",
"en-ph",
]
past = "{0} ago"
future = "in {0}"
and_word = "and"
timeframes = {
"now": "just now",
"second": "a second",
"seconds": "{0} seconds",
"minute": "a minute",
"minutes": "{0} minutes",
"hour": "an hour",
"hours": "{0} hours",
"day": "a day",
"days": "{0} days",
"week": "a week",
"weeks": "{0} weeks",
"month": "a month",
"months": "{0} months",
"year": "a year",
"years": "{0} years",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
ordinal_day_re = r"((?P<value>[2-3]?1(?=st)|[2-3]?2(?=nd)|[2-3]?3(?=rd)|[1-3]?[04-9](?=th)|1[1-3](?=th))(st|nd|rd|th))"
def _ordinal_number(self, n: int) -> str:
if n % 100 not in (11, 12, 13):
remainder = abs(n) % 10
if remainder == 1:
return f"{n}st"
elif remainder == 2:
return f"{n}nd"
elif remainder == 3:
return f"{n}rd"
return f"{n}th"
def describe(
self,
timeframe: TimeFrameLiteral,
delta: Union[int, float] = 0,
only_distance: bool = False,
) -> str:
"""Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
        :param only_distance: return only the distance, e.g. "11 seconds", without "in" or "ago" keywords
"""
humanized = super().describe(timeframe, delta, only_distance)
if only_distance and timeframe == "now":
humanized = "instantly"
return humanized
class ItalianLocale(Locale):
names = ["it", "it-it"]
past = "{0} fa"
future = "tra {0}"
and_word = "e"
timeframes = {
"now": "adesso",
"second": "un secondo",
"seconds": "{0} qualche secondo",
"minute": "un minuto",
"minutes": "{0} minuti",
"hour": "un'ora",
"hours": "{0} ore",
"day": "un giorno",
"days": "{0} giorni",
"week": "una settimana,",
"weeks": "{0} settimane",
"month": "un mese",
"months": "{0} mesi",
"year": "un anno",
"years": "{0} anni",
}
month_names = [
"",
"gennaio",
"febbraio",
"marzo",
"aprile",
"maggio",
"giugno",
"luglio",
"agosto",
"settembre",
"ottobre",
"novembre",
"dicembre",
]
month_abbreviations = [
"",
"gen",
"feb",
"mar",
"apr",
"mag",
"giu",
"lug",
"ago",
"set",
"ott",
"nov",
"dic",
]
day_names = [
"",
"lunedì",
"martedì",
"mercoledì",
"giovedì",
"venerdì",
"sabato",
"domenica",
]
day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n: int) -> str:
return f"{n}º"
class SpanishLocale(Locale):
names = ["es", "es-es"]
past = "hace {0}"
future = "en {0}"
and_word = "y"
timeframes = {
"now": "ahora",
"second": "un segundo",
"seconds": "{0} segundos",
"minute": "un minuto",
"minutes": "{0} minutos",
"hour": "una hora",
"hours": "{0} horas",
"day": "un día",
"days": "{0} días",
"week": "una semana",
"weeks": "{0} semanas",
"month": "un mes",
"months": "{0} meses",
"year": "un año",
"years": "{0} años",
}
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
month_names = [
"",
"enero",
"febrero",
"marzo",
"abril",
"mayo",
"junio",
"julio",
"agosto",
"septiembre",
"octubre",
"noviembre",
"diciembre",
]
month_abbreviations = [
"",
"ene",
"feb",
"mar",
"abr",
"may",
"jun",
"jul",
"ago",
"sep",
"oct",
"nov",
"dic",
]
day_names = [
"",
"lunes",
"martes",
"miércoles",
"jueves",
"viernes",
"sábado",
"domingo",
]
day_abbreviations = ["", "lun", "mar", "mie", "jue", "vie", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n: int) -> str:
return f"{n}º"
class FrenchBaseLocale(Locale):
past = "il y a {0}"
future = "dans {0}"
and_word = "et"
timeframes = {
"now": "maintenant",
"second": "une seconde",
"seconds": "{0} secondes",
"minute": "une minute",
"minutes": "{0} minutes",
"hour": "une heure",
"hours": "{0} heures",
"day": "un jour",
"days": "{0} jours",
"week": "une semaine",
"weeks": "{0} semaines",
"month": "un mois",
"months": "{0} mois",
"year": "un an",
"years": "{0} ans",
}
month_names = [
"",
"janvier",
"février",
"mars",
"avril",
"mai",
"juin",
"juillet",
"août",
"septembre",
"octobre",
"novembre",
"décembre",
]
day_names = [
"",
"lundi",
"mardi",
"mercredi",
"jeudi",
"vendredi",
"samedi",
"dimanche",
]
day_abbreviations = ["", "lun", "mar", "mer", "jeu", "ven", "sam", "dim"]
ordinal_day_re = (
r"((?P<value>\b1(?=er\b)|[1-3]?[02-9](?=e\b)|[1-3]1(?=e\b))(er|e)\b)"
)
def _ordinal_number(self, n: int) -> str:
if abs(n) == 1:
return f"{n}er"
return f"{n}e"
class FrenchLocale(FrenchBaseLocale, Locale):
names = ["fr", "fr-fr"]
month_abbreviations = [
"",
"janv",
"févr",
"mars",
"avr",
"mai",
"juin",
"juil",
"août",
"sept",
"oct",
"nov",
"déc",
]
class FrenchCanadianLocale(FrenchBaseLocale, Locale):
names = ["fr-ca"]
month_abbreviations = [
"",
"janv",
"févr",
"mars",
"avr",
"mai",
"juin",
"juill",
"août",
"sept",
"oct",
"nov",
"déc",
]
class GreekLocale(Locale):
names = ["el", "el-gr"]
past = "{0} πριν"
future = "σε {0}"
and_word = "και"
timeframes = {
"now": "τώρα",
"second": "ένα δεύτερο",
"seconds": "{0} δευτερόλεπτα",
"minute": "ένα λεπτό",
"minutes": "{0} λεπτά",
"hour": "μία ώρα",
"hours": "{0} ώρες",
"day": "μία μέρα",
"days": "{0} μέρες",
"month": "ένα μήνα",
"months": "{0} μήνες",
"year": "ένα χρόνο",
"years": "{0} χρόνια",
}
month_names = [
"",
"Ιανουαρίου",
"Φεβρουαρίου",
"Μαρτίου",
"Απριλίου",
"Μαΐου",
"Ιουνίου",
"Ιουλίου",
"Αυγούστου",
"Σεπτεμβρίου",
"Οκτωβρίου",
"Νοεμβρίου",
"Δεκεμβρίου",
]
month_abbreviations = [
"",
"Ιαν",
"Φεβ",
"Μαρ",
"Απρ",
"Μαϊ",
"Ιον",
"Ιολ",
"Αυγ",
"Σεπ",
"Οκτ",
"Νοε",
"Δεκ",
]
day_names = [
"",
"Δευτέρα",
"Τρίτη",
"Τετάρτη",
"Πέμπτη",
"Παρασκευή",
"Σάββατο",
"Κυριακή",
]
day_abbreviations = ["", "Δευ", "Τρι", "Τετ", "Πεμ", "Παρ", "Σαβ", "Κυρ"]
class JapaneseLocale(Locale):
names = ["ja", "ja-jp"]
past = "{0}前"
future = "{0}後"
and_word = ""
timeframes = {
"now": "現在",
"second": "1秒",
"seconds": "{0}秒",
"minute": "1分",
"minutes": "{0}分",
"hour": "1時間",
"hours": "{0}時間",
"day": "1日",
"days": "{0}日",
"week": "1週間",
"weeks": "{0}週間",
"month": "1ヶ月",
"months": "{0}ヶ月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日", "日曜日"]
day_abbreviations = ["", "月", "火", "水", "木", "金", "土", "日"]
class SwedishLocale(Locale):
names = ["sv", "sv-se"]
past = "för {0} sen"
future = "om {0}"
and_word = "och"
timeframes = {
"now": "just nu",
"second": "en sekund",
"seconds": "{0} sekunder",
"minute": "en minut",
"minutes": "{0} minuter",
"hour": "en timme",
"hours": "{0} timmar",
"day": "en dag",
"days": "{0} dagar",
"week": "en vecka",
"weeks": "{0} veckor",
"month": "en månad",
"months": "{0} månader",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januari",
"februari",
"mars",
"april",
"maj",
"juni",
"juli",
"augusti",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"måndag",
"tisdag",
"onsdag",
"torsdag",
"fredag",
"lördag",
"söndag",
]
day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"]
class FinnishLocale(Locale):
names = ["fi", "fi-fi"]
    # Finnish grammar is very complex, and it's hard to map the forms
    # 1-to-1 onto something like English.
past = "{0} sitten"
future = "{0} kuluttua"
timeframes: ClassVar[Mapping[TimeFrameLiteral, List[str]]] = {
"now": ["juuri nyt", "juuri nyt"],
"second": ["sekunti", "sekunti"],
"seconds": ["{0} muutama sekunti", "{0} muutaman sekunnin"],
"minute": ["minuutti", "minuutin"],
"minutes": ["{0} minuuttia", "{0} minuutin"],
"hour": ["tunti", "tunnin"],
"hours": ["{0} tuntia", "{0} tunnin"],
"day": ["päivä", "päivä"],
"days": ["{0} päivää", "{0} päivän"],
"month": ["kuukausi", "kuukauden"],
"months": ["{0} kuukautta", "{0} kuukauden"],
"year": ["vuosi", "vuoden"],
"years": ["{0} vuotta", "{0} vuoden"],
}
# Months and days are lowercase in Finnish
month_names = [
"",
"tammikuu",
"helmikuu",
"maaliskuu",
"huhtikuu",
"toukokuu",
"kesäkuu",
"heinäkuu",
"elokuu",
"syyskuu",
"lokakuu",
"marraskuu",
"joulukuu",
]
month_abbreviations = [
"",
"tammi",
"helmi",
"maalis",
"huhti",
"touko",
"kesä",
"heinä",
"elo",
"syys",
"loka",
"marras",
"joulu",
]
day_names = [
"",
"maanantai",
"tiistai",
"keskiviikko",
"torstai",
"perjantai",
"lauantai",
"sunnuntai",
]
day_abbreviations = ["", "ma", "ti", "ke", "to", "pe", "la", "su"]
# TODO: Fix return type
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: Union[float, int]) -> Tuple[str, str]: # type: ignore
return (
self.timeframes[timeframe][0].format(abs(delta)),
self.timeframes[timeframe][1].format(abs(delta)),
)
def _format_relative(
self,
humanized: str,
timeframe: TimeFrameLiteral,
delta: Union[float, int],
) -> str:
if timeframe == "now":
return humanized[0]
direction = self.past if delta < 0 else self.future
which = 0 if delta < 0 else 1
return direction.format(humanized[which])
def _ordinal_number(self, n: int) -> str:
return f"{n}."
class ChineseCNLocale(Locale):
names = ["zh", "zh-cn"]
past = "{0}前"
future = "{0}后"
timeframes = {
"now": "刚才",
"second": "一秒",
"seconds": "{0}秒",
"minute": "1分钟",
"minutes": "{0}分钟",
"hour": "1小时",
"hours": "{0}小时",
"day": "1天",
"days": "{0}天",
"week": "一周",
"weeks": "{0}周",
"month": "1个月",
"months": "{0}个月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"一月",
"二月",
"三月",
"四月",
"五月",
"六月",
"七月",
"八月",
"九月",
"十月",
"十一月",
"十二月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class ChineseTWLocale(Locale):
names = ["zh-tw"]
past = "{0}前"
future = "{0}後"
and_word = "和"
timeframes = {
"now": "剛才",
"second": "1秒",
"seconds": "{0}秒",
"minute": "1分鐘",
"minutes": "{0}分鐘",
"hour": "1小時",
"hours": "{0}小時",
"day": "1天",
"days": "{0}天",
"week": "1週",
"weeks": "{0}週",
"month": "1個月",
"months": "{0}個月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "週一", "週二", "週三", "週四", "週五", "週六", "週日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class HongKongLocale(Locale):
names = ["zh-hk"]
past = "{0}前"
future = "{0}後"
timeframes = {
"now": "剛才",
"second": "1秒",
"seconds": "{0}秒",
"minute": "1分鐘",
"minutes": "{0}分鐘",
"hour": "1小時",
"hours": "{0}小時",
"day": "1天",
"days": "{0}天",
"week": "1星期",
"weeks": "{0}星期",
"month": "1個月",
"months": "{0}個月",
"year": "1年",
"years": "{0}年",
}
month_names = [
"",
"1月",
"2月",
"3月",
"4月",
"5月",
"6月",
"7月",
"8月",
"9月",
"10月",
"11月",
"12月",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "星期一", "星期二", "星期三", "星期四", "星期五", "星期六", "星期日"]
day_abbreviations = ["", "一", "二", "三", "四", "五", "六", "日"]
class KoreanLocale(Locale):
names = ["ko", "ko-kr"]
past = "{0} 전"
future = "{0} 후"
timeframes = {
"now": "지금",
"second": "1초",
"seconds": "{0}초",
"minute": "1분",
"minutes": "{0}분",
"hour": "한시간",
"hours": "{0}시간",
"day": "하루",
"days": "{0}일",
"week": "1주",
"weeks": "{0}주",
"month": "한달",
"months": "{0}개월",
"year": "1년",
"years": "{0}년",
}
special_dayframes = {
-3: "그끄제",
-2: "그제",
-1: "어제",
1: "내일",
2: "모레",
3: "글피",
4: "그글피",
}
    special_yearframes = {-2: "재작년", -1: "작년", 1: "내년", 2: "내후년"}
month_names = [
"",
"1월",
"2월",
"3월",
"4월",
"5월",
"6월",
"7월",
"8월",
"9월",
"10월",
"11월",
"12월",
]
month_abbreviations = [
"",
" 1",
" 2",
" 3",
" 4",
" 5",
" 6",
" 7",
" 8",
" 9",
"10",
"11",
"12",
]
day_names = ["", "월요일", "화요일", "수요일", "목요일", "금요일", "토요일", "일요일"]
day_abbreviations = ["", "월", "화", "수", "목", "금", "토", "일"]
def _ordinal_number(self, n: int) -> str:
ordinals = ["0", "첫", "두", "세", "네", "다섯", "여섯", "일곱", "여덟", "아홉", "열"]
if n < len(ordinals):
return f"{ordinals[n]}번째"
return f"{n}번째"
def _format_relative(
self,
humanized: str,
timeframe: TimeFrameLiteral,
delta: Union[float, int],
) -> str:
if timeframe in ("day", "days"):
special = self.special_dayframes.get(int(delta))
if special:
return special
elif timeframe in ("year", "years"):
special = self.special_yearframes.get(int(delta))
if special:
return special
return super()._format_relative(humanized, timeframe, delta)
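    # Sketch: nearby days and years get dedicated words instead of the generic
    # past/future phrasing, while other timeframes fall through to the base
    # behaviour:
    #
    #     >>> KoreanLocale().describe("day", 1)
    #     '내일'
    #     >>> KoreanLocale().describe("days", 2)
    #     '모레'
    #     >>> KoreanLocale().describe("hours", -3)
    #     '3시간 전'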
# derived locale types & implementations.
class DutchLocale(Locale):
names = ["nl", "nl-nl"]
past = "{0} geleden"
future = "over {0}"
timeframes = {
"now": "nu",
"second": "een seconde",
"seconds": "{0} seconden",
"minute": "een minuut",
"minutes": "{0} minuten",
"hour": "een uur",
"hours": "{0} uur",
"day": "een dag",
"days": "{0} dagen",
"week": "een week",
"weeks": "{0} weken",
"month": "een maand",
"months": "{0} maanden",
"year": "een jaar",
"years": "{0} jaar",
}
    # In Dutch, month and day names are not capitalized as they are in English.
month_names = [
"",
"januari",
"februari",
"maart",
"april",
"mei",
"juni",
"juli",
"augustus",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mrt",
"apr",
"mei",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"maandag",
"dinsdag",
"woensdag",
"donderdag",
"vrijdag",
"zaterdag",
"zondag",
]
day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"]
class SlavicBaseLocale(Locale):
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, list):
if delta % 10 == 1 and delta % 100 != 11:
form = form[0]
elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form[1]
else:
form = form[2]
return form.format(delta)
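    # Sketch of the plural selection above, using the RussianLocale forms
    # defined below: index 0 for counts ending in 1 (but not 11), index 1 for
    # counts ending in 2-4 (but not 12-14), index 2 otherwise.
    #
    #     >>> RussianLocale()._format_timeframe("hours", 1)
    #     '1 час'
    #     >>> RussianLocale()._format_timeframe("hours", 3)
    #     '3 часа'
    #     >>> RussianLocale()._format_timeframe("hours", 11)
    #     '11 часов'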
class BelarusianLocale(SlavicBaseLocale):
names = ["be", "be-by"]
past = "{0} таму"
future = "праз {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]] = {
"now": "зараз",
"second": "секунду",
"seconds": "{0} некалькі секунд",
"minute": "хвіліну",
"minutes": ["{0} хвіліну", "{0} хвіліны", "{0} хвілін"],
"hour": "гадзіну",
"hours": ["{0} гадзіну", "{0} гадзіны", "{0} гадзін"],
"day": "дзень",
"days": ["{0} дзень", "{0} дні", "{0} дзён"],
"month": "месяц",
"months": ["{0} месяц", "{0} месяцы", "{0} месяцаў"],
"year": "год",
"years": ["{0} год", "{0} гады", "{0} гадоў"],
}
month_names = [
"",
"студзеня",
"лютага",
"сакавіка",
"красавіка",
"траўня",
"чэрвеня",
"ліпеня",
"жніўня",
"верасня",
"кастрычніка",
"лістапада",
"снежня",
]
month_abbreviations = [
"",
"студ",
"лют",
"сак",
"крас",
"трав",
"чэрв",
"ліп",
"жнів",
"вер",
"каст",
"ліст",
"снеж",
]
day_names = [
"",
"панядзелак",
"аўторак",
"серада",
"чацвер",
"пятніца",
"субота",
"нядзеля",
]
day_abbreviations = ["", "пн", "ат", "ср", "чц", "пт", "сб", "нд"]
class PolishLocale(SlavicBaseLocale):
names = ["pl", "pl-pl"]
past = "{0} temu"
future = "za {0}"
# The nouns should be in genitive case (Polish: "dopełniacz")
# in order to correctly form `past` & `future` expressions.
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]] = {
"now": "teraz",
"second": "sekundę",
"seconds": ["{0} sekund", "{0} sekundy", "{0} sekund"],
"minute": "minutę",
"minutes": ["{0} minut", "{0} minuty", "{0} minut"],
"hour": "godzinę",
"hours": ["{0} godzin", "{0} godziny", "{0} godzin"],
"day": "dzień",
"days": "{0} dni",
"week": "tydzień",
"weeks": ["{0} tygodni", "{0} tygodnie", "{0} tygodni"],
"month": "miesiąc",
"months": ["{0} miesięcy", "{0} miesiące", "{0} miesięcy"],
"year": "rok",
"years": ["{0} lat", "{0} lata", "{0} lat"],
}
month_names = [
"",
"styczeń",
"luty",
"marzec",
"kwiecień",
"maj",
"czerwiec",
"lipiec",
"sierpień",
"wrzesień",
"październik",
"listopad",
"grudzień",
]
month_abbreviations = [
"",
"sty",
"lut",
"mar",
"kwi",
"maj",
"cze",
"lip",
"sie",
"wrz",
"paź",
"lis",
"gru",
]
day_names = [
"",
"poniedziałek",
"wtorek",
"środa",
"czwartek",
"piątek",
"sobota",
"niedziela",
]
day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"]
class RussianLocale(SlavicBaseLocale):
names = ["ru", "ru-ru"]
past = "{0} назад"
future = "через {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]] = {
"now": "сейчас",
"second": "Второй",
"seconds": "{0} несколько секунд",
"minute": "минуту",
"minutes": ["{0} минуту", "{0} минуты", "{0} минут"],
"hour": "час",
"hours": ["{0} час", "{0} часа", "{0} часов"],
"day": "день",
"days": ["{0} день", "{0} дня", "{0} дней"],
"week": "неделю",
"weeks": ["{0} неделю", "{0} недели", "{0} недель"],
"month": "месяц",
"months": ["{0} месяц", "{0} месяца", "{0} месяцев"],
"year": "год",
"years": ["{0} год", "{0} года", "{0} лет"],
}
month_names = [
"",
"января",
"февраля",
"марта",
"апреля",
"мая",
"июня",
"июля",
"августа",
"сентября",
"октября",
"ноября",
"декабря",
]
month_abbreviations = [
"",
"янв",
"фев",
"мар",
"апр",
"май",
"июн",
"июл",
"авг",
"сен",
"окт",
"ноя",
"дек",
]
day_names = [
"",
"понедельник",
"вторник",
"среда",
"четверг",
"пятница",
"суббота",
"воскресенье",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "вс"]
class AfrikaansLocale(Locale):
names = ["af", "af-nl"]
past = "{0} gelede"
future = "in {0}"
timeframes = {
"now": "nou",
"second": "n sekonde",
"seconds": "{0} sekondes",
"minute": "minuut",
"minutes": "{0} minute",
"hour": "uur",
"hours": "{0} ure",
"day": "een dag",
"days": "{0} dae",
"month": "een maand",
"months": "{0} maande",
"year": "een jaar",
"years": "{0} jaar",
}
month_names = [
"",
"Januarie",
"Februarie",
"Maart",
"April",
"Mei",
"Junie",
"Julie",
"Augustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mrt",
"Apr",
"Mei",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Des",
]
day_names = [
"",
"Maandag",
"Dinsdag",
"Woensdag",
"Donderdag",
"Vrydag",
"Saterdag",
"Sondag",
]
day_abbreviations = ["", "Ma", "Di", "Wo", "Do", "Vr", "Za", "So"]
class BulgarianLocale(SlavicBaseLocale):
names = ["bg", "bg-bg"]
past = "{0} назад"
future = "напред {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]] = {
"now": "сега",
"second": "секунда",
"seconds": "{0} няколко секунди",
"minute": "минута",
"minutes": ["{0} минута", "{0} минути", "{0} минути"],
"hour": "час",
"hours": ["{0} час", "{0} часа", "{0} часа"],
"day": "ден",
"days": ["{0} ден", "{0} дни", "{0} дни"],
"month": "месец",
"months": ["{0} месец", "{0} месеца", "{0} месеца"],
"year": "година",
"years": ["{0} година", "{0} години", "{0} години"],
}
month_names = [
"",
"януари",
"февруари",
"март",
"април",
"май",
"юни",
"юли",
"август",
"септември",
"октомври",
"ноември",
"декември",
]
month_abbreviations = [
"",
"ян",
"февр",
"март",
"апр",
"май",
"юни",
"юли",
"авг",
"септ",
"окт",
"ноем",
"дек",
]
day_names = [
"",
"понеделник",
"вторник",
"сряда",
"четвъртък",
"петък",
"събота",
"неделя",
]
day_abbreviations = ["", "пон", "вт", "ср", "четв", "пет", "съб", "нед"]
class UkrainianLocale(SlavicBaseLocale):
names = ["ua", "uk-ua"]
past = "{0} тому"
future = "за {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]] = {
"now": "зараз",
"second": "секунда",
"seconds": "{0} кілька секунд",
"minute": "хвилину",
"minutes": ["{0} хвилину", "{0} хвилини", "{0} хвилин"],
"hour": "годину",
"hours": ["{0} годину", "{0} години", "{0} годин"],
"day": "день",
"days": ["{0} день", "{0} дні", "{0} днів"],
"month": "місяць",
"months": ["{0} місяць", "{0} місяці", "{0} місяців"],
"year": "рік",
"years": ["{0} рік", "{0} роки", "{0} років"],
}
month_names = [
"",
"січня",
"лютого",
"березня",
"квітня",
"травня",
"червня",
"липня",
"серпня",
"вересня",
"жовтня",
"листопада",
"грудня",
]
month_abbreviations = [
"",
"січ",
"лют",
"бер",
"квіт",
"трав",
"черв",
"лип",
"серп",
"вер",
"жовт",
"лист",
"груд",
]
day_names = [
"",
"понеділок",
"вівторок",
"середа",
"четвер",
"п’ятниця",
"субота",
"неділя",
]
day_abbreviations = ["", "пн", "вт", "ср", "чт", "пт", "сб", "нд"]
class MacedonianLocale(SlavicBaseLocale):
names = ["mk", "mk-mk"]
past = "пред {0}"
future = "за {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, List[str]]]] = {
"now": "сега",
"second": "една секунда",
"seconds": ["{0} секунда", "{0} секунди", "{0} секунди"],
"minute": "една минута",
"minutes": ["{0} минута", "{0} минути", "{0} минути"],
"hour": "еден саат",
"hours": ["{0} саат", "{0} саати", "{0} саати"],
"day": "еден ден",
"days": ["{0} ден", "{0} дена", "{0} дена"],
"week": "една недела",
"weeks": ["{0} недела", "{0} недели", "{0} недели"],
"month": "еден месец",
"months": ["{0} месец", "{0} месеци", "{0} месеци"],
"year": "една година",
"years": ["{0} година", "{0} години", "{0} години"],
}
meridians = {"am": "дп", "pm": "пп", "AM": "претпладне", "PM": "попладне"}
month_names = [
"",
"Јануари",
"Февруари",
"Март",
"Април",
"Мај",
"Јуни",
"Јули",
"Август",
"Септември",
"Октомври",
"Ноември",
"Декември",
]
month_abbreviations = [
"",
"Јан",
"Фев",
"Мар",
"Апр",
"Мај",
"Јун",
"Јул",
"Авг",
"Септ",
"Окт",
"Ноем",
"Декем",
]
day_names = [
"",
"Понеделник",
"Вторник",
"Среда",
"Четврток",
"Петок",
"Сабота",
"Недела",
]
day_abbreviations = [
"",
"Пон",
"Вт",
"Сре",
"Чет",
"Пет",
"Саб",
"Нед",
]
class GermanBaseLocale(Locale):
past = "vor {0}"
future = "in {0}"
and_word = "und"
timeframes = {
"now": "gerade eben",
"second": "einer Sekunde",
"seconds": "{0} Sekunden",
"minute": "einer Minute",
"minutes": "{0} Minuten",
"hour": "einer Stunde",
"hours": "{0} Stunden",
"day": "einem Tag",
"days": "{0} Tagen",
"week": "einer Woche",
"weeks": "{0} Wochen",
"month": "einem Monat",
"months": "{0} Monaten",
"year": "einem Jahr",
"years": "{0} Jahren",
}
timeframes_only_distance = timeframes.copy()
timeframes_only_distance["second"] = "eine Sekunde"
timeframes_only_distance["minute"] = "eine Minute"
timeframes_only_distance["hour"] = "eine Stunde"
timeframes_only_distance["day"] = "ein Tag"
timeframes_only_distance["days"] = "{0} Tage"
timeframes_only_distance["week"] = "eine Woche"
timeframes_only_distance["month"] = "ein Monat"
timeframes_only_distance["months"] = "{0} Monate"
timeframes_only_distance["year"] = "ein Jahr"
timeframes_only_distance["years"] = "{0} Jahre"
month_names = [
"",
"Januar",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dez",
]
day_names = [
"",
"Montag",
"Dienstag",
"Mittwoch",
"Donnerstag",
"Freitag",
"Samstag",
"Sonntag",
]
day_abbreviations = ["", "Mo", "Di", "Mi", "Do", "Fr", "Sa", "So"]
def _ordinal_number(self, n: int) -> str:
return f"{n}."
def describe(
self,
timeframe: TimeFrameLiteral,
delta: Union[int, float] = 0,
only_distance: bool = False,
) -> str:
"""Describes a delta within a timeframe in plain language.
:param timeframe: a string representing a timeframe.
:param delta: a quantity representing a delta in a timeframe.
:param only_distance: return only distance eg: "11 seconds" without "in" or "ago" keywords
"""
if not only_distance:
return super().describe(timeframe, delta, only_distance)
# German uses a different case without 'in' or 'ago'
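        # e.g. (illustrative): describe("hours", 2) would yield "in 2 Stunden", while
        # describe("hours", 2, only_distance=True) yields just "2 Stunden".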
humanized = self.timeframes_only_distance[timeframe].format(trunc(abs(delta)))
return humanized
class GermanLocale(GermanBaseLocale, Locale):
names = ["de", "de-de"]
class SwissLocale(GermanBaseLocale, Locale):
names = ["de-ch"]
class AustrianLocale(GermanBaseLocale, Locale):
names = ["de-at"]
month_names = [
"",
"Jänner",
"Februar",
"März",
"April",
"Mai",
"Juni",
"Juli",
"August",
"September",
"Oktober",
"November",
"Dezember",
]
class NorwegianLocale(Locale):
names = ["nb", "nb-no"]
past = "for {0} siden"
future = "om {0}"
timeframes = {
"now": "nå nettopp",
"second": "ett sekund",
"seconds": "{0} sekunder",
"minute": "ett minutt",
"minutes": "{0} minutter",
"hour": "en time",
"hours": "{0} timer",
"day": "en dag",
"days": "{0} dager",
"month": "en måned",
"months": "{0} måneder",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"mars",
"april",
"mai",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"mai",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"des",
]
day_names = [
"",
"mandag",
"tirsdag",
"onsdag",
"torsdag",
"fredag",
"lørdag",
"søndag",
]
day_abbreviations = ["", "ma", "ti", "on", "to", "fr", "lø", "sø"]
class NewNorwegianLocale(Locale):
names = ["nn", "nn-no"]
past = "for {0} sidan"
future = "om {0}"
timeframes = {
"now": "no nettopp",
"second": "eitt sekund",
"seconds": "{0} sekund",
"minute": "eitt minutt",
"minutes": "{0} minutt",
"hour": "ein time",
"hours": "{0} timar",
"day": "ein dag",
"days": "{0} dagar",
"month": "en månad",
"months": "{0} månader",
"year": "eitt år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"mars",
"april",
"mai",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"mai",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"des",
]
day_names = [
"",
"måndag",
"tysdag",
"onsdag",
"torsdag",
"fredag",
"laurdag",
"sundag",
]
day_abbreviations = ["", "må", "ty", "on", "to", "fr", "la", "su"]
class PortugueseLocale(Locale):
names = ["pt", "pt-pt"]
past = "há {0}"
future = "em {0}"
and_word = "e"
timeframes = {
"now": "agora",
"second": "um segundo",
"seconds": "{0} segundos",
"minute": "um minuto",
"minutes": "{0} minutos",
"hour": "uma hora",
"hours": "{0} horas",
"day": "um dia",
"days": "{0} dias",
"week": "uma semana",
"weeks": "{0} semanas",
"month": "um mês",
"months": "{0} meses",
"year": "um ano",
"years": "{0} anos",
}
month_names = [
"",
"Janeiro",
"Fevereiro",
"Março",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro",
]
month_abbreviations = [
"",
"Jan",
"Fev",
"Mar",
"Abr",
"Mai",
"Jun",
"Jul",
"Ago",
"Set",
"Out",
"Nov",
"Dez",
]
day_names = [
"",
"Segunda-feira",
"Terça-feira",
"Quarta-feira",
"Quinta-feira",
"Sexta-feira",
"Sábado",
"Domingo",
]
day_abbreviations = ["", "Seg", "Ter", "Qua", "Qui", "Sex", "Sab", "Dom"]
class BrazilianPortugueseLocale(PortugueseLocale):
names = ["pt-br"]
past = "faz {0}"
class TagalogLocale(Locale):
names = ["tl", "tl-ph"]
past = "nakaraang {0}"
future = "{0} mula ngayon"
timeframes = {
"now": "ngayon lang",
"second": "isang segundo",
"seconds": "{0} segundo",
"minute": "isang minuto",
"minutes": "{0} minuto",
"hour": "isang oras",
"hours": "{0} oras",
"day": "isang araw",
"days": "{0} araw",
"week": "isang linggo",
"weeks": "{0} linggo",
"month": "isang buwan",
"months": "{0} buwan",
"year": "isang taon",
"years": "{0} taon",
}
month_names = [
"",
"Enero",
"Pebrero",
"Marso",
"Abril",
"Mayo",
"Hunyo",
"Hulyo",
"Agosto",
"Setyembre",
"Oktubre",
"Nobyembre",
"Disyembre",
]
month_abbreviations = [
"",
"Ene",
"Peb",
"Mar",
"Abr",
"May",
"Hun",
"Hul",
"Ago",
"Set",
"Okt",
"Nob",
"Dis",
]
day_names = [
"",
"Lunes",
"Martes",
"Miyerkules",
"Huwebes",
"Biyernes",
"Sabado",
"Linggo",
]
day_abbreviations = ["", "Lun", "Mar", "Miy", "Huw", "Biy", "Sab", "Lin"]
meridians = {"am": "nu", "pm": "nh", "AM": "ng umaga", "PM": "ng hapon"}
def _ordinal_number(self, n: int) -> str:
return f"ika-{n}"
class VietnameseLocale(Locale):
names = ["vi", "vi-vn"]
past = "{0} trước"
future = "{0} nữa"
timeframes = {
"now": "hiện tại",
"second": "một giây",
"seconds": "{0} giây",
"minute": "một phút",
"minutes": "{0} phút",
"hour": "một giờ",
"hours": "{0} giờ",
"day": "một ngày",
"days": "{0} ngày",
"week": "một tuần",
"weeks": "{0} tuần",
"month": "một tháng",
"months": "{0} tháng",
"year": "một năm",
"years": "{0} năm",
}
month_names = [
"",
"Tháng Một",
"Tháng Hai",
"Tháng Ba",
"Tháng Tư",
"Tháng Năm",
"Tháng Sáu",
"Tháng Bảy",
"Tháng Tám",
"Tháng Chín",
"Tháng Mười",
"Tháng Mười Một",
"Tháng Mười Hai",
]
month_abbreviations = [
"",
"Tháng 1",
"Tháng 2",
"Tháng 3",
"Tháng 4",
"Tháng 5",
"Tháng 6",
"Tháng 7",
"Tháng 8",
"Tháng 9",
"Tháng 10",
"Tháng 11",
"Tháng 12",
]
day_names = [
"",
"Thứ Hai",
"Thứ Ba",
"Thứ Tư",
"Thứ Năm",
"Thứ Sáu",
"Thứ Bảy",
"Chủ Nhật",
]
day_abbreviations = ["", "Thứ 2", "Thứ 3", "Thứ 4", "Thứ 5", "Thứ 6", "Thứ 7", "CN"]
class TurkishLocale(Locale):
names = ["tr", "tr-tr"]
past = "{0} önce"
future = "{0} sonra"
timeframes = {
"now": "şimdi",
"second": "bir saniye",
"seconds": "{0} saniye",
"minute": "bir dakika",
"minutes": "{0} dakika",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"month": "bir ay",
"months": "{0} ay",
"year": "yıl",
"years": "{0} yıl",
}
month_names = [
"",
"Ocak",
"Şubat",
"Mart",
"Nisan",
"Mayıs",
"Haziran",
"Temmuz",
"Ağustos",
"Eylül",
"Ekim",
"Kasım",
"Aralık",
]
month_abbreviations = [
"",
"Oca",
"Şub",
"Mar",
"Nis",
"May",
"Haz",
"Tem",
"Ağu",
"Eyl",
"Eki",
"Kas",
"Ara",
]
day_names = [
"",
"Pazartesi",
"Salı",
"Çarşamba",
"Perşembe",
"Cuma",
"Cumartesi",
"Pazar",
]
day_abbreviations = ["", "Pzt", "Sal", "Çar", "Per", "Cum", "Cmt", "Paz"]
class AzerbaijaniLocale(Locale):
names = ["az", "az-az"]
past = "{0} əvvəl"
future = "{0} sonra"
timeframes = {
"now": "indi",
"second": "saniyə",
"seconds": "{0} saniyə",
"minute": "bir dəqiqə",
"minutes": "{0} dəqiqə",
"hour": "bir saat",
"hours": "{0} saat",
"day": "bir gün",
"days": "{0} gün",
"month": "bir ay",
"months": "{0} ay",
"year": "il",
"years": "{0} il",
}
month_names = [
"",
"Yanvar",
"Fevral",
"Mart",
"Aprel",
"May",
"İyun",
"İyul",
"Avqust",
"Sentyabr",
"Oktyabr",
"Noyabr",
"Dekabr",
]
month_abbreviations = [
"",
"Yan",
"Fev",
"Mar",
"Apr",
"May",
"İyn",
"İyl",
"Avq",
"Sen",
"Okt",
"Noy",
"Dek",
]
day_names = [
"",
"Bazar ertəsi",
"Çərşənbə axşamı",
"Çərşənbə",
"Cümə axşamı",
"Cümə",
"Şənbə",
"Bazar",
]
day_abbreviations = ["", "Ber", "Çax", "Çər", "Cax", "Cüm", "Şnb", "Bzr"]
class ArabicLocale(Locale):
names = [
"ar",
"ar-ae",
"ar-bh",
"ar-dj",
"ar-eg",
"ar-eh",
"ar-er",
"ar-km",
"ar-kw",
"ar-ly",
"ar-om",
"ar-qa",
"ar-sa",
"ar-sd",
"ar-so",
"ar-ss",
"ar-td",
"ar-ye",
]
past = "منذ {0}"
future = "خلال {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "الآن",
"second": "ثانية",
"seconds": {"double": "ثانيتين", "ten": "{0} ثوان", "higher": "{0} ثانية"},
"minute": "دقيقة",
"minutes": {"double": "دقيقتين", "ten": "{0} دقائق", "higher": "{0} دقيقة"},
"hour": "ساعة",
"hours": {"double": "ساعتين", "ten": "{0} ساعات", "higher": "{0} ساعة"},
"day": "يوم",
"days": {"double": "يومين", "ten": "{0} أيام", "higher": "{0} يوم"},
"month": "شهر",
"months": {"double": "شهرين", "ten": "{0} أشهر", "higher": "{0} شهر"},
"year": "سنة",
"years": {"double": "سنتين", "ten": "{0} سنوات", "higher": "{0} سنة"},
}
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"مايو",
"يونيو",
"يوليو",
"أغسطس",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"مايو",
"يونيو",
"يوليو",
"أغسطس",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
day_names = [
"",
"الإثنين",
"الثلاثاء",
"الأربعاء",
"الخميس",
"الجمعة",
"السبت",
"الأحد",
]
day_abbreviations = ["", "إثنين", "ثلاثاء", "أربعاء", "خميس", "جمعة", "سبت", "أحد"]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, Mapping):
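            # As encoded above: a delta of 2 takes the dual form, 3-10 the "ten"
            # plural, and anything larger falls back to the "higher" form.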
if delta == 2:
form = form["double"]
elif 2 < delta <= 10:
form = form["ten"]
else:
form = form["higher"]
return form.format(delta)
class LevantArabicLocale(ArabicLocale):
names = ["ar-iq", "ar-jo", "ar-lb", "ar-ps", "ar-sy"]
month_names = [
"",
"كانون الثاني",
"شباط",
"آذار",
"نيسان",
"أيار",
"حزيران",
"تموز",
"آب",
"أيلول",
"تشرين الأول",
"تشرين الثاني",
"كانون الأول",
]
month_abbreviations = [
"",
"كانون الثاني",
"شباط",
"آذار",
"نيسان",
"أيار",
"حزيران",
"تموز",
"آب",
"أيلول",
"تشرين الأول",
"تشرين الثاني",
"كانون الأول",
]
class AlgeriaTunisiaArabicLocale(ArabicLocale):
names = ["ar-tn", "ar-dz"]
month_names = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
month_abbreviations = [
"",
"جانفي",
"فيفري",
"مارس",
"أفريل",
"ماي",
"جوان",
"جويلية",
"أوت",
"سبتمبر",
"أكتوبر",
"نوفمبر",
"ديسمبر",
]
class MauritaniaArabicLocale(ArabicLocale):
names = ["ar-mr"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"إبريل",
"مايو",
"يونيو",
"يوليو",
"أغشت",
"شتمبر",
"أكتوبر",
"نوفمبر",
"دجمبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"إبريل",
"مايو",
"يونيو",
"يوليو",
"أغشت",
"شتمبر",
"أكتوبر",
"نوفمبر",
"دجمبر",
]
class MoroccoArabicLocale(ArabicLocale):
names = ["ar-ma"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
class IcelandicLocale(Locale):
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
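        # Each non-"now" entry is a (past form, future form) pair; the sign of delta picks one.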
if delta < 0:
form = form[0]
elif delta > 0:
form = form[1]
# FIXME: handle when delta is 0
return form.format(abs(delta)) # type: ignore
names = ["is", "is-is"]
past = "fyrir {0} síðan"
future = "eftir {0}"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[Tuple[str, str], str]]] = {
"now": "rétt í þessu",
"second": ("sekúndu", "sekúndu"),
"seconds": ("{0} nokkrum sekúndum", "nokkrar sekúndur"),
"minute": ("einni mínútu", "eina mínútu"),
"minutes": ("{0} mínútum", "{0} mínútur"),
"hour": ("einum tíma", "einn tíma"),
"hours": ("{0} tímum", "{0} tíma"),
"day": ("einum degi", "einn dag"),
"days": ("{0} dögum", "{0} daga"),
"month": ("einum mánuði", "einn mánuð"),
"months": ("{0} mánuðum", "{0} mánuði"),
"year": ("einu ári", "eitt ár"),
"years": ("{0} árum", "{0} ár"),
}
meridians = {"am": "f.h.", "pm": "e.h.", "AM": "f.h.", "PM": "e.h."}
month_names = [
"",
"janúar",
"febrúar",
"mars",
"apríl",
"maí",
"júní",
"júlí",
"ágúst",
"september",
"október",
"nóvember",
"desember",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maí",
"jún",
"júl",
"ágú",
"sep",
"okt",
"nóv",
"des",
]
day_names = [
"",
"mánudagur",
"þriðjudagur",
"miðvikudagur",
"fimmtudagur",
"föstudagur",
"laugardagur",
"sunnudagur",
]
day_abbreviations = ["", "mán", "þri", "mið", "fim", "fös", "lau", "sun"]
class DanishLocale(Locale):
names = ["da", "da-dk"]
past = "for {0} siden"
future = "efter {0}"
and_word = "og"
timeframes = {
"now": "lige nu",
"second": "et sekund",
"seconds": "{0} et par sekunder",
"minute": "et minut",
"minutes": "{0} minutter",
"hour": "en time",
"hours": "{0} timer",
"day": "en dag",
"days": "{0} dage",
"month": "en måned",
"months": "{0} måneder",
"year": "et år",
"years": "{0} år",
}
month_names = [
"",
"januar",
"februar",
"marts",
"april",
"maj",
"juni",
"juli",
"august",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"mandag",
"tirsdag",
"onsdag",
"torsdag",
"fredag",
"lørdag",
"søndag",
]
day_abbreviations = ["", "man", "tir", "ons", "tor", "fre", "lør", "søn"]
class MalayalamLocale(Locale):
names = ["ml"]
past = "{0} മുമ്പ്"
future = "{0} ശേഷം"
timeframes = {
"now": "ഇപ്പോൾ",
"second": "ഒരു നിമിഷം",
"seconds": "{0} സെക്കന്റ്",
"minute": "ഒരു മിനിറ്റ്",
"minutes": "{0} മിനിറ്റ്",
"hour": "ഒരു മണിക്കൂർ",
"hours": "{0} മണിക്കൂർ",
"day": "ഒരു ദിവസം ",
"days": "{0} ദിവസം ",
"month": "ഒരു മാസം ",
"months": "{0} മാസം ",
"year": "ഒരു വർഷം ",
"years": "{0} വർഷം ",
}
meridians = {
"am": "രാവിലെ",
"pm": "ഉച്ചക്ക് ശേഷം",
"AM": "രാവിലെ",
"PM": "ഉച്ചക്ക് ശേഷം",
}
month_names = [
"",
"ജനുവരി",
"ഫെബ്രുവരി",
"മാർച്ച്",
"ഏപ്രിൽ ",
"മെയ് ",
"ജൂണ്",
"ജൂലൈ",
"ഓഗസ്റ്റ്",
"സെപ്റ്റംബർ",
"ഒക്ടോബർ",
"നവംബർ",
"ഡിസംബർ",
]
month_abbreviations = [
"",
"ജനു",
"ഫെബ് ",
"മാർ",
"ഏപ്രിൽ",
"മേയ്",
"ജൂണ്",
"ജൂലൈ",
"ഓഗസ്റ",
"സെപ്റ്റ",
"ഒക്ടോ",
"നവം",
"ഡിസം",
]
day_names = ["", "തിങ്കള്", "ചൊവ്വ", "ബുധന്", "വ്യാഴം", "വെള്ളി", "ശനി", "ഞായര്"]
day_abbreviations = [
"",
"തിങ്കള്",
"ചൊവ്വ",
"ബുധന്",
"വ്യാഴം",
"വെള്ളി",
"ശനി",
"ഞായര്",
]
class HindiLocale(Locale):
names = ["hi"]
past = "{0} पहले"
future = "{0} बाद"
timeframes = {
"now": "अभी",
"second": "एक पल",
"seconds": "{0} सेकंड्",
"minute": "एक मिनट ",
"minutes": "{0} मिनट ",
"hour": "एक घंटा",
"hours": "{0} घंटे",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक माह ",
"months": "{0} महीने ",
"year": "एक वर्ष ",
"years": "{0} साल ",
}
meridians = {"am": "सुबह", "pm": "शाम", "AM": "सुबह", "PM": "शाम"}
month_names = [
"",
"जनवरी",
"फरवरी",
"मार्च",
"अप्रैल ",
"मई",
"जून",
"जुलाई",
"अगस्त",
"सितंबर",
"अक्टूबर",
"नवंबर",
"दिसंबर",
]
month_abbreviations = [
"",
"जन",
"फ़र",
"मार्च",
"अप्रै",
"मई",
"जून",
"जुलाई",
"आग",
"सित",
"अकत",
"नवे",
"दिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "गुरुवार", "शुक्र", "शनि", "रवि"]
class CzechLocale(Locale):
names = ["cs", "cs-cz"]
timeframes: ClassVar[
Mapping[TimeFrameLiteral, Union[Mapping[str, Union[List[str], str]], str]]
] = {
"now": "Teď",
"second": {"past": "vteřina", "future": "vteřina", "zero": "vteřina"},
"seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekund"]},
"minute": {"past": "minutou", "future": "minutu", "zero": "{0} minut"},
"minutes": {"past": "{0} minutami", "future": ["{0} minuty", "{0} minut"]},
"hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodin"},
"hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodin"]},
"day": {"past": "dnem", "future": "den", "zero": "{0} dnů"},
"days": {"past": "{0} dny", "future": ["{0} dny", "{0} dnů"]},
"week": {"past": "týdnem", "future": "týden", "zero": "{0} týdnů"},
"weeks": {"past": "{0} týdny", "future": ["{0} týdny", "{0} týdnů"]},
"month": {"past": "měsícem", "future": "měsíc", "zero": "{0} měsíců"},
"months": {"past": "{0} měsíci", "future": ["{0} měsíce", "{0} měsíců"]},
"year": {"past": "rokem", "future": "rok", "zero": "{0} let"},
"years": {"past": "{0} lety", "future": ["{0} roky", "{0} let"]},
}
past = "Před {0}"
future = "Za {0}"
month_names = [
"",
"leden",
"únor",
"březen",
"duben",
"květen",
"červen",
"červenec",
"srpen",
"září",
"říjen",
"listopad",
"prosinec",
]
month_abbreviations = [
"",
"led",
"úno",
"bře",
"dub",
"kvě",
"čvn",
"čvc",
"srp",
"zář",
"říj",
"lis",
"pro",
]
day_names = [
"",
"pondělí",
"úterý",
"středa",
"čtvrtek",
"pátek",
"sobota",
"neděle",
]
day_abbreviations = ["", "po", "út", "st", "čt", "pá", "so", "ne"]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
"""Czech aware time frame format function, takes into account
the differences between past and future forms."""
abs_delta = abs(delta)
form = self.timeframes[timeframe]
if isinstance(form, str):
return form.format(abs_delta)
if delta == 0:
key = "zero" # And *never* use 0 in the singular!
elif delta > 0:
key = "future"
else:
key = "past"
form: Union[List[str], str] = form[key]
if isinstance(form, list):
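            # Czech paucal rule: counts ending in 2-4 (except 12-14) take the first
            # plural form, everything else the second.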
if 2 <= abs_delta % 10 <= 4 and (
abs_delta % 100 < 10 or abs_delta % 100 >= 20
):
form = form[0]
else:
form = form[1]
return form.format(abs_delta)
class SlovakLocale(Locale):
names = ["sk", "sk-sk"]
timeframes: ClassVar[
Mapping[TimeFrameLiteral, Union[Mapping[str, Union[List[str], str]], str]]
] = {
"now": "Teraz",
"second": {"past": "sekundou", "future": "sekundu", "zero": "{0} sekúnd"},
"seconds": {"past": "{0} sekundami", "future": ["{0} sekundy", "{0} sekúnd"]},
"minute": {"past": "minútou", "future": "minútu", "zero": "{0} minút"},
"minutes": {"past": "{0} minútami", "future": ["{0} minúty", "{0} minút"]},
"hour": {"past": "hodinou", "future": "hodinu", "zero": "{0} hodín"},
"hours": {"past": "{0} hodinami", "future": ["{0} hodiny", "{0} hodín"]},
"day": {"past": "dňom", "future": "deň", "zero": "{0} dní"},
"days": {"past": "{0} dňami", "future": ["{0} dni", "{0} dní"]},
"week": {"past": "týždňom", "future": "týždeň", "zero": "{0} týždňov"},
"weeks": {"past": "{0} týždňami", "future": ["{0} týždne", "{0} týždňov"]},
"month": {"past": "mesiacom", "future": "mesiac", "zero": "{0} mesiacov"},
"months": {"past": "{0} mesiacmi", "future": ["{0} mesiace", "{0} mesiacov"]},
"year": {"past": "rokom", "future": "rok", "zero": "{0} rokov"},
"years": {"past": "{0} rokmi", "future": ["{0} roky", "{0} rokov"]},
}
past = "Pred {0}"
future = "O {0}"
and_word = "a"
month_names = [
"",
"január",
"február",
"marec",
"apríl",
"máj",
"jún",
"júl",
"august",
"september",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"máj",
"jún",
"júl",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"pondelok",
"utorok",
"streda",
"štvrtok",
"piatok",
"sobota",
"nedeľa",
]
day_abbreviations = ["", "po", "ut", "st", "št", "pi", "so", "ne"]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
"""Slovak aware time frame format function, takes into account
the differences between past and future forms."""
abs_delta = abs(delta)
form = self.timeframes[timeframe]
if isinstance(form, str):
return form.format(abs_delta)
if delta == 0:
key = "zero" # And *never* use 0 in the singular!
elif delta > 0:
key = "future"
else:
key = "past"
form: Union[List[str], str] = form[key]
if isinstance(form, list):
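            # Slovak paucal rule: counts ending in 2-4 (except 12-14) take the first
            # plural form, everything else the second.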
if 2 <= abs_delta % 10 <= 4 and (
abs_delta % 100 < 10 or abs_delta % 100 >= 20
):
form = form[0]
else:
form = form[1]
return form.format(abs_delta)
class FarsiLocale(Locale):
names = ["fa", "fa-ir"]
past = "{0} قبل"
future = "در {0}"
timeframes = {
"now": "اکنون",
"second": "یک لحظه",
"seconds": "{0} ثانیه",
"minute": "یک دقیقه",
"minutes": "{0} دقیقه",
"hour": "یک ساعت",
"hours": "{0} ساعت",
"day": "یک روز",
"days": "{0} روز",
"month": "یک ماه",
"months": "{0} ماه",
"year": "یک سال",
"years": "{0} سال",
}
meridians = {
"am": "قبل از ظهر",
"pm": "بعد از ظهر",
"AM": "قبل از ظهر",
"PM": "بعد از ظهر",
}
month_names = [
"",
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"دو شنبه",
"سه شنبه",
"چهارشنبه",
"پنجشنبه",
"جمعه",
"شنبه",
"یکشنبه",
]
day_abbreviations = ["", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
class HebrewLocale(Locale):
names = ["he", "he-il"]
past = "לפני {0}"
future = "בעוד {0}"
and_word = "ו"
timeframes = {
"now": "הרגע",
"second": "שנייה",
"seconds": "{0} שניות",
"minute": "דקה",
"minutes": "{0} דקות",
"hour": "שעה",
"hours": "{0} שעות",
"2-hours": "שעתיים",
"day": "יום",
"days": "{0} ימים",
"2-days": "יומיים",
"week": "שבוע",
"weeks": "{0} שבועות",
"2-weeks": "שבועיים",
"month": "חודש",
"months": "{0} חודשים",
"2-months": "חודשיים",
"year": "שנה",
"years": "{0} שנים",
"2-years": "שנתיים",
}
meridians = {
"am": 'לפנ"צ',
"pm": 'אחר"צ',
"AM": "לפני הצהריים",
"PM": "אחרי הצהריים",
}
month_names = [
"",
"ינואר",
"פברואר",
"מרץ",
"אפריל",
"מאי",
"יוני",
"יולי",
"אוגוסט",
"ספטמבר",
"אוקטובר",
"נובמבר",
"דצמבר",
]
month_abbreviations = [
"",
"ינו׳",
"פבר׳",
"מרץ",
"אפר׳",
"מאי",
"יוני",
"יולי",
"אוג׳",
"ספט׳",
"אוק׳",
"נוב׳",
"דצמ׳",
]
day_names = ["", "שני", "שלישי", "רביעי", "חמישי", "שישי", "שבת", "ראשון"]
day_abbreviations = ["", "ב׳", "ג׳", "ד׳", "ה׳", "ו׳", "ש׳", "א׳"]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
"""Hebrew couple of <timeframe> aware"""
couple = f"2-{timeframe}"
single = timeframe.rstrip("s")
if abs(delta) == 2 and couple in self.timeframes:
key = couple
elif abs(delta) == 1 and single in self.timeframes:
key = single
else:
key = timeframe
return self.timeframes[key].format(trunc(abs(delta)))
def describe_multi(
self,
timeframes: Sequence[Tuple[TimeFrameLiteral, Union[int, float]]],
only_distance: bool = False,
) -> str:
"""Describes a delta within multiple timeframes in plain language.
In Hebrew, the and word behaves a bit differently.
:param timeframes: a list of string, quantity pairs each representing a timeframe and delta.
:param only_distance: return only distance eg: "2 hours and 11 seconds" without "in" or "ago" keywords
"""
humanized = ""
for index, (timeframe, delta) in enumerate(timeframes):
last_humanized = self._format_timeframe(timeframe, delta)
if index == 0:
humanized = last_humanized
elif index == len(timeframes) - 1: # Must have at least 2 items
humanized += " " + self.and_word
if last_humanized[0].isdecimal():
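                    # Attach the conjunction to a leading numeral with a maqaf (Hebrew hyphen).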
humanized += "־"
humanized += last_humanized
            else:  # middle items are joined with commas; the and-word is reserved for the last one
humanized += ", " + last_humanized
if not only_distance:
humanized = self._format_relative(humanized, timeframe, delta)
return humanized
class MarathiLocale(Locale):
names = ["mr"]
past = "{0} आधी"
future = "{0} नंतर"
timeframes = {
"now": "सद्य",
"second": "एक सेकंद",
"seconds": "{0} सेकंद",
"minute": "एक मिनिट ",
"minutes": "{0} मिनिट ",
"hour": "एक तास",
"hours": "{0} तास",
"day": "एक दिवस",
"days": "{0} दिवस",
"month": "एक महिना ",
"months": "{0} महिने ",
"year": "एक वर्ष ",
"years": "{0} वर्ष ",
}
meridians = {"am": "सकाळ", "pm": "संध्याकाळ", "AM": "सकाळ", "PM": "संध्याकाळ"}
month_names = [
"",
"जानेवारी",
"फेब्रुवारी",
"मार्च",
"एप्रिल",
"मे",
"जून",
"जुलै",
"अॉगस्ट",
"सप्टेंबर",
"अॉक्टोबर",
"नोव्हेंबर",
"डिसेंबर",
]
month_abbreviations = [
"",
"जान",
"फेब्रु",
"मार्च",
"एप्रि",
"मे",
"जून",
"जुलै",
"अॉग",
"सप्टें",
"अॉक्टो",
"नोव्हें",
"डिसें",
]
day_names = [
"",
"सोमवार",
"मंगळवार",
"बुधवार",
"गुरुवार",
"शुक्रवार",
"शनिवार",
"रविवार",
]
day_abbreviations = ["", "सोम", "मंगळ", "बुध", "गुरु", "शुक्र", "शनि", "रवि"]
class CatalanLocale(Locale):
names = ["ca", "ca-es", "ca-ad", "ca-fr", "ca-it"]
past = "Fa {0}"
future = "En {0}"
and_word = "i"
timeframes = {
"now": "Ara mateix",
"second": "un segon",
"seconds": "{0} segons",
"minute": "1 minut",
"minutes": "{0} minuts",
"hour": "una hora",
"hours": "{0} hores",
"day": "un dia",
"days": "{0} dies",
"month": "un mes",
"months": "{0} mesos",
"year": "un any",
"years": "{0} anys",
}
month_names = [
"",
"gener",
"febrer",
"març",
"abril",
"maig",
"juny",
"juliol",
"agost",
"setembre",
"octubre",
"novembre",
"desembre",
]
month_abbreviations = [
"",
"gen.",
"febr.",
"març",
"abr.",
"maig",
"juny",
"jul.",
"ag.",
"set.",
"oct.",
"nov.",
"des.",
]
day_names = [
"",
"dilluns",
"dimarts",
"dimecres",
"dijous",
"divendres",
"dissabte",
"diumenge",
]
day_abbreviations = [
"",
"dl.",
"dt.",
"dc.",
"dj.",
"dv.",
"ds.",
"dg.",
]
class BasqueLocale(Locale):
names = ["eu", "eu-eu"]
past = "duela {0}"
future = "{0}" # I don't know what's the right phrase in Basque for the future.
timeframes = {
"now": "Orain",
"second": "segundo bat",
"seconds": "{0} segundu",
"minute": "minutu bat",
"minutes": "{0} minutu",
"hour": "ordu bat",
"hours": "{0} ordu",
"day": "egun bat",
"days": "{0} egun",
"month": "hilabete bat",
"months": "{0} hilabet",
"year": "urte bat",
"years": "{0} urte",
}
month_names = [
"",
"urtarrilak",
"otsailak",
"martxoak",
"apirilak",
"maiatzak",
"ekainak",
"uztailak",
"abuztuak",
"irailak",
"urriak",
"azaroak",
"abenduak",
]
month_abbreviations = [
"",
"urt",
"ots",
"mar",
"api",
"mai",
"eka",
"uzt",
"abu",
"ira",
"urr",
"aza",
"abe",
]
day_names = [
"",
"astelehena",
"asteartea",
"asteazkena",
"osteguna",
"ostirala",
"larunbata",
"igandea",
]
day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"]
class HungarianLocale(Locale):
names = ["hu", "hu-hu"]
past = "{0} ezelőtt"
future = "{0} múlva"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "éppen most",
"second": {"past": "egy második", "future": "egy második"},
"seconds": {"past": "{0} másodpercekkel", "future": "{0} pár másodperc"},
"minute": {"past": "egy perccel", "future": "egy perc"},
"minutes": {"past": "{0} perccel", "future": "{0} perc"},
"hour": {"past": "egy órával", "future": "egy óra"},
"hours": {"past": "{0} órával", "future": "{0} óra"},
"day": {"past": "egy nappal", "future": "egy nap"},
"days": {"past": "{0} nappal", "future": "{0} nap"},
"month": {"past": "egy hónappal", "future": "egy hónap"},
"months": {"past": "{0} hónappal", "future": "{0} hónap"},
"year": {"past": "egy évvel", "future": "egy év"},
"years": {"past": "{0} évvel", "future": "{0} év"},
}
month_names = [
"",
"január",
"február",
"március",
"április",
"május",
"június",
"július",
"augusztus",
"szeptember",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"febr",
"márc",
"ápr",
"máj",
"jún",
"júl",
"aug",
"szept",
"okt",
"nov",
"dec",
]
day_names = [
"",
"hétfő",
"kedd",
"szerda",
"csütörtök",
"péntek",
"szombat",
"vasárnap",
]
day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"]
meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"}
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
if isinstance(form, Mapping):
if delta > 0:
form = form["future"]
else:
form = form["past"]
return form.format(abs(delta))
class EsperantoLocale(Locale):
names = ["eo", "eo-xx"]
past = "antaŭ {0}"
future = "post {0}"
timeframes = {
"now": "nun",
"second": "sekundo",
"seconds": "{0} kelkaj sekundoj",
"minute": "unu minuto",
"minutes": "{0} minutoj",
"hour": "un horo",
"hours": "{0} horoj",
"day": "unu tago",
"days": "{0} tagoj",
"month": "unu monato",
"months": "{0} monatoj",
"year": "unu jaro",
"years": "{0} jaroj",
}
month_names = [
"",
"januaro",
"februaro",
"marto",
"aprilo",
"majo",
"junio",
"julio",
"aŭgusto",
"septembro",
"oktobro",
"novembro",
"decembro",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aŭg",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"lundo",
"mardo",
"merkredo",
"ĵaŭdo",
"vendredo",
"sabato",
"dimanĉo",
]
day_abbreviations = ["", "lun", "mar", "mer", "ĵaŭ", "ven", "sab", "dim"]
meridians = {"am": "atm", "pm": "ptm", "AM": "ATM", "PM": "PTM"}
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=a))a)"
def _ordinal_number(self, n: int) -> str:
return f"{n}a"
class ThaiLocale(Locale):
names = ["th", "th-th"]
past = "{0}{1}ที่ผ่านมา"
future = "ในอีก{1}{0}"
timeframes = {
"now": "ขณะนี้",
"second": "วินาที",
"seconds": "{0} ไม่กี่วินาที",
"minute": "1 นาที",
"minutes": "{0} นาที",
"hour": "1 ชั่วโมง",
"hours": "{0} ชั่วโมง",
"day": "1 วัน",
"days": "{0} วัน",
"month": "1 เดือน",
"months": "{0} เดือน",
"year": "1 ปี",
"years": "{0} ปี",
}
month_names = [
"",
"มกราคม",
"กุมภาพันธ์",
"มีนาคม",
"เมษายน",
"พฤษภาคม",
"มิถุนายน",
"กรกฎาคม",
"สิงหาคม",
"กันยายน",
"ตุลาคม",
"พฤศจิกายน",
"ธันวาคม",
]
month_abbreviations = [
"",
"ม.ค.",
"ก.พ.",
"มี.ค.",
"เม.ย.",
"พ.ค.",
"มิ.ย.",
"ก.ค.",
"ส.ค.",
"ก.ย.",
"ต.ค.",
"พ.ย.",
"ธ.ค.",
]
day_names = ["", "จันทร์", "อังคาร", "พุธ", "พฤหัสบดี", "ศุกร์", "เสาร์", "อาทิตย์"]
day_abbreviations = ["", "จ", "อ", "พ", "พฤ", "ศ", "ส", "อา"]
meridians = {"am": "am", "pm": "pm", "AM": "AM", "PM": "PM"}
BE_OFFSET = 543
def year_full(self, year: int) -> str:
"""Thai always use Buddhist Era (BE) which is CE + 543"""
year += self.BE_OFFSET
return f"{year:04d}"
def year_abbreviation(self, year: int) -> str:
"""Thai always use Buddhist Era (BE) which is CE + 543"""
year += self.BE_OFFSET
return f"{year:04d}"[2:]
def _format_relative(
self,
humanized: str,
timeframe: TimeFrameLiteral,
delta: Union[float, int],
) -> str:
"""Thai normally doesn't have any space between words"""
if timeframe == "now":
return humanized
space = "" if timeframe == "seconds" else " "
direction = self.past if delta < 0 else self.future
return direction.format(humanized, space)
class BengaliLocale(Locale):
names = ["bn", "bn-bd", "bn-in"]
past = "{0} আগে"
future = "{0} পরে"
timeframes = {
"now": "এখন",
"second": "একটি দ্বিতীয়",
"seconds": "{0} সেকেন্ড",
"minute": "এক মিনিট",
"minutes": "{0} মিনিট",
"hour": "এক ঘণ্টা",
"hours": "{0} ঘণ্টা",
"day": "এক দিন",
"days": "{0} দিন",
"month": "এক মাস",
"months": "{0} মাস ",
"year": "এক বছর",
"years": "{0} বছর",
}
meridians = {"am": "সকাল", "pm": "বিকাল", "AM": "সকাল", "PM": "বিকাল"}
month_names = [
"",
"জানুয়ারি",
"ফেব্রুয়ারি",
"মার্চ",
"এপ্রিল",
"মে",
"জুন",
"জুলাই",
"আগস্ট",
"সেপ্টেম্বর",
"অক্টোবর",
"নভেম্বর",
"ডিসেম্বর",
]
month_abbreviations = [
"",
"জানু",
"ফেব",
"মার্চ",
"এপ্রি",
"মে",
"জুন",
"জুল",
"অগা",
"সেপ্ট",
"অক্টো",
"নভে",
"ডিসে",
]
day_names = [
"",
"সোমবার",
"মঙ্গলবার",
"বুধবার",
"বৃহস্পতিবার",
"শুক্রবার",
"শনিবার",
"রবিবার",
]
day_abbreviations = ["", "সোম", "মঙ্গল", "বুধ", "বৃহঃ", "শুক্র", "শনি", "রবি"]
def _ordinal_number(self, n: int) -> str:
if n > 10 or n == 0:
return f"{n}তম"
if n in [1, 5, 7, 8, 9, 10]:
return f"{n}ম"
if n in [2, 3]:
return f"{n}য়"
if n == 4:
return f"{n}র্থ"
if n == 6:
return f"{n}ষ্ঠ"
class RomanshLocale(Locale):
names = ["rm", "rm-ch"]
past = "avant {0}"
future = "en {0}"
timeframes = {
"now": "en quest mument",
"second": "in secunda",
"seconds": "{0} secundas",
"minute": "ina minuta",
"minutes": "{0} minutas",
"hour": "in'ura",
"hours": "{0} ura",
"day": "in di",
"days": "{0} dis",
"month": "in mais",
"months": "{0} mais",
"year": "in onn",
"years": "{0} onns",
}
month_names = [
"",
"schaner",
"favrer",
"mars",
"avrigl",
"matg",
"zercladur",
"fanadur",
"avust",
"settember",
"october",
"november",
"december",
]
month_abbreviations = [
"",
"schan",
"fav",
"mars",
"avr",
"matg",
"zer",
"fan",
"avu",
"set",
"oct",
"nov",
"dec",
]
day_names = [
"",
"glindesdi",
"mardi",
"mesemna",
"gievgia",
"venderdi",
"sonda",
"dumengia",
]
day_abbreviations = ["", "gli", "ma", "me", "gie", "ve", "so", "du"]
class RomanianLocale(Locale):
names = ["ro", "ro-ro"]
past = "{0} în urmă"
future = "peste {0}"
and_word = "și"
timeframes = {
"now": "acum",
"second": "o secunda",
"seconds": "{0} câteva secunde",
"minute": "un minut",
"minutes": "{0} minute",
"hour": "o oră",
"hours": "{0} ore",
"day": "o zi",
"days": "{0} zile",
"month": "o lună",
"months": "{0} luni",
"year": "un an",
"years": "{0} ani",
}
month_names = [
"",
"ianuarie",
"februarie",
"martie",
"aprilie",
"mai",
"iunie",
"iulie",
"august",
"septembrie",
"octombrie",
"noiembrie",
"decembrie",
]
month_abbreviations = [
"",
"ian",
"febr",
"mart",
"apr",
"mai",
"iun",
"iul",
"aug",
"sept",
"oct",
"nov",
"dec",
]
day_names = [
"",
"luni",
"marți",
"miercuri",
"joi",
"vineri",
"sâmbătă",
"duminică",
]
day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"]
class SlovenianLocale(Locale):
names = ["sl", "sl-si"]
past = "pred {0}"
future = "čez {0}"
and_word = "in"
timeframes = {
"now": "zdaj",
"second": "sekundo",
"seconds": "{0} sekund",
"minute": "minuta",
"minutes": "{0} minutami",
"hour": "uro",
"hours": "{0} ur",
"day": "dan",
"days": "{0} dni",
"month": "mesec",
"months": "{0} mesecev",
"year": "leto",
"years": "{0} let",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januar",
"Februar",
"Marec",
"April",
"Maj",
"Junij",
"Julij",
"Avgust",
"September",
"Oktober",
"November",
"December",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Maj",
"Jun",
"Jul",
"Avg",
"Sep",
"Okt",
"Nov",
"Dec",
]
day_names = [
"",
"Ponedeljek",
"Torek",
"Sreda",
"Četrtek",
"Petek",
"Sobota",
"Nedelja",
]
day_abbreviations = ["", "Pon", "Tor", "Sre", "Čet", "Pet", "Sob", "Ned"]
class IndonesianLocale(Locale):
names = ["id", "id-id"]
past = "{0} yang lalu"
future = "dalam {0}"
and_word = "dan"
timeframes = {
"now": "baru saja",
"second": "1 sebentar",
"seconds": "{0} detik",
"minute": "1 menit",
"minutes": "{0} menit",
"hour": "1 jam",
"hours": "{0} jam",
"day": "1 hari",
"days": "{0} hari",
"month": "1 bulan",
"months": "{0} bulan",
"year": "1 tahun",
"years": "{0} tahun",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januari",
"Februari",
"Maret",
"April",
"Mei",
"Juni",
"Juli",
"Agustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Mei",
"Jun",
"Jul",
"Ags",
"Sept",
"Okt",
"Nov",
"Des",
]
day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"]
day_abbreviations = [
"",
"Senin",
"Selasa",
"Rabu",
"Kamis",
"Jumat",
"Sabtu",
"Minggu",
]
class NepaliLocale(Locale):
names = ["ne", "ne-np"]
past = "{0} पहिले"
future = "{0} पछी"
timeframes = {
"now": "अहिले",
"second": "एक सेकेन्ड",
"seconds": "{0} सेकण्ड",
"minute": "मिनेट",
"minutes": "{0} मिनेट",
"hour": "एक घण्टा",
"hours": "{0} घण्टा",
"day": "एक दिन",
"days": "{0} दिन",
"month": "एक महिना",
"months": "{0} महिना",
"year": "एक बर्ष",
"years": "बर्ष",
}
meridians = {"am": "पूर्वाह्न", "pm": "अपरान्ह", "AM": "पूर्वाह्न", "PM": "अपरान्ह"}
month_names = [
"",
"जनवरी",
"फेब्रुअरी",
"मार्च",
"एप्रील",
"मे",
"जुन",
"जुलाई",
"अगष्ट",
"सेप्टेम्बर",
"अक्टोबर",
"नोवेम्बर",
"डिसेम्बर",
]
month_abbreviations = [
"",
"जन",
"फेब",
"मार्च",
"एप्रील",
"मे",
"जुन",
"जुलाई",
"अग",
"सेप",
"अक्ट",
"नोव",
"डिस",
]
day_names = [
"",
"सोमवार",
"मंगलवार",
"बुधवार",
"बिहिवार",
"शुक्रवार",
"शनिवार",
"आइतवार",
]
day_abbreviations = ["", "सोम", "मंगल", "बुध", "बिहि", "शुक्र", "शनि", "आइत"]
class EstonianLocale(Locale):
names = ["ee", "et"]
past = "{0} tagasi"
future = "{0} pärast"
and_word = "ja"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Mapping[str, str]]] = {
"now": {"past": "just nüüd", "future": "just nüüd"},
"second": {"past": "üks sekund", "future": "ühe sekundi"},
"seconds": {"past": "{0} sekundit", "future": "{0} sekundi"},
"minute": {"past": "üks minut", "future": "ühe minuti"},
"minutes": {"past": "{0} minutit", "future": "{0} minuti"},
"hour": {"past": "tund aega", "future": "tunni aja"},
"hours": {"past": "{0} tundi", "future": "{0} tunni"},
"day": {"past": "üks päev", "future": "ühe päeva"},
"days": {"past": "{0} päeva", "future": "{0} päeva"},
"month": {"past": "üks kuu", "future": "ühe kuu"},
"months": {"past": "{0} kuud", "future": "{0} kuu"},
"year": {"past": "üks aasta", "future": "ühe aasta"},
"years": {"past": "{0} aastat", "future": "{0} aasta"},
}
month_names = [
"",
"Jaanuar",
"Veebruar",
"Märts",
"Aprill",
"Mai",
"Juuni",
"Juuli",
"August",
"September",
"Oktoober",
"November",
"Detsember",
]
month_abbreviations = [
"",
"Jan",
"Veb",
"Mär",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Okt",
"Nov",
"Dets",
]
day_names = [
"",
"Esmaspäev",
"Teisipäev",
"Kolmapäev",
"Neljapäev",
"Reede",
"Laupäev",
"Pühapäev",
]
day_abbreviations = ["", "Esm", "Teis", "Kolm", "Nelj", "Re", "Lau", "Püh"]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
if delta > 0:
_form = form["future"]
else:
_form = form["past"]
return _form.format(abs(delta))
class LatvianLocale(Locale):
names = ["lv", "lv-lv"]
past = "pirms {0}"
future = "pēc {0}"
and_word = "un"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "tagad",
"second": "sekundes",
"seconds": "{0} sekundēm",
"minute": "minūtes",
"minutes": "{0} minūtēm",
"hour": "stundas",
"hours": "{0} stundām",
"day": "dienas",
"days": "{0} dienām",
"week": "nedēļas",
"weeks": "{0} nedēļām",
"month": "mēneša",
"months": "{0} mēnešiem",
"year": "gada",
"years": "{0} gadiem",
}
month_names = [
"",
"janvāris",
"februāris",
"marts",
"aprīlis",
"maijs",
"jūnijs",
"jūlijs",
"augusts",
"septembris",
"oktobris",
"novembris",
"decembris",
]
month_abbreviations = [
"",
"jan",
"feb",
"marts",
"apr",
"maijs",
"jūnijs",
"jūlijs",
"aug",
"sept",
"okt",
"nov",
"dec",
]
day_names = [
"",
"pirmdiena",
"otrdiena",
"trešdiena",
"ceturtdiena",
"piektdiena",
"sestdiena",
"svētdiena",
]
day_abbreviations = [
"",
"pi",
"ot",
"tr",
"ce",
"pi",
"se",
"sv",
]
class SwahiliLocale(Locale):
names = [
"sw",
"sw-ke",
"sw-tz",
]
past = "{0} iliyopita"
future = "muda wa {0}"
and_word = "na"
timeframes = {
"now": "sasa hivi",
"second": "sekunde",
"seconds": "sekunde {0}",
"minute": "dakika moja",
"minutes": "dakika {0}",
"hour": "saa moja",
"hours": "saa {0}",
"day": "siku moja",
"days": "siku {0}",
"week": "wiki moja",
"weeks": "wiki {0}",
"month": "mwezi moja",
"months": "miezi {0}",
"year": "mwaka moja",
"years": "miaka {0}",
}
meridians = {"am": "asu", "pm": "mch", "AM": "ASU", "PM": "MCH"}
month_names = [
"",
"Januari",
"Februari",
"Machi",
"Aprili",
"Mei",
"Juni",
"Julai",
"Agosti",
"Septemba",
"Oktoba",
"Novemba",
"Desemba",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mac",
"Apr",
"Mei",
"Jun",
"Jul",
"Ago",
"Sep",
"Okt",
"Nov",
"Des",
]
day_names = [
"",
"Jumatatu",
"Jumanne",
"Jumatano",
"Alhamisi",
"Ijumaa",
"Jumamosi",
"Jumapili",
]
day_abbreviations = [
"",
"Jumatatu",
"Jumanne",
"Jumatano",
"Alhamisi",
"Ijumaa",
"Jumamosi",
"Jumapili",
]
class CroatianLocale(Locale):
names = ["hr", "hr-hr"]
past = "prije {0}"
future = "za {0}"
and_word = "i"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "upravo sad",
"second": "sekundu",
"seconds": {"double": "{0} sekunde", "higher": "{0} sekundi"},
"minute": "minutu",
"minutes": {"double": "{0} minute", "higher": "{0} minuta"},
"hour": "sat",
"hours": {"double": "{0} sata", "higher": "{0} sati"},
"day": "jedan dan",
"days": {"double": "{0} dana", "higher": "{0} dana"},
"week": "tjedan",
"weeks": {"double": "{0} tjedna", "higher": "{0} tjedana"},
"month": "mjesec",
"months": {"double": "{0} mjeseca", "higher": "{0} mjeseci"},
"year": "godinu",
"years": {"double": "{0} godine", "higher": "{0} godina"},
}
month_names = [
"",
"siječanj",
"veljača",
"ožujak",
"travanj",
"svibanj",
"lipanj",
"srpanj",
"kolovoz",
"rujan",
"listopad",
"studeni",
"prosinac",
]
month_abbreviations = [
"",
"siječ",
"velj",
"ožuj",
"trav",
"svib",
"lip",
"srp",
"kol",
"ruj",
"list",
"stud",
"pros",
]
day_names = [
"",
"ponedjeljak",
"utorak",
"srijeda",
"četvrtak",
"petak",
"subota",
"nedjelja",
]
day_abbreviations = [
"",
"po",
"ut",
"sr",
"če",
"pe",
"su",
"ne",
]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, Mapping):
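            # Counts 2-4 take the paucal ("double") form; larger counts use the general plural.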
if 1 < delta <= 4:
form = form["double"]
else:
form = form["higher"]
return form.format(delta)
class LatinLocale(Locale):
names = ["la", "la-va"]
past = "ante {0}"
future = "in {0}"
and_word = "et"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "nunc",
"second": "secundum",
"seconds": "{0} secundis",
"minute": "minutam",
"minutes": "{0} minutis",
"hour": "horam",
"hours": "{0} horas",
"day": "diem",
"days": "{0} dies",
"week": "hebdomadem",
"weeks": "{0} hebdomades",
"month": "mensem",
"months": "{0} mensis",
"year": "annum",
"years": "{0} annos",
}
month_names = [
"",
"Ianuarius",
"Februarius",
"Martius",
"Aprilis",
"Maius",
"Iunius",
"Iulius",
"Augustus",
"September",
"October",
"November",
"December",
]
month_abbreviations = [
"",
"Ian",
"Febr",
"Mart",
"Apr",
"Mai",
"Iun",
"Iul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
]
day_names = [
"",
"dies Lunae",
"dies Martis",
"dies Mercurii",
"dies Iovis",
"dies Veneris",
"dies Saturni",
"dies Solis",
]
day_abbreviations = [
"",
"dies Lunae",
"dies Martis",
"dies Mercurii",
"dies Iovis",
"dies Veneris",
"dies Saturni",
"dies Solis",
]
class LithuanianLocale(Locale):
names = ["lt", "lt-lt"]
past = "prieš {0}"
future = "po {0}"
and_word = "ir"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "dabar",
"second": "sekundės",
"seconds": "{0} sekundžių",
"minute": "minutės",
"minutes": "{0} minučių",
"hour": "valandos",
"hours": "{0} valandų",
"day": "dieną",
"days": "{0} dienų",
"week": "savaitės",
"weeks": "{0} savaičių",
"month": "mėnesio",
"months": "{0} mėnesių",
"year": "metų",
"years": "{0} metų",
}
month_names = [
"",
"sausis",
"vasaris",
"kovas",
"balandis",
"gegužė",
"birželis",
"liepa",
"rugpjūtis",
"rugsėjis",
"spalis",
"lapkritis",
"gruodis",
]
month_abbreviations = [
"",
"saus",
"vas",
"kovas",
"bal",
"geg",
"birž",
"liepa",
"rugp",
"rugs",
"spalis",
"lapkr",
"gr",
]
day_names = [
"",
"pirmadienis",
"antradienis",
"trečiadienis",
"ketvirtadienis",
"penktadienis",
"šeštadienis",
"sekmadienis",
]
day_abbreviations = [
"",
"pi",
"an",
"tr",
"ke",
"pe",
"še",
"se",
]
class MalayLocale(Locale):
names = ["ms", "ms-my", "ms-bn"]
past = "{0} yang lalu"
future = "dalam {0}"
and_word = "dan"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "sekarang",
"second": "saat",
"seconds": "{0} saat",
"minute": "minit",
"minutes": "{0} minit",
"hour": "jam",
"hours": "{0} jam",
"day": "hari",
"days": "{0} hari",
"week": "minggu",
"weeks": "{0} minggu",
"month": "bulan",
"months": "{0} bulan",
"year": "tahun",
"years": "{0} tahun",
}
month_names = [
"",
"Januari",
"Februari",
"Mac",
"April",
"Mei",
"Jun",
"Julai",
"Ogos",
"September",
"Oktober",
"November",
"Disember",
]
month_abbreviations = [
"",
"Jan.",
"Feb.",
"Mac",
"Apr.",
"Mei",
"Jun",
"Julai",
"Og.",
"Sept.",
"Okt.",
"Nov.",
"Dis.",
]
day_names = [
"",
"Isnin",
"Selasa",
"Rabu",
"Khamis",
"Jumaat",
"Sabtu",
"Ahad",
]
day_abbreviations = [
"",
"Isnin",
"Selasa",
"Rabu",
"Khamis",
"Jumaat",
"Sabtu",
"Ahad",
]
class MalteseLocale(Locale):
names = ["mt"]
past = "{0} ilu"
future = "fi {0}"
and_word = "u"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "issa",
"second": "sekonda",
"seconds": "{0} sekondi",
"minute": "minuta",
"minutes": "{0} minuti",
"hour": "siegħa",
"hours": {"dual": "{0} sagħtejn", "plural": "{0} sigħat"},
"day": "jum",
"days": {"dual": "{0} jumejn", "plural": "{0} ijiem"},
"week": "ġimgħa",
"weeks": {"dual": "{0} ġimagħtejn", "plural": "{0} ġimgħat"},
"month": "xahar",
"months": {"dual": "{0} xahrejn", "plural": "{0} xhur"},
"year": "sena",
"years": {"dual": "{0} sentejn", "plural": "{0} snin"},
}
month_names = [
"",
"Jannar",
"Frar",
"Marzu",
"April",
"Mejju",
"Ġunju",
"Lulju",
"Awwissu",
"Settembru",
"Ottubru",
"Novembru",
"Diċembru",
]
month_abbreviations = [
"",
"Jan",
"Fr",
"Mar",
"Apr",
"Mejju",
"Ġun",
"Lul",
"Aw",
"Sett",
"Ott",
"Nov",
"Diċ",
]
day_names = [
"",
"It-Tnejn",
"It-Tlieta",
"L-Erbgħa",
"Il-Ħamis",
"Il-Ġimgħa",
"Is-Sibt",
"Il-Ħadd",
]
day_abbreviations = [
"",
"T",
"TL",
"E",
"Ħ",
"Ġ",
"S",
"Ħ",
]
def _format_timeframe(
self, timeframe: TimeFrameLiteral, delta: Union[float, int]
) -> str:
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, Mapping):
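            # Maltese keeps a distinct dual form for exactly two units; other counts use the plural.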
if delta == 2:
form = form["dual"]
else:
form = form["plural"]
return form.format(delta)
class OdiaLocale(Locale):
names = ["or", "or-in"]
past = "{0} ପୂର୍ବେ"
future = "{0} ପରେ"
timeframes = {
"now": "ବର୍ତ୍ତମାନ",
"second": "ଏକ ସେକେଣ୍ଡ",
"seconds": "{0} ସେକେଣ୍ଡ",
"minute": "ଏକ ମିନଟ",
"minutes": "{0} ମିନଟ",
"hour": "ଏକ ଘଣ୍ଟା",
"hours": "{0} ଘଣ୍ଟା",
"day": "ଏକ ଦିନ",
"days": "{0} ଦିନ",
"month": "ଏକ ମାସ",
"months": "{0} ମାସ ",
"year": "ଏକ ବର୍ଷ",
"years": "{0} ବର୍ଷ",
}
meridians = {"am": "ପୂର୍ବାହ୍ନ", "pm": "ଅପରାହ୍ନ", "AM": "ପୂର୍ବାହ୍ନ", "PM": "ଅପରାହ୍ନ"}
month_names = [
"",
"ଜାନୁଆରୀ",
"ଫେବୃଆରୀ",
"ମାର୍ଚ୍ଚ୍",
"ଅପ୍ରେଲ",
"ମଇ",
"ଜୁନ୍",
"ଜୁଲାଇ",
"ଅଗଷ୍ଟ",
"ସେପ୍ଟେମ୍ବର",
"ଅକ୍ଟୋବର୍",
"ନଭେମ୍ବର୍",
"ଡିସେମ୍ବର୍",
]
month_abbreviations = [
"",
"ଜାନୁ",
"ଫେବୃ",
"ମାର୍ଚ୍ଚ୍",
"ଅପ୍ରେ",
"ମଇ",
"ଜୁନ୍",
"ଜୁଲା",
"ଅଗ",
"ସେପ୍ଟେ",
"ଅକ୍ଟୋ",
"ନଭେ",
"ଡିସେ",
]
day_names = [
"",
"ସୋମବାର",
"ମଙ୍ଗଳବାର",
"ବୁଧବାର",
"ଗୁରୁବାର",
"ଶୁକ୍ରବାର",
"ଶନିବାର",
"ରବିବାର",
]
day_abbreviations = [
"",
"ସୋମ",
"ମଙ୍ଗଳ",
"ବୁଧ",
"ଗୁରୁ",
"ଶୁକ୍ର",
"ଶନି",
"ରବି",
]
def _ordinal_number(self, n: int) -> str:
if n > 10 or n == 0:
return f"{n}ତମ"
if n in [1, 5, 7, 8, 9, 10]:
return f"{n}ମ"
if n in [2, 3]:
return f"{n}ୟ"
if n == 4:
return f"{n}ର୍ଥ"
if n == 6:
return f"{n}ଷ୍ଠ"
return ""
| apache-2.0 | 4,858,773,953,756,141,000 | 20.314819 | 123 | 0.399362 | false |
thehub/hubspace | md5crypt.py | 1 | 4201 | #########################################################
# md5crypt.py
#
# 0423.2000 by michal wallace http://www.sabren.com/
# based on perl's Crypt::PasswdMD5 by Luis Munoz ([email protected])
# based on /usr/src/libcrypt/crypt.c from FreeBSD 2.2.5-RELEASE
#
# MANY THANKS TO
#
# Carey Evans - http://home.clear.net.nz/pages/c.evans/
# Dennis Marti - http://users.starpower.net/marti1/
#
# For the patches that got this thing working!
#
#########################################################
"""md5crypt.py - Provides interoperable MD5-based crypt() function
SYNOPSIS
import md5crypt.py
cryptedpassword = md5crypt.md5crypt(password, salt);
DESCRIPTION
unix_md5_crypt() provides a crypt()-compatible interface to the
rather new MD5-based crypt() function found in modern operating systems.
It's based on the implementation found on FreeBSD 2.2.[56]-RELEASE and
contains the following license in it:
"THE BEER-WARE LICENSE" (Revision 42):
<[email protected]> wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
apache_md5_crypt() provides a function compatible with Apache's
.htpasswd files. This was contributed by Bryan Hart <[email protected]>.
"""
MAGIC = '$1$' # Magic string
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
import md5
def to64 (v, n):
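    # Encode v as n characters of the custom base-64 alphabet above, least-significant 6 bits first.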
ret = ''
while (n - 1 >= 0):
n = n - 1
ret = ret + ITOA64[v & 0x3f]
v = v >> 6
return ret
def apache_md5_crypt (pw, salt):
# change the Magic string to match the one used by Apache
return unix_md5_crypt(pw, salt, '$apr1$')
def unix_md5_crypt(pw, salt, magic=None):
if magic==None:
magic = MAGIC
# Take care of the magic string if present
if salt[:len(magic)] == magic:
salt = salt[len(magic):]
pw = unicode(pw).encode('ascii')
# salt can have up to 8 characters:
#import string
salt = salt.split('$', 1)[0]
#salt = string.split(salt, '$', 1)[0]
salt = salt[:8]
ctx = pw + magic + salt
final = md5.md5(pw + salt + pw).digest()
for pl in range(len(pw),0,-16):
if pl > 16:
ctx = ctx + final[:16]
else:
ctx = ctx + final[:pl]
# Now the 'weird' xform (??)
i = len(pw)
while i:
if i & 1:
ctx = ctx + chr(0) #if ($i & 1) { $ctx->add(pack("C", 0)); }
else:
ctx = ctx + pw[0]
i = i >> 1
final = md5.md5(ctx).digest()
# The following is supposed to make
# things run slower.
# my question: WTF???
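    # (Answer: deliberate key stretching - 1000 extra MD5 rounds, as in the FreeBSD crypt.c
    # this was ported from.)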
for i in range(1000):
ctx1 = ''
if i & 1:
ctx1 = ctx1 + pw
else:
ctx1 = ctx1 + final[:16]
if i % 3:
ctx1 = ctx1 + salt
if i % 7:
ctx1 = ctx1 + pw
if i & 1:
ctx1 = ctx1 + final[:16]
else:
ctx1 = ctx1 + pw
final = md5.md5(ctx1).digest()
# Final xform
passwd = ''
passwd = passwd + to64((int(ord(final[0])) << 16)
|(int(ord(final[6])) << 8)
|(int(ord(final[12]))),4)
passwd = passwd + to64((int(ord(final[1])) << 16)
|(int(ord(final[7])) << 8)
|(int(ord(final[13]))), 4)
passwd = passwd + to64((int(ord(final[2])) << 16)
|(int(ord(final[8])) << 8)
|(int(ord(final[14]))), 4)
passwd = passwd + to64((int(ord(final[3])) << 16)
|(int(ord(final[9])) << 8)
|(int(ord(final[15]))), 4)
passwd = passwd + to64((int(ord(final[4])) << 16)
|(int(ord(final[10])) << 8)
|(int(ord(final[5]))), 4)
passwd = passwd + to64((int(ord(final[11]))), 2)
return magic + salt + '$' + passwd
## assign a wrapper function:
md5crypt = unix_md5_crypt
if __name__ == "__main__":
print unix_md5_crypt("cat", "hat")
| gpl-2.0 | 6,348,591,184,580,210,000 | 25.25625 | 77 | 0.517972 | false |
oldm/OldMan | oldman/validation/value_format.py | 1 | 4405 | from validate_email import validate_email
class ValueFormatError(Exception):
"""Invalid format detected."""
pass
class ValueFormat(object):
"""A :class:`~oldman.validation.value_format.ValueFormat` object
checks the values and converts `rdflib.term.Identifier` objects into
Python objects.
"""
def check_value(self, value):
"""Raises a :class:`~oldman.validation.value_format.ValueFormatError` exception
if the value is wrongly formatted.
:param value: Python value to check.
"""
raise NotImplementedError(u"check_value must be overwritten")
def to_python(self, rdf_term):
"""Converts a `rdflib.term.Identifier` object into
a regular Python value.
By default, uses the RDFlib `toPython()` method.
:param rdf_term: `rdflib.term.Identifier` object.
:return: Regular Python object.
"""
return rdf_term.toPython()
class AnyValueFormat(ValueFormat):
"""Accepts any value."""
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
pass
class TypedValueFormat(ValueFormat):
"""Checks that the value is of a given type.
:param types: Supported Python types.
"""
def __init__(self, types):
self._types = types
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
if not isinstance(value, self._types):
raise ValueFormatError(u"%s is not a %s" % (value, self._types))
class IRIValueFormat(ValueFormat):
"""Checks that the value is an IRI."""
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
#TODO: to be implemented
pass
class PositiveTypedValueFormat(TypedValueFormat):
"""Checks that the value is a positive number."""
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
TypedValueFormat.check_value(self, value)
if value <= 0:
raise ValueFormatError(u"%s should be positive" % value)
class NegativeTypedValueFormat(TypedValueFormat):
"""Checks that the value is a negative number."""
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
TypedValueFormat.check_value(self, value)
if value >= 0:
raise ValueFormatError(u"%s should be negative" % value)
class NonPositiveTypedValueFormat(TypedValueFormat):
"""Checks that the value is a non-positive number."""
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
TypedValueFormat.check_value(self, value)
if value > 0:
raise ValueFormatError(u"%s should not be positive" % value)
class NonNegativeTypedValueFormat(TypedValueFormat):
"""Checks that the value is a non-negative number."""
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
TypedValueFormat.check_value(self, value)
if value < 0:
raise ValueFormatError(u"%s should not be negative" % value)
class HexBinaryFormat(TypedValueFormat):
"""Checks that the value is a hexadecimal string."""
def __init__(self):
TypedValueFormat.__init__(self, (str, unicode))
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
TypedValueFormat.check_value(self, value)
try:
int(value, 16)
except ValueError:
raise ValueFormatError(u"%s is not a hexadecimal value" % value)
def to_python(self, rdf_term):
"""Returns a hexstring."""
return unicode(rdf_term)
class EmailValueFormat(TypedValueFormat):
"""Checks that the value is an email address."""
def __init__(self):
TypedValueFormat.__init__(self, (str, unicode))
def check_value(self, value):
"""See :func:`oldman.validation.value_format.ValueFormat.check_value`."""
# Check that it is a string
TypedValueFormat.check_value(self, value)
if not validate_email(value):
raise ValueFormatError(u"%s is not a valid email (bad format)" % value) | bsd-3-clause | 7,466,264,308,116,201,000 | 30.927536 | 87 | 0.651305 | false |
mathcamp/pypicloud | pypicloud/access/remote.py | 1 | 3575 | """ Backend that defers to another server for access control """
from .base import IAccessBackend
class RemoteAccessBackend(IAccessBackend):
"""
This backend allows you to defer all user auth and permissions to a remote
server. It requires the ``requests`` package.
"""
def __init__(self, request=None, settings=None, server=None, auth=None, **kwargs):
super(RemoteAccessBackend, self).__init__(request, **kwargs)
self._settings = settings
self.server = server
self.auth = auth
@classmethod
def configure(cls, settings):
kwargs = super(RemoteAccessBackend, cls).configure(settings)
kwargs["settings"] = settings
kwargs["server"] = settings["auth.backend_server"]
auth = None
user = settings.get("auth.user")
if user is not None:
password = settings.get("auth.password")
auth = (user, password)
kwargs["auth"] = auth
return kwargs
def _req(self, uri, params=None):
""" Hit a server endpoint and return the json response """
try:
import requests
except ImportError: # pragma: no cover
raise ImportError(
"You must 'pip install requests' before using "
"the remote server access backend"
)
response = requests.get(self.server + uri, params=params, auth=self.auth)
response.raise_for_status()
return response.json()
def verify_user(self, username, password):
uri = self._settings.get("auth.uri.verify", "/verify")
params = {"username": username, "password": password}
return self._req(uri, params)
def _get_password_hash(self, username):
# We don't have to do anything here because we overrode 'verify_user'
pass
def groups(self, username=None):
uri = self._settings.get("auth.uri.groups", "/groups")
params = {}
if username is not None:
params["username"] = username
return self._req(uri, params)
def group_members(self, group):
uri = self._settings.get("auth.uri.group_members", "/group_members")
params = {"group": group}
return self._req(uri, params)
def is_admin(self, username):
uri = self._settings.get("auth.uri.admin", "/admin")
params = {"username": username}
return self._req(uri, params)
def group_permissions(self, package):
uri = self._settings.get("auth.uri.group_permissions", "/group_permissions")
params = {"package": package}
return self._req(uri, params)
def user_permissions(self, package):
uri = self._settings.get("auth.uri.user_permissions", "/user_permissions")
params = {"package": package}
return self._req(uri, params)
def user_package_permissions(self, username):
uri = self._settings.get(
"auth.uri.user_package_permissions", "/user_package_permissions"
)
params = {"username": username}
return self._req(uri, params)
def group_package_permissions(self, group):
uri = self._settings.get(
"auth.uri.group_package_permissions", "/group_package_permissions"
)
params = {"group": group}
return self._req(uri, params)
def user_data(self, username=None):
uri = self._settings.get("auth.uri.user_data", "/user_data")
params = None
if username is not None:
params = {"username": username}
return self._req(uri, params)
| mit | 267,732,761,425,824,900 | 34.75 | 86 | 0.604196 | false |
PanDAWMS/panda-bigmon-atlas | atlas/auth/shibsso/tests/test_middleware.py | 1 | 2267 | #
# Copyright 2010 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests procedures for ShibSSO middleware."""
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth import SESSION_KEY
from django.test import Client
from django.test import TestCase
from django.utils.http import urlquote
from shibsso.tests import urls as url_tests
class MiddlewareTest(TestCase):
urls = url_tests
fixtures = ['shibusers.json']
def test_user_stay_logged(self):
# If user is logged in.
# Session must remain.
environ = {settings.META_USERNAME: 'shib_super'}
client = Client()
request_url = settings.LOGIN_URL
client.get(request_url, ** environ)
request_url = '/login_required/'
response = client.get(request_url, ** environ)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'shib_super')
self.assertEqual(client.session[SESSION_KEY], 1)
def test_user_is_logged_out(self):
# If user is logged out.
# Session must be destroyed.
environ = {settings.META_USERNAME: 'shib_super'}
client = Client()
request_url = settings.LOGIN_URL
client.get(request_url, ** environ)
self.assertEqual(client.session[SESSION_KEY], 1)
request_url = '/login_required/'
response = client.get(request_url)
self.assertEqual(response['Location'],
'http://testserver%s?%s=%s' % \
(settings.LOGIN_URL, REDIRECT_FIELD_NAME,
urlquote(request_url)))
self.assertRaises(KeyError, client.session.__getitem__, SESSION_KEY) | apache-2.0 | 826,428,175,119,717,400 | 29.648649 | 76 | 0.659903 | false |
AstroTech/workshop-python | django/solution/untitled/ksiazkaadresowa/forms.py | 1 | 1084 | import re
from django.core.exceptions import ValidationError
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Person
class ContactUsForm(forms.Form):
sender = forms.EmailField()
subject = forms.CharField()
body = forms.CharField(widget=forms.Textarea)
class ContactCreateForm(forms.ModelForm):
class Meta:
model = Person
fields = ['first_name', 'last_name']
def regex(self, pattern, text):
if not re.match(pattern, text):
raise ValidationError(_(f'Invalid character'))
def clean_first_name(self):
first_name = self.cleaned_data['first_name']
first_name = first_name.title()
self.regex(
pattern=r'^\w\w[\s\w-]*\w$',
text=first_name,
)
return first_name
def clean_last_name(self):
last_name = self.cleaned_data['last_name']
last_name = last_name.title()
self.regex(
pattern=r'^\w\w[\s\w-]*\w$',
text=last_name,
)
return last_name
| mit | -2,301,036,317,023,659,800 | 23.636364 | 58 | 0.606089 | false |
CORE-GATECH-GROUP/serpent-tools | serpentTools/parsers/history.py | 1 | 5348 | """
Parser that reads history files
Files can be generated by adding ``set his 1`` to the input file
"""
from numpy import empty, asfortranarray, array
from serpentTools.utils import convertVariableName, deconvertVariableName
from serpentTools.parsers.base import BaseReader
from serpentTools.messages import warning, error
__all__ = ['HistoryReader']
class ScratchStorage(object):
"""Storage container for storing arrays of potentially unknown size"""
def __init__(self):
self.__internals = []
def __setitem__(self, key, value):
if isinstance(self.__internals, list):
self.__internals.append(value)
return
self.__internals[key] = value
def __getitem__(self, key):
return self.__internals.__getitem__(key)
def __len__(self):
return len(self.__internals)
def allocate(self, shape=None, **kwargs):
"""Allocate as an empty array or list."""
if shape is None:
self.__internals = []
return
self.__internals = empty(shape, **kwargs)
def __repr__(self):
return self.__internals.__repr__()
def __str__(self):
return self.__internals.__str__()
def __contains__(self, key):
return self.__internals.__contains__(key)
@property
def data(self):
"""Return the data stored in this container."""
if isinstance(self.__internals, list):
return array(self.__internals)
return self.__internals
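def _scratch_storage_example():
    # Editor's sketch, not part of serpentTools: typical use of the
    # ScratchStorage helper above. Values are placeholders.
    scratch = ScratchStorage()
    scratch[0] = [1.0, 2.0]        # backed by a list, so this appends
    scratch.allocate((2, 2))       # switch to a pre-sized numpy array
    scratch[0] = [1.0, 2.0]
    scratch[1] = [3.0, 4.0]
    return scratch.data            # numpy array of shape (2, 2)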
class HistoryReader(BaseReader):
"""
Class responsible for reading history files
Arrays can be accessed through either the ``arrays``
dictionary, or with ``his[key]``, where ``key`` is the
name of an array in ``arrays``.
Parameters
----------
filePath : str
path pointing towards the file to be read
Attributes
----------
arrays : dict
Dictionary of all the arrays produced in the file.
These arrays do not have the index column that is presented
in the file.
numInactive : int
Number of inactive cycles used in this calculation
"""
def __init__(self, filePath):
BaseReader.__init__(self, filePath, 'history')
self.arrays = {}
self.numInactive = None
def __getitem__(self, key):
"""Return an item from :attr:`arrays`"""
return self.arrays[key]
def __contains__(self, key):
"""Return ``True`` if key is in :attr:`arrays`, otherwise ``False``"""
return key in self.arrays
def __len__(self):
"""Return number of entries in :attr:`arrays`."""
return len(self.arrays)
def __iter__(self):
"""Iterate over keys in :attr:`arrays`"""
return iter(self.arrays)
def _precheck(self):
with open(self.filePath) as check:
for line in check:
if line[:3] == 'HIS' or 'active cycles' in line:
return
warning('Unable to find indication of active cycles nor history data '
'from {}'.format(self.filePath))
def _postcheck(self):
if not self.arrays:
error("No history data found in {}".format(self.filePath))
if self.numInactive is None:
            error('Unable to ascertain the number of inactive cycles')
def get(self, key, default=None):
"""Return an array or default if not found"""
return self.arrays.get(key, default)
def _read(self):
curKey = None
scratch = ScratchStorage()
cycles = None
indx = 0
with open(self.filePath) as out:
for line in out:
if not line.strip():
continue
if '=' in line:
serpentN = line.split()[0].replace('HIS_', '')
curKey = convertVariableName(serpentN)
continue
if 'active' in line:
if self.numInactive is None:
self.numInactive = indx
continue
if line[0] == ']':
data = asfortranarray(scratch.data)
self.arrays[curKey] = data
cycles = data.shape[0]
indx = 0
continue
values = line.split()[1:] # skip indexing term
indx += 1
values = [float(xx) for xx in values]
if cycles and indx == 1:
scratch.allocate((cycles, len(values)))
scratch[indx - 1] = values
def _gather_matlab(self, reconvert):
out = {}
if reconvert:
converter = self.ioReconvertName
else:
converter = self.ioConvertName
for key, value in self.arrays.items():
out[converter(key)] = value
return out
def items(self):
"""Iterate over ``(key, value)`` pairs from :attr:`arrays`"""
return self.arrays.items()
@staticmethod
def ioConvertName(name):
"""Convert a variable name to ``camelCase`` for exporting."""
return 'his' + name[0].upper() + name[1:]
@staticmethod
def ioReconvertName(name):
"""Reconvert a variable name to ``SERPENT_STYLE`` for exporting"""
return "HIS_" + deconvertVariableName(name)
| mit | -5,972,619,038,542,608,000 | 30.093023 | 78 | 0.556283 | false |
drewhutchison/socketIO-client | socketIO_client/__init__.py | 1 | 18081 | import logging
import json
import requests
import time
from collections import namedtuple
try:
from urllib.parse import urlparse as parse_url
except ImportError:
from urlparse import urlparse as parse_url
from .exceptions import (
SocketIOError, ConnectionError, TimeoutError, PacketError)
from .symmetries import _get_text
from .transports import (
_get_response, TRANSPORTS,
_WebsocketTransport, _XHR_PollingTransport, _JSONP_PollingTransport)
__version__ = '0.5.4'
_SocketIOSession = namedtuple('_SocketIOSession', [
'id',
'heartbeat_timeout',
'server_supported_transports',
])
_log = logging.getLogger(__name__)
PROTOCOL_VERSION = 1
RETRY_INTERVAL_IN_SECONDS = 1
class BaseNamespace(object):
'Define client behavior'
def __init__(self, _transport, path):
self._transport = _transport
self.path = path
self._was_connected = False
self._callback_by_event = {}
self.initialize()
def initialize(self):
'Initialize custom variables here; you can override this method'
pass
def message(self, data='', callback=None):
self._transport.message(self.path, data, callback)
def emit(self, event, *args, **kw):
callback, args = find_callback(args, kw)
self._transport.emit(self.path, event, args, callback)
def disconnect(self):
self._transport.disconnect(self.path)
def on(self, event, callback):
'Define a callback to handle a custom event emitted by the server'
self._callback_by_event[event] = callback
def on_connect(self):
'Called after server connects; you can override this method'
pass
def on_disconnect(self):
'Called after server disconnects; you can override this method'
pass
def on_heartbeat(self):
'Called after server sends a heartbeat; you can override this method'
pass
def on_message(self, data):
'Called after server sends a message; you can override this method'
pass
def on_event(self, event, *args):
"""
Called after server sends an event; you can override this method.
Called only if a custom event handler does not exist,
such as one defined by namespace.on('my_event', my_function).
"""
callback, args = find_callback(args)
if callback:
callback(*args)
def on_error(self, reason, advice):
'Called after server sends an error; you can override this method'
pass
def on_noop(self):
'Called after server sends a noop; you can override this method'
pass
def on_open(self, *args):
pass
def on_close(self, *args):
pass
def on_retry(self, *args):
pass
def on_reconnect(self, *args):
pass
def _find_event_callback(self, event):
# Check callbacks defined by on()
try:
return self._callback_by_event[event]
except KeyError:
pass
# Convert connect to reconnect if we have seen connect already
if event == 'connect':
if not self._was_connected:
self._was_connected = True
else:
event = 'reconnect'
# Check callbacks defined explicitly or use on_event()
return getattr(
self,
'on_' + event.replace(' ', '_'),
lambda *args: self.on_event(event, *args))
class LoggingNamespace(BaseNamespace):
def _log(self, level, msg, *attrs):
_log.log(level, '%s: %s' % (self._transport._url, msg), *attrs)
def on_connect(self):
self._log(logging.DEBUG, '%s [connect]', self.path)
super(LoggingNamespace, self).on_connect()
def on_disconnect(self):
self._log(logging.DEBUG, '%s [disconnect]', self.path)
super(LoggingNamespace, self).on_disconnect()
def on_heartbeat(self):
self._log(logging.DEBUG, '%s [heartbeat]', self.path)
super(LoggingNamespace, self).on_heartbeat()
def on_message(self, data):
self._log(logging.INFO, '%s [message] %s', self.path, data)
super(LoggingNamespace, self).on_message(data)
def on_event(self, event, *args):
callback, args = find_callback(args)
arguments = [repr(_) for _ in args]
if callback:
arguments.append('callback(*args)')
self._log(logging.INFO, '%s [event] %s(%s)', self.path, event,
', '.join(arguments))
super(LoggingNamespace, self).on_event(event, *args)
def on_error(self, reason, advice):
self._log(logging.INFO, '%s [error] %s', self.path, advice)
super(LoggingNamespace, self).on_error(reason, advice)
def on_noop(self):
self._log(logging.INFO, '%s [noop]', self.path)
super(LoggingNamespace, self).on_noop()
def on_open(self, *args):
self._log(logging.INFO, '%s [open] %s', self.path, args)
super(LoggingNamespace, self).on_open(*args)
def on_close(self, *args):
self._log(logging.INFO, '%s [close] %s', self.path, args)
super(LoggingNamespace, self).on_close(*args)
def on_retry(self, *args):
self._log(logging.INFO, '%s [retry] %s', self.path, args)
super(LoggingNamespace, self).on_retry(*args)
def on_reconnect(self, *args):
self._log(logging.INFO, '%s [reconnect] %s', self.path, args)
super(LoggingNamespace, self).on_reconnect(*args)
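def _example_namespace_usage():
    # Editor's illustration (not part of socketIO-client): a custom namespace
    # combined with the usual connect/emit/wait flow. Host, port and event
    # names below are placeholders.
    class ChatNamespace(BaseNamespace):
        def on_chat_response(self, *args):
            print('chat_response', args)
    socketIO = SocketIO('localhost', 8000, ChatNamespace)
    socketIO.emit('chat', {'text': 'hello'})
    socketIO.wait(seconds=1)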
class SocketIO(object):
"""Create a socket.io client that connects to a socket.io server
at the specified host and port.
- Define the behavior of the client by specifying a custom Namespace.
- Prefix host with https:// to use SSL.
- Set wait_for_connection=True to block until we have a connection.
- Specify desired transports=['websocket', 'xhr-polling'].
- Pass query params, headers, cookies, proxies as keyword arguments.
SocketIO('localhost', 8000,
params={'q': 'qqq'},
headers={'Authorization': 'Basic ' + b64encode('username:password')},
cookies={'a': 'aaa'},
proxies={'https': 'https://proxy.example.com:8080'})
"""
def __init__(
self, host, port=None, Namespace=None,
wait_for_connection=True, transports=TRANSPORTS,
resource='socket.io', **kw):
self.is_secure, self._base_url = _parse_host(host, port, resource)
self.wait_for_connection = wait_for_connection
self._namespace_by_path = {}
self._client_supported_transports = transports
self._kw = kw
if Namespace:
self.define(Namespace)
def _log(self, level, msg, *attrs):
_log.log(level, '%s: %s' % (self._base_url, msg), *attrs)
def __enter__(self):
return self
def __exit__(self, *exception_pack):
self.disconnect()
def __del__(self):
self.disconnect()
def define(self, Namespace, path=''):
if path:
self._transport.connect(path)
namespace = Namespace(self._transport, path)
self._namespace_by_path[path] = namespace
return namespace
def on(self, event, callback, path=''):
if path not in self._namespace_by_path:
self.define(BaseNamespace, path)
return self.get_namespace(path).on(event, callback)
def message(self, data='', callback=None, path=''):
self._transport.message(path, data, callback)
def emit(self, event, *args, **kw):
path = kw.get('path', '')
callback, args = find_callback(args, kw)
self._transport.emit(path, event, args, callback)
def wait(self, seconds=None, for_callbacks=False):
"""Wait in a loop and process events as defined in the namespaces.
- Omit seconds, i.e. call wait() without arguments, to wait forever.
"""
warning_screen = _yield_warning_screen(seconds)
timeout = min(self._heartbeat_interval, seconds)
for elapsed_time in warning_screen:
if self._stop_waiting(for_callbacks):
break
try:
try:
self._process_events(timeout)
except TimeoutError:
pass
next(self._heartbeat_pacemaker)
except ConnectionError as e:
try:
warning = Exception('[connection error] %s' % e)
warning_screen.throw(warning)
except StopIteration:
self._log(logging.WARNING, warning)
try:
namespace = self._namespace_by_path['']
namespace.on_disconnect()
except KeyError:
pass
def _process_events(self, timeout=None):
for packet in self._transport.recv_packet(timeout):
try:
self._process_packet(packet)
except PacketError as e:
self._log(logging.WARNING, '[packet error] %s', e)
def _process_packet(self, packet):
code, packet_id, path, data = packet
namespace = self.get_namespace(path)
delegate = self._get_delegate(code)
delegate(packet, namespace._find_event_callback)
def _stop_waiting(self, for_callbacks):
# Use __transport to make sure that we do not reconnect inadvertently
if for_callbacks and not self.__transport.has_ack_callback:
return True
if self.__transport._wants_to_disconnect:
return True
return False
def wait_for_callbacks(self, seconds=None):
self.wait(seconds, for_callbacks=True)
def disconnect(self, path=''):
try:
self._transport.disconnect(path)
except ReferenceError:
pass
try:
namespace = self._namespace_by_path[path]
namespace.on_disconnect()
del self._namespace_by_path[path]
except KeyError:
pass
@property
def connected(self):
try:
transport = self.__transport
except AttributeError:
return False
else:
return transport.connected
@property
def _transport(self):
try:
if self.connected:
return self.__transport
except AttributeError:
pass
socketIO_session = self._get_socketIO_session()
supported_transports = self._get_supported_transports(socketIO_session)
self._heartbeat_pacemaker = self._make_heartbeat_pacemaker(
heartbeat_timeout=socketIO_session.heartbeat_timeout)
next(self._heartbeat_pacemaker)
warning_screen = _yield_warning_screen(seconds=None)
for elapsed_time in warning_screen:
try:
self._transport_name = supported_transports.pop(0)
except IndexError:
raise ConnectionError('Could not negotiate a transport')
try:
self.__transport = self._get_transport(
socketIO_session, self._transport_name)
break
except ConnectionError:
pass
for path, namespace in self._namespace_by_path.items():
namespace._transport = self.__transport
if path:
self.__transport.connect(path)
return self.__transport
def _get_socketIO_session(self):
warning_screen = _yield_warning_screen(seconds=None)
for elapsed_time in warning_screen:
try:
return _get_socketIO_session(
self.is_secure, self._base_url, **self._kw)
except ConnectionError as e:
if not self.wait_for_connection:
raise
warning = Exception('[waiting for connection] %s' % e)
try:
warning_screen.throw(warning)
except StopIteration:
self._log(logging.WARNING, warning)
def _get_supported_transports(self, session):
self._log(
logging.DEBUG, '[transports available] %s',
' '.join(session.server_supported_transports))
supported_transports = [
x for x in self._client_supported_transports if
x in session.server_supported_transports]
if not supported_transports:
raise SocketIOError(' '.join([
'could not negotiate a transport:',
'client supports %s but' % ', '.join(
self._client_supported_transports),
'server supports %s' % ', '.join(
session.server_supported_transports),
]))
return supported_transports
def _get_transport(self, session, transport_name):
self._log(logging.DEBUG, '[transport chosen] %s', transport_name)
return {
'websocket': _WebsocketTransport,
'xhr-polling': _XHR_PollingTransport,
'jsonp-polling': _JSONP_PollingTransport,
}[transport_name](session, self.is_secure, self._base_url, **self._kw)
def _make_heartbeat_pacemaker(self, heartbeat_timeout):
self._heartbeat_interval = heartbeat_timeout / 2
heartbeat_time = time.time()
while True:
yield
if time.time() - heartbeat_time > self._heartbeat_interval:
heartbeat_time = time.time()
self._transport.send_heartbeat()
def get_namespace(self, path=''):
try:
return self._namespace_by_path[path]
except KeyError:
raise PacketError('unhandled namespace path (%s)' % path)
def _get_delegate(self, code):
try:
return {
'0': self._on_disconnect,
'1': self._on_connect,
'2': self._on_heartbeat,
'3': self._on_message,
'4': self._on_json,
'5': self._on_event,
'6': self._on_ack,
'7': self._on_error,
'8': self._on_noop,
}[code]
except KeyError:
raise PacketError('unexpected code (%s)' % code)
def _on_disconnect(self, packet, find_event_callback):
find_event_callback('disconnect')()
def _on_connect(self, packet, find_event_callback):
find_event_callback('connect')()
def _on_heartbeat(self, packet, find_event_callback):
find_event_callback('heartbeat')()
def _on_message(self, packet, find_event_callback):
code, packet_id, path, data = packet
args = [data]
if packet_id:
args.append(self._prepare_to_send_ack(path, packet_id))
find_event_callback('message')(*args)
def _on_json(self, packet, find_event_callback):
code, packet_id, path, data = packet
args = [json.loads(data)]
if packet_id:
args.append(self._prepare_to_send_ack(path, packet_id))
find_event_callback('message')(*args)
def _on_event(self, packet, find_event_callback):
code, packet_id, path, data = packet
value_by_name = json.loads(data)
event = value_by_name['name']
args = value_by_name.get('args', [])
if packet_id:
args.append(self._prepare_to_send_ack(path, packet_id))
find_event_callback(event)(*args)
def _on_ack(self, packet, find_event_callback):
code, packet_id, path, data = packet
data_parts = data.split('+', 1)
packet_id = data_parts[0]
try:
ack_callback = self._transport.get_ack_callback(packet_id)
except KeyError:
return
args = json.loads(data_parts[1]) if len(data_parts) > 1 else []
ack_callback(*args)
def _on_error(self, packet, find_event_callback):
code, packet_id, path, data = packet
reason, advice = data.split('+', 1)
find_event_callback('error')(reason, advice)
def _on_noop(self, packet, find_event_callback):
find_event_callback('noop')()
def _prepare_to_send_ack(self, path, packet_id):
'Return function that acknowledges the server'
return lambda *args: self._transport.ack(path, packet_id, *args)
def find_callback(args, kw=None):
'Return callback whether passed as a last argument or as a keyword'
if args and callable(args[-1]):
return args[-1], args[:-1]
try:
return kw['callback'], args
except (KeyError, TypeError):
return None, args
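def _example_find_callback():
    # Editor's sketch: the two equivalent ways a callback can reach
    # find_callback() above -- as the trailing positional argument or as the
    # 'callback' keyword.
    def on_done(*args):
        return args
    assert find_callback((1, 2, on_done)) == (on_done, (1, 2))
    assert find_callback((1, 2), {'callback': on_done}) == (on_done, (1, 2))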
def _parse_host(host, port, resource):
if not host.startswith('http'):
host = 'http://' + host
url_pack = parse_url(host)
is_secure = url_pack.scheme == 'https'
port = port or url_pack.port or (443 if is_secure else 80)
base_url = '%s:%d%s/%s/%s' % (
url_pack.hostname, port, url_pack.path, resource, PROTOCOL_VERSION)
return is_secure, base_url
def _yield_warning_screen(seconds=None):
last_warning = None
for elapsed_time in _yield_elapsed_time(seconds):
try:
yield elapsed_time
except Exception as warning:
warning = str(warning)
if last_warning != warning:
last_warning = warning
_log.warn(warning)
time.sleep(RETRY_INTERVAL_IN_SECONDS)
def _yield_elapsed_time(seconds=None):
start_time = time.time()
if seconds is None:
while True:
yield time.time() - start_time
while time.time() - start_time < seconds:
yield time.time() - start_time
def _get_socketIO_session(is_secure, base_url, **kw):
server_url = '%s://%s/' % ('https' if is_secure else 'http', base_url)
try:
response = _get_response(requests.get, server_url, **kw)
except TimeoutError as e:
raise ConnectionError(e)
response_parts = _get_text(response).split(':')
return _SocketIOSession(
id=response_parts[0],
heartbeat_timeout=int(response_parts[1]),
server_supported_transports=response_parts[3].split(','))
| mit | 4,741,928,711,429,100,000 | 33.571702 | 79 | 0.586417 | false |
avinetworks/avi-heat | avi/heat/resources/server_autoscale.py | 1 | 11348 | # GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from options import *
from common import *
class AutoScaleMesosSettings(object):
# all schemas
force_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Apply scaleout even when there are deployments inprogress. (Default: True)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'force',
)
# mapping of properties to their schemas
properties_schema = {
'force': force_schema,
}
class AutoScaleOpenStackSettings(object):
# all schemas
heat_scale_up_url_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) Avi Controller will use this URL to scale upthe pool. Cloud connector will automatically update the membership. This is an alpha feature."),
required=False,
update_allowed=True,
)
heat_scale_down_url_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) Avi Controller will use this URL to scale downthe pool. Cloud connector will automatically update the membership. This is an alpha feature."),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'heat_scale_up_url',
'heat_scale_down_url',
)
# mapping of properties to their schemas
properties_schema = {
'heat_scale_up_url': heat_scale_up_url_schema,
'heat_scale_down_url': heat_scale_down_url_schema,
}
class ServerAutoScalePolicy(AviResource):
resource_name = "serverautoscalepolicy"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
intelligent_autoscale_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use Avi intelligent autoscale algorithm where autoscale is performed by comparing load on the pool against estimated capacity of all the servers. (Default: False)"),
required=False,
update_allowed=True,
)
intelligent_scaleout_margin_schema = properties.Schema(
properties.Schema.NUMBER,
_("Minimum extra capacity as percentage of load used by the intelligent scheme. Scaleout is triggered when available capacity is less than this margin. (Default: 20)"),
required=False,
update_allowed=True,
)
intelligent_scalein_margin_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum extra capacity as percentage of load used by the intelligent scheme. Scalein is triggered when available capacity is more than this margin. (Default: 40)"),
required=False,
update_allowed=True,
)
min_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("No scale-in happens once number of operationally up servers reach min_servers."),
required=False,
update_allowed=True,
)
max_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of servers after scaleout."),
required=False,
update_allowed=True,
)
max_scaleout_adjustment_step_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of servers to scaleout simultaneously. The actual number of servers to scaleout is chosen such that target number of servers is always less than or equal to the max_size. (Default: 1)"),
required=False,
update_allowed=True,
)
max_scalein_adjustment_step_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of servers to scalein simultaneously. The actual number of servers to scalein is chosen such that target number of servers is always more than or equal to the min_size. (Default: 1)"),
required=False,
update_allowed=True,
)
scaleout_cooldown_schema = properties.Schema(
properties.Schema.NUMBER,
_("Cooldown period during which no new scaleout is triggered to allow previous scaleout to successfully complete. (Units: SEC) (Default: 300)"),
required=False,
update_allowed=True,
)
scalein_cooldown_schema = properties.Schema(
properties.Schema.NUMBER,
_("Cooldown period during which no new scalein is triggered to allow previous scalein to successfully complete. (Units: SEC) (Default: 300)"),
required=False,
update_allowed=True,
)
scaleout_alertconfig_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_("Trigger scaleout when alerts due to any of these Alert configurations are raised."),
required=True,
update_allowed=False,
)
scaleout_alertconfig_uuids_schema = properties.Schema(
properties.Schema.LIST,
_("Trigger scaleout when alerts due to any of these Alert configurations are raised. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=scaleout_alertconfig_uuids_item_schema,
required=False,
update_allowed=True,
)
scalein_alertconfig_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_("Trigger scalein when alerts due to any of these Alert configurations are raised."),
required=True,
update_allowed=False,
)
scalein_alertconfig_uuids_schema = properties.Schema(
properties.Schema.LIST,
_("Trigger scalein when alerts due to any of these Alert configurations are raised. You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=scalein_alertconfig_uuids_item_schema,
required=False,
update_allowed=True,
)
use_predicted_load_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use predicted load rather than current load. (Default: False)"),
required=False,
update_allowed=True,
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'intelligent_autoscale',
'intelligent_scaleout_margin',
'intelligent_scalein_margin',
'min_size',
'max_size',
'max_scaleout_adjustment_step',
'max_scalein_adjustment_step',
'scaleout_cooldown',
'scalein_cooldown',
'scaleout_alertconfig_uuids',
'scalein_alertconfig_uuids',
'use_predicted_load',
'description',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'intelligent_autoscale': intelligent_autoscale_schema,
'intelligent_scaleout_margin': intelligent_scaleout_margin_schema,
'intelligent_scalein_margin': intelligent_scalein_margin_schema,
'min_size': min_size_schema,
'max_size': max_size_schema,
'max_scaleout_adjustment_step': max_scaleout_adjustment_step_schema,
'max_scalein_adjustment_step': max_scalein_adjustment_step_schema,
'scaleout_cooldown': scaleout_cooldown_schema,
'scalein_cooldown': scalein_cooldown_schema,
'scaleout_alertconfig_uuids': scaleout_alertconfig_uuids_schema,
'scalein_alertconfig_uuids': scalein_alertconfig_uuids_schema,
'use_predicted_load': use_predicted_load_schema,
'description': description_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'scaleout_alertconfig_uuids': 'alertconfig',
'scalein_alertconfig_uuids': 'alertconfig',
}
class AutoScaleLaunchConfig(AviResource):
resource_name = "autoscalelaunchconfig"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
image_id_schema = properties.Schema(
properties.Schema.STRING,
_("Unique ID of the Amazon Machine Image (AMI) or OpenStack VM ID."),
required=False,
update_allowed=True,
)
openstack_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=AutoScaleOpenStackSettings.properties_schema,
required=False,
update_allowed=True,
)
mesos_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=AutoScaleMesosSettings.properties_schema,
required=False,
update_allowed=True,
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
use_external_asg_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.3) If set to True, ServerAutoscalePolicy will use the autoscaling group (external_autoscaling_groups) from Pool to perform scale up and scale down. Pool should have single autoscaling group configured. (Default: True)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'image_id',
'openstack',
'mesos',
'description',
'use_external_asg',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'image_id': image_id_schema,
'openstack': openstack_schema,
'mesos': mesos_schema,
'description': description_schema,
'use_external_asg': use_external_asg_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'openstack': getattr(AutoScaleOpenStackSettings, 'field_references', {}),
'mesos': getattr(AutoScaleMesosSettings, 'field_references', {}),
}
unique_keys = {
'openstack': getattr(AutoScaleOpenStackSettings, 'unique_keys', {}),
'mesos': getattr(AutoScaleMesosSettings, 'unique_keys', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::ServerAutoScalePolicy': ServerAutoScalePolicy,
'Avi::LBaaS::AutoScaleLaunchConfig': AutoScaleLaunchConfig,
}
| apache-2.0 | -8,957,598,642,244,916,000 | 35.964169 | 252 | 0.656944 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/__init__.py | 1 | 13938 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import lsa_type
class lsa_types(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container for a list of LSA types that are
in the LSDB for the specified area
"""
__slots__ = ("_path_helper", "_extmethods", "__lsa_type")
_yang_name = "lsa-types"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__lsa_type = YANGDynClass(
base=YANGListType(
"type",
lsa_type.lsa_type,
yang_name="lsa-type",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="type",
extensions=None,
),
is_container="list",
yang_name="lsa-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
]
def _get_lsa_type(self):
"""
Getter method for lsa_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type (list)
YANG Description: List of LSA types in the LSDB for the specified
area
"""
return self.__lsa_type
def _set_lsa_type(self, v, load=False):
"""
Setter method for lsa_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsa_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsa_type() directly.
YANG Description: List of LSA types in the LSDB for the specified
area
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"type",
lsa_type.lsa_type,
yang_name="lsa-type",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="type",
extensions=None,
),
is_container="list",
yang_name="lsa-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """lsa_type must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("type",lsa_type.lsa_type, yang_name="lsa-type", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="lsa-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__lsa_type = t
if hasattr(self, "_set"):
self._set()
def _unset_lsa_type(self):
self.__lsa_type = YANGDynClass(
base=YANGListType(
"type",
lsa_type.lsa_type,
yang_name="lsa-type",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="type",
extensions=None,
),
is_container="list",
yang_name="lsa-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
lsa_type = __builtin__.property(_get_lsa_type)
_pyangbind_elements = OrderedDict([("lsa_type", lsa_type)])
from . import lsa_type
class lsa_types(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Enclosing container for a list of LSA types that are
in the LSDB for the specified area
"""
__slots__ = ("_path_helper", "_extmethods", "__lsa_type")
_yang_name = "lsa-types"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__lsa_type = YANGDynClass(
base=YANGListType(
"type",
lsa_type.lsa_type,
yang_name="lsa-type",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="type",
extensions=None,
),
is_container="list",
yang_name="lsa-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
]
def _get_lsa_type(self):
"""
Getter method for lsa_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type (list)
YANG Description: List of LSA types in the LSDB for the specified
area
"""
return self.__lsa_type
def _set_lsa_type(self, v, load=False):
"""
Setter method for lsa_type, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsa_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsa_type() directly.
YANG Description: List of LSA types in the LSDB for the specified
area
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
"type",
lsa_type.lsa_type,
yang_name="lsa-type",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="type",
extensions=None,
),
is_container="list",
yang_name="lsa-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """lsa_type must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType("type",lsa_type.lsa_type, yang_name="lsa-type", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions=None), is_container='list', yang_name="lsa-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__lsa_type = t
if hasattr(self, "_set"):
self._set()
def _unset_lsa_type(self):
self.__lsa_type = YANGDynClass(
base=YANGListType(
"type",
lsa_type.lsa_type,
yang_name="lsa-type",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="type",
extensions=None,
),
is_container="list",
yang_name="lsa-type",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
lsa_type = __builtin__.property(_get_lsa_type)
_pyangbind_elements = OrderedDict([("lsa_type", lsa_type)])
| apache-2.0 | 6,863,335,869,656,353,000 | 35.970822 | 537 | 0.544267 | false |
iw3hxn/LibrERP | sale_order_requirement/wizard/order_requirement_line_add.py | 1 | 6310 | import datetime
import netsvc
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from tools.translate import _
class OrderRequirementLineAdd(orm.TransientModel):
_name = "order.requirement.line.add"
_description = "Add sale Order to line"
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(OrderRequirementLineAdd, self).default_get(cr, uid, fields, context=context)
if context.get('active_id'):
line = self.pool['order.requirement.line'].browse(cr, uid, context['active_id'], context=context)
if 'order_id' in fields:
order_id = line.order_requirement_id.sale_order_id
res.update(order_id=order_id.id)
return res
def _get_order_line(self, cr, uid, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
sale_order_line_obj = self.pool['sale.order.line']
sale_order_line_ids = []
if context.get('order_id'):
line = self.pool['order.requirement'].browse(cr, uid, context['order_id'], context=context)
order_id = line.sale_order_id.id
sale_order_line_ids = sale_order_line_obj.search(cr, uid, [('order_id', '=', order_id)], context=context)
res = sale_order_line_obj.name_get(cr, uid, sale_order_line_ids, context=context)
return res
_columns = {
'order_id': fields.many2one('sale.order', 'Order', required=True, select=True),
'order_line': fields.selection(_get_order_line, 'Order Line', required=False),
# 'order_line_id': fields.many2one('sale.order.line', 'Production Lots'),
# 'order_line_ids': fields.one2many('stock.move.split.lines', 'wizard_id', 'Production Lots'),
}
def link(self, cr, uid, ids, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
res = self._link(cr, uid, ids, context.get('active_ids'), context=context)
return {'type': 'ir.actions.act_window_close'}
def _link(self, cr, uid, ids, line_ids, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
for wizard in self.browse(cr, uid, ids, context):
if wizard.order_line:
line_id = int(wizard.order_line)
order_requirement_line = self.pool['order.requirement.line'].browse(cr, uid, line_ids[0], context)
if not order_requirement_line.product_id:
raise orm.except_orm(_(u'Error!'), _(u"Missing Product"))
order_requirement_line.write({'sale_order_line_id': line_id})
order_requirement = self.pool['order.requirement'].browse(cr, uid, context['order_id'], context)
# create picking
pick_type = 'internal'
ir_sequence_obj = self.pool['ir.sequence']
stock_picking_obj = self.pool['stock.picking']
pick_name = ir_sequence_obj.get(cr, uid, 'stock.picking.' + pick_type)
order = order_requirement.sale_order_id
date_planned = datetime.date.today().strftime(DEFAULT_SERVER_DATE_FORMAT)
picking_vals = {
'name': pick_name,
'origin': _('Order Requirement') + ' ' + order.name,
'date': date_planned,
'type': pick_type,
'state': 'auto',
'move_type': 'one',
'sale_id': order.id,
'address_id': order.partner_shipping_id.id,
'note': order.note,
'invoice_state': 'none',
'company_id': order.company_id.id,
'auto_picking': True,
}
if order.project_project:
project = order.project_project
picking_vals.update({
'project_id': project.id,
'account_id': project.analytic_account_id.id,
'sale_project': project.id
})
picking_id = stock_picking_obj.create(cr, uid, picking_vals, context)
location_id = order.shop_id.warehouse_id.lot_stock_id.id
output_id = order.shop_id.warehouse_id.lot_output_id.id
price_unit = 0.0
if order_requirement_line.qty != 0.0:
price_unit = order_requirement_line.product_id.cost_price
move_vals = {
'name': order_requirement_line.product_id.name[:250],
'picking_id': picking_id,
'product_id': order_requirement_line.product_id.id,
'date': date_planned,
'date_expected': date_planned,
'product_qty': order_requirement_line.qty,
'product_uom': order_requirement_line.product_id.uom_id.id,
# 'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
# 'product_uos': (line.product_uos and line.product_uos.id) \
# or line.product_uom.id,
# 'product_packaging': line.product_packaging.id,
# 'address_id': order.partner_shipping_id.id,
'location_id': location_id,
'location_dest_id': output_id,
'sale_line_id': line_id,
'tracking_id': False,
'state': 'draft',
# 'state': 'waiting',
'company_id': order.company_id.id,
'price_unit': price_unit
}
move_id = self.pool['stock.move'].create(cr, uid, move_vals, context)
wf_service = netsvc.LocalService("workflow")
wf_service.trg_validate(uid, 'stock.picking', picking_id, 'button_confirm', cr)
stock_picking_obj.force_assign(cr, uid, [picking_id], context)
ctx = context.copy()
ctx['force_commit'] = True
stock_picking_obj._commit_cost(cr, uid, [picking_id], ctx)
return True
| agpl-3.0 | 6,181,661,718,559,501,000 | 46.443609 | 117 | 0.538669 | false |
prkumar/uplink | tests/integration/test_extend.py | 1 | 2479 | # Third-party imports
import pytest
# Local imports
import uplink
# Constants
BASE_URL = "https://api.github.com"
class GitHubError(Exception):
pass
@uplink.response_handler
def github_error(response):
if "errors" in response.json():
raise GitHubError()
return response
@uplink.timeout(10)
class GitHubService(uplink.Consumer):
@github_error
@uplink.json
@uplink.post("graphql", args=(uplink.Body,))
def graphql(self, **body):
pass
@uplink.returns.json(member=("data", "repository"))
@uplink.args(body=uplink.Body)
@graphql
def get_repository(self, **body):
pass
@uplink.returns.json(member=("data", "repository"))
@graphql.extend("graphql2", args=(uplink.Body,))
def get_repository2(self, **body):
pass
def test_get_repository(mock_client, mock_response):
data = {
"query": """\
query {
repository(owner: "prkumar", name: "uplink") {
nameWithOwner
}
}"""
}
result = {"data": {"repository": {"nameWithOwner": "prkumar/uplink"}}}
mock_response.with_json(result)
mock_client.with_response(mock_response)
github = GitHubService(base_url=BASE_URL, client=mock_client)
response = github.get_repository(**data)
request = mock_client.history[0]
assert request.method == "POST"
assert request.base_url == BASE_URL
assert request.endpoint == "/graphql"
assert request.timeout == 10
assert request.json == data
assert response == result["data"]["repository"]
def test_get_repository2_failure(mock_client, mock_response):
data = {
"query": """\
query {
repository(owner: "prkumar", name: "uplink") {
nameWithOwner
}
}"""
}
result = {
"data": {"repository": None},
"errors": [
{
"type": "NOT_FOUND",
"path": ["repository"],
"locations": [{"line": 7, "column": 3}],
"message": "Could not resolve to a User with the username 'prkussmar'.",
}
],
}
mock_response.with_json(result)
mock_client.with_response(mock_response)
github = GitHubService(base_url=BASE_URL, client=mock_client)
with pytest.raises(GitHubError):
github.get_repository2(**data)
request = mock_client.history[0]
assert request.method == "POST"
assert request.base_url == BASE_URL
assert request.endpoint == "/graphql2"
assert request.timeout == 10
| mit | -3,548,034,276,720,475,000 | 25.37234 | 88 | 0.61315 | false |
baylee/django | django/db/models/signals.py | 1 | 2515 | import warnings
from functools import partial
from django.db.models.utils import make_model_tuple
from django.dispatch import Signal
from django.utils.deprecation import RemovedInDjango20Warning
class_prepared = Signal(providing_args=["class"])
class ModelSignal(Signal):
"""
Signal subclass that allows the sender to be lazily specified as a string
of the `app_label.ModelName` form.
"""
def _lazy_method(self, method, apps, receiver, sender, **kwargs):
# This partial takes a single optional argument named "sender".
partial_method = partial(method, receiver, **kwargs)
# import models here to avoid a circular import
from django.db import models
if isinstance(sender, models.Model) or sender is None:
# Skip lazy_model_operation to get a return value for disconnect()
return partial_method(sender)
apps = apps or models.base.Options.default_apps
apps.lazy_model_operation(partial_method, make_model_tuple(sender))
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None, apps=None):
self._lazy_method(super(ModelSignal, self).connect, apps, receiver, sender, dispatch_uid=dispatch_uid)
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None, apps=None):
if weak is not None:
warnings.warn("Passing `weak` to disconnect has no effect.", RemovedInDjango20Warning, stacklevel=2)
return self._lazy_method(
super(ModelSignal, self).disconnect, apps, receiver, sender, dispatch_uid=dispatch_uid
)
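# Editor's sketch (not part of Django itself): because ModelSignal resolves
# senders lazily, a receiver can be attached before the target model is
# importable by passing the "app_label.ModelName" string, e.g.
#     post_save.connect(my_handler, sender="library.Book")
# where "library.Book" and my_handler are placeholders.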
pre_init = ModelSignal(providing_args=["instance", "args", "kwargs"], use_caching=True)
post_init = ModelSignal(providing_args=["instance"], use_caching=True)
pre_save = ModelSignal(providing_args=["instance", "raw", "using", "update_fields"],
use_caching=True)
post_save = ModelSignal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
pre_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
post_delete = ModelSignal(providing_args=["instance", "using"], use_caching=True)
m2m_changed = ModelSignal(
providing_args=["action", "instance", "reverse", "model", "pk_set", "using"],
use_caching=True,
)
pre_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using", "apps", "plan"])
post_migrate = Signal(providing_args=["app_config", "verbosity", "interactive", "using", "apps", "plan"])
| bsd-3-clause | 8,754,010,135,598,167,000 | 44.727273 | 114 | 0.694235 | false |
keiserlab/e3fp-paper | project/parameter_optimization/1_chembl20_opt/config/wrapper.py | 1 | 10710 | """Cross-validation wrapper to be run with Spearmint.
Author: Seth Axen
E-mail: [email protected]
"""
import os
import glob
import re
import logging
import csv
from python_utilities.scripting import setup_logging
from python_utilities.io_tools import touch_dir, smart_open
from python_utilities.parallel import Parallelizer, make_data_iterator
from e3fp.config.params import update_params, write_params
from e3fp.conformer.util import smiles_to_dict, MolItemName
from e3fp_paper.sea_utils.util import molecules_to_lists_dicts, \
lists_dicts_to_molecules, \
targets_to_dict, dict_to_targets, \
filter_targets_by_molecules, \
targets_to_mol_lists_targets, \
fprint_params_to_fptype
from e3fp_paper.pipeline import native_tuples_from_sdf
from e3fp_paper.crossvalidation.run import KFoldCrossValidator, \
ByTargetMoleculeSplitter
PROJECT_DIR = os.environ["E3FP_PROJECT"]
MAIN_DIR = os.path.join(PROJECT_DIR, "parameter_optimization")
CV_DIR = os.path.join(MAIN_DIR, "1_chembl20_opt")
MAIN_CONF_DIR = os.path.join(PROJECT_DIR, "conformer_generation")
SMILES_FILE = os.path.join(PROJECT_DIR, "data",
"chembl20_proto_smiles.smi.bz2")
MOL_TARGETS_FILE = os.path.join(PROJECT_DIR, "data",
"chembl20_binding_targets.csv.bz2")
TARGETS_BASENAME = "targets"
MOL_BASENAME = "molecules"
CSV_EXT = ".csv.bz2"
LOG_FILE = "log.txt"
NUM_PROC = None
AUC_TYPE = 'sum'
AFFINITY = 10000
STEREO = True
CV_K = 5
MIN_MOLS_PER_TARGET = 50
REDUCE_NEGATIVES = True
def unformat_params(params):
"""Format params as needed for Spearmint PB file."""
return {k: [v, ] for k, v in params.iteritems()}
def format_params(params):
"""Clean up params dict."""
new_params = {k: v[0] for k, v in params.iteritems()}
new_params['level'] = int(new_params['level'])
new_params['bits'] = int(new_params['bits'])
new_params['first'] = int(new_params['first'])
return new_params
def params_to_str(params, with_first=True):
"""Create a descriptive string built from params."""
params_string = "e3fp_{!s}_rad{:.4g}_level{:d}_fold{:d}".format(
params['conformers'], params['radius_multiplier'], params['level'],
params['bits'])
if with_first:
params_string = "{!s}_first{:d}".format(params_string,
params['first'])
return params_string
def str_to_params(string):
"""Parse descriptive string to get params."""
params = {}
m = re.match("^e3fp_(.+)_rad([0-9\.]+)_level(\d+)_fold(\d+)(.*)", string)
params['conformers'] = m.group(1)
params['radius_multiplier'] = float(m.group(2))
params['level'] = int(m.group(3))
params['bits'] = int(m.group(4))
try:
params['first'] = int(m.group(5).replace('_first', ''))
except ValueError:
pass
return params
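def _example_params_string_roundtrip():
    # Editor's sketch (not part of the original wrapper): params_to_str() and
    # str_to_params() are inverse views of the same settings; the values used
    # here are placeholders.
    params = {'conformers': 'conf20', 'radius_multiplier': 1.718,
              'level': 5, 'bits': 1024, 'first': 3}
    name = params_to_str(params)   # e.g. 'e3fp_conf20_rad1.718_level5_fold1024_first3'
    return name, str_to_params(name)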
def get_existing_fprints(params_string, needed_first, directory):
"""Check directory for fingerprints which can be reused."""
earlier_results_dirs = [x[:-1] for x
in glob.glob("{!s}/*/".format(directory))]
pre_encoding_match_dirs = [
x for x in earlier_results_dirs
if os.path.basename(x).startswith(params_string)]
if len(pre_encoding_match_dirs) == 0:
return None
encoding_num = [(str_to_params(os.path.basename(x))['first'], x)
for x in pre_encoding_match_dirs]
existing_dir_name = None
for first, dir_name in sorted(encoding_num):
if first >= needed_first:
existing_dir_name = dir_name
break
if existing_dir_name is None:
return None
existing_fprints_file = get_molecules_file(os.path.join(
directory, existing_dir_name))
if os.path.isfile(existing_fprints_file):
return existing_fprints_file
else:
return None
def params_to_molecules(params, smiles_file, conf_dir, out_dir,
parallelizer=None):
"""Generate molecules_file based on params dict."""
smiles_dict = smiles_to_dict(smiles_file)
logging.debug("SMILES file has {:d} unique smiles.".format(
len(smiles_dict)))
logging.debug("Example SMILES: {!r}".format(smiles_dict.items()[0]))
fprint_params = {"radius_multiplier": params["radius_multiplier"],
"stereo": STEREO, "bits": params["bits"],
"first": params['first'], "level": params['level']}
conf_dir_files = glob.glob("{!s}/*".format(conf_dir))
logging.debug("Found {:d} files in conformer directory.".format(
len(conf_dir_files)))
sdf_files = [x for x in conf_dir_files
if os.path.basename(x).split('.')[0] in smiles_dict]
logging.debug("{:d} conformer files match SMILES.".format(len(sdf_files)))
if len(sdf_files) == 0:
raise Exception("Directory {!s} does not contain any usable SDF "
"files.".format(conf_dir))
kwargs = {"save": False, "fprint_params": fprint_params}
data_iterator = make_data_iterator(sdf_files)
if parallelizer is not None:
results_iter = parallelizer.run_gen(native_tuples_from_sdf,
data_iterator, kwargs=kwargs)
else:
results_iter = (native_tuples_from_sdf(*x, **kwargs)
for x in data_iterator)
molecules_file = get_molecules_file(out_dir)
fp_type = fprint_params_to_fptype(**params)
with smart_open(molecules_file, "wb") as f:
writer = csv.writer(f)
fp_type.write(writer)
writer.writerow(("molecule id", "smiles", "fingerprint"))
for results in results_iter:
try:
fp_native_list, sdf_file = results
            except ValueError:
                logging.error("Results of fingerprinting did not look as "
                              "expected: {!r}".format(results))
                continue
proto_name = MolItemName.from_str(fp_native_list[0][1]).proto_name
smiles = smiles_dict[proto_name]
for fp_native, fp_name in fp_native_list:
writer.writerow((fp_name, smiles, fp_native))
del smiles_dict
filtered_smiles_dict, mol_lists_dict, fp_type = molecules_to_lists_dicts(
molecules_file)
return (filtered_smiles_dict, mol_lists_dict, fp_type)
def get_molecules_file(out_dir):
"""Get molecules filename."""
return os.path.join(out_dir, MOL_BASENAME + CSV_EXT)
def get_targets_file(out_dir):
"""Get targets filename."""
return os.path.join(out_dir, TARGETS_BASENAME + CSV_EXT)
def main(job_id, params, main_conf_dir=MAIN_CONF_DIR, main_dir=CV_DIR,
out_dir=None, smiles_file=SMILES_FILE, check_existing=True,
mol_targets_file=MOL_TARGETS_FILE, k=CV_K, log_file=LOG_FILE,
verbose=False, overwrite=False, min_mols=MIN_MOLS_PER_TARGET,
parallelizer=None):
params = format_params(params)
pre_encoding_params_string = params_to_str(params, with_first=False)
params_string = params_to_str(params)
if out_dir is None:
out_dir = os.path.join(main_dir, params_string)
touch_dir(out_dir)
if log_file is not None:
log_file = os.path.join(out_dir, log_file)
setup_logging(log_file, verbose=verbose)
params_file = os.path.join(out_dir, "params.cfg")
config_parser = update_params(params, section_name="fingerprinting")
write_params(config_parser, params_file)
if not isinstance(parallelizer, Parallelizer):
parallelizer = Parallelizer(parallel_mode="processes",
num_proc=NUM_PROC)
logging.info("Params: {!r}".format(params.items()))
logging.info("Saving files to {:s}.".format(out_dir))
logging.info("Checking for usable pre-existing fingerprints.")
existing_molecules_file = get_existing_fprints(pre_encoding_params_string,
params['first'], main_dir)
molecules_file = get_molecules_file(out_dir)
if os.path.isfile(molecules_file) and not overwrite:
logging.info("Molecules file already exists. Loading.")
smiles_dict, mol_lists_dict, fp_type = molecules_to_lists_dicts(
molecules_file)
elif existing_molecules_file is None:
conf_dir = os.path.join(main_conf_dir, params['conformers'])
logging.info("Generating fingerprints from conformers in "
"{!s}.".format(conf_dir))
smiles_dict, mol_lists_dict, fp_type = params_to_molecules(
params, smiles_file, conf_dir, out_dir, parallelizer=parallelizer)
else:
logging.info("Using native strings from existing molecules "
"file {!s}.".format(existing_molecules_file))
smiles_dict, mol_lists_dict, fp_type = molecules_to_lists_dicts(
existing_molecules_file, first=params['first'])
lists_dicts_to_molecules(get_molecules_file(out_dir),
smiles_dict, mol_lists_dict, fp_type)
targets_file = get_targets_file(out_dir)
if overwrite or not os.path.isfile(targets_file):
logging.info("Reading targets from {!s}.".format(mol_targets_file))
targets_dict = targets_to_dict(mol_targets_file, affinity=AFFINITY)
logging.debug("Read {:d} targets.".format(len(targets_dict)))
logging.info("Filtering targets by molecules.")
filtered_targets_dict = targets_to_mol_lists_targets(
filter_targets_by_molecules(targets_dict, mol_lists_dict),
mol_lists_dict)
del targets_dict, smiles_dict, mol_lists_dict, fp_type
logging.info("Saving filtered targets to {!s}.".format(targets_file))
dict_to_targets(targets_file, filtered_targets_dict)
del filtered_targets_dict
else:
logging.info("Targets file already exists. Skipping.")
parallel_mode = parallelizer.parallel_mode
parallelizer = Parallelizer(parallel_mode=parallel_mode, num_proc=k + 1)
splitter = ByTargetMoleculeSplitter(k, reduce_negatives=REDUCE_NEGATIVES)
kfold_cv = KFoldCrossValidator(k=k, parallelizer=parallelizer,
splitter=splitter,
return_auc_type=AUC_TYPE, out_dir=out_dir,
overwrite=False)
auc = kfold_cv.run(molecules_file, targets_file, min_mols=min_mols,
affinity=AFFINITY)
logging.info("CV Mean AUC: {:.4f}".format(auc))
return 1 - auc
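# Minimal usage sketch (not part of the original wrapper): Spearmint imports this
# module and calls main(job_id, params) with each value wrapped in a one-element
# list, which format_params() above unpacks. The values here are hypothetical.
def _example_spearmint_call():
    example_params = unformat_params({'conformers': 'standard',
                                      'radius_multiplier': 1.7,
                                      'level': 5, 'bits': 1024, 'first': 3})
    # main() returns 1 - mean AUC so that Spearmint can minimize it.
    return main(0, example_params)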
| lgpl-3.0 | -577,514,957,864,930,300 | 40.351351 | 78 | 0.61634 | false |
PyIran/website | project/apps/user/views/login.py | 1 | 2555 | # flask import
from flask import Blueprint, abort, request, current_app, session,\
g, redirect, url_for, send_from_directory, make_response, jsonify, flash
from flask.ext.babel import lazy_gettext as _
from werkzeug.security import check_password_hash, generate_password_hash
# project import
from project.utils.auth import not_login_required, login_required, admin_required, roles_accepted
from project.utils.template import render, title, ajax_render, smart_render, is_ajax
from project.apps.user.forms import LoginForm
from project.apps.user.models import *
from project.database import db_session
from . import mod
@mod.route('/')
@login_required
def userindex():
try:
        return 'You are logged in as ' + g.user.username
except:
return redirect(url_for('user.login'))
@mod.route('/login/', methods=['GET', 'POST'])
#@not_login_required
@title(_('login'))
def login():
"""
"""
if request.method != 'POST':
form = LoginForm()
return render('user/login.html', login_form=form)
next = request.args.get('next', None)
form = LoginForm(request.form)
form.validate()
username = form.data['username']
password = form.data['password']
if username != '':
try:
user = Profile.query.filter(Profile.username == username).first()
except:
            current_app.logger.warning(
                'login user not found %s' % form.data['username'])
flash(_("User and password not match!"), "danger")
return render('user/login.html', login_form=form)
try:
if not check_password_hash(user.password, password):
current_app.logger.warning(
'login user and password not match %s - %s ' %
(form.data['username'], form.data['password']))
flash(_("User and password not match!"), "danger")
return render('user/login.html', login_form=form)
except:
flash(_("User and password not match!"), "danger")
return render('user/login.html', login_form=form)
        current_app.logger.debug('user %s logged in' % form.data['username'])
session['username'] = username
if next:
return redirect(next)
return redirect(url_for('user.profile', user=user))
@mod.route('/logout/')
@login_required
@title(_('logout'))
def logout():
"""
"""
    current_app.logger.debug('user %s logged out' % g.user.username)
del(session['username'])
return redirect(url_for('main.index'))
| gpl-3.0 | -6,441,913,961,556,251,000 | 32.631579 | 97 | 0.626223 | false |
evenmarbles/rlglued | rlglued/network/network.py | 1 | 8219 | #
# Copyright (C) 2007, Mark Lee
#
# http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 592 $
# $Date: 2009-02-04 18:24:59 -0500 (Wed, 04 Feb 2009) $
# $Author: [email protected] $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/network/Network.py $
#
# The Network class is defined in here
#
import socket
import struct
import time
import StringIO
try:
import numpy
numpy_int_type = numpy.dtype('int32').newbyteorder('>')
numpy_float_type = numpy.dtype('float64').newbyteorder('>')
numpy_char_type = 'S1' # numpy.dtype('uint8').newbyteorder('>')
except Exception:  # numpy is optional; fall back to the pure-Python list packing
pass
from rlglued.types import Action
from rlglued.types import Observation
from rlglued.types import AbstractType
# RL-Glue needs to know what type of object is trying to connect.
kExperimentConnection = 1
kAgentConnection = 2
kEnvironmentConnection = 3
kAgentInit = 4 # agent_* start by sending one of these values
kAgentSetup = 5 # to the client to let it know what type of
kAgentStart = 6 # event to respond to
kAgentStep = 7
kAgentEnd = 8
kAgentCleanup = 9
kAgentMessage = 10
kEnvInit = 11
kEnvSetup = 12
kEnvStart = 13
kEnvStep = 14
kEnvCleanup = 15
kEnvMessage = 19
kRLInit = 20
kRLStart = 21
kRLStep = 22
kRLCleanup = 23
kRLReturn = 24
kRLNumSteps = 25
kRLNumEpisodes = 26
kRLEpisode = 27
kRLAgentMessage = 33
kRLEnvMessage = 34
kRLTerm = 35
kLocalHost = "127.0.0.1"
kDefaultPort = 4096
kRetryTimeout = 2
kDefaultBufferSize = 4096
kIntSize = 4
kDoubleSize = 8
kCharSize = 1
kUnknownMessage = "Unknown Message: %s\n"
class Network(object):
def __init__(self):
self.sock = None
self.recv_buffer = StringIO.StringIO('')
self.send_buffer = StringIO.StringIO('')
if 'numpy' in globals():
self.get_AbstractType = self.get_AbstractType_numpy
else:
self.get_AbstractType = self.get_AbstractType_list
def connect(self, host=kLocalHost, port=kDefaultPort, retry_timeout=kRetryTimeout):
while self.sock is None:
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.connect((host, port))
except socket.error:
self.sock = None
time.sleep(retry_timeout)
else:
break
def close(self):
self.sock.close()
def send(self):
self.sock.sendall(self.send_buffer.getvalue())
def recv(self, size):
s = ''
while len(s) < size:
s += self.sock.recv(size - len(s))
self.recv_buffer.write(s)
self.recv_buffer.seek(0)
return len(s)
def clear_send_buffer(self):
self.send_buffer.close()
self.send_buffer = StringIO.StringIO()
def clear_recv_buffer(self):
self.recv_buffer.close()
self.recv_buffer = StringIO.StringIO()
def flip_send_buffer(self):
self.clear_send_buffer()
def flip_recv_buffer(self):
self.clear_recv_buffer()
def get_int(self):
s = self.recv_buffer.read(kIntSize)
return struct.unpack("!i", s)[0]
def get_double(self):
s = self.recv_buffer.read(kDoubleSize)
return struct.unpack("!d", s)[0]
def get_string(self):
# If you read 0 you get "" not None so that's fine
length = self.get_int()
return self.recv_buffer.read(length)
def get_AbstractType_list(self):
num_ints = self.get_int()
num_doubles = self.get_int()
num_chars = self.get_int()
return_struct = AbstractType()
if num_ints > 0:
s = self.recv_buffer.read(num_ints * kIntSize)
return_struct.intArray = list(struct.unpack("!%di" % num_ints, s))
if num_doubles > 0:
s = self.recv_buffer.read(num_doubles * kDoubleSize)
return_struct.doubleArray = list(struct.unpack("!%dd" % num_doubles, s))
if num_chars > 0:
s = self.recv_buffer.read(num_chars * kCharSize)
return_struct.charArray = list(struct.unpack("!%dc" % num_chars, s))
return return_struct
def get_AbstractType_numpy(self):
num_ints = self.get_int()
num_doubles = self.get_int()
num_chars = self.get_int()
return_struct = AbstractType()
if num_ints > 0:
s = self.recv_buffer.read(num_ints * kIntSize)
assert kIntSize == 4
return_struct.intArray = numpy.frombuffer(s,
dtype=numpy_int_type,
count=num_ints)
if num_doubles > 0:
s = self.recv_buffer.read(num_doubles * kDoubleSize)
return_struct.doubleArray = numpy.frombuffer(s,
count=num_doubles,
dtype=numpy_float_type)
if num_chars > 0:
s = self.recv_buffer.read(num_chars * kCharSize)
return_struct.charArray = numpy.frombuffer(s,
count=num_chars,
dtype=numpy_char_type)
return return_struct
def get_Observation(self):
return Observation.from_AbstractType(self.get_AbstractType())
def get_Action(self):
return Action.from_AbstractType(self.get_AbstractType())
def put_int(self, value):
self.send_buffer.write(struct.pack("!i", value))
def put_double(self, value):
self.send_buffer.write(struct.pack("!d", value))
def put_string(self, value):
if value is None:
value = ''
self.put_int(len(value))
self.send_buffer.write(value)
def put_Observation(self, obs):
self.put_AbstractType(obs)
def put_Action(self, action):
self.put_AbstractType(action)
def put_AbstractType(self, item):
self.put_int(len(item.intArray))
self.put_int(len(item.doubleArray))
self.put_int(len(item.charArray))
if len(item.intArray) > 0:
self.send_buffer.write(struct.pack("!%di" % (len(item.intArray)), *item.intArray))
if len(item.doubleArray) > 0:
self.send_buffer.write(struct.pack("!%dd" % (len(item.doubleArray)), *item.doubleArray))
if len(item.charArray) > 0:
self.send_buffer.write(struct.pack("!%dc" % (len(item.charArray)), *item.charArray))
def put_RewardObservation(self, reward_observation):
self.put_int(reward_observation.terminal)
self.put_double(reward_observation.r)
self.put_Observation(reward_observation.o)
def sizeof_AbstractType(self, item):
size = kIntSize * 3
int_size = 0
double_size = 0
char_size = 0
if item is not None:
if item.intArray is not None:
int_size = kIntSize * len(item.intArray)
if item.doubleArray is not None:
double_size = kDoubleSize * len(item.doubleArray)
if item.charArray is not None:
char_size = kCharSize * len(item.charArray)
return size + int_size + double_size + char_size
def sizeof_Action(self, action):
return self.sizeof_AbstractType(action)
def sizeof_Observation(self, observation):
return self.sizeof_AbstractType(observation)
def sizeof_RewardObservation(self, reward_observation):
return kIntSize + kDoubleSize + self.sizeof_Observation(reward_observation.o)
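# Illustrative sketch (not part of the original codec) of the buffer protocol
# above: connect to an RL-Glue server (assumed to listen on the default
# host/port) and announce this client as an agent.
def _example_announce_agent(host=kLocalHost, port=kDefaultPort):
    net = Network()
    net.connect(host, port)
    net.clear_send_buffer()
    net.put_int(kAgentConnection)  # state identifier telling RL-Glue who we are
    net.put_int(0)                 # zero-length payload follows
    net.send()
    net.close()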
| bsd-3-clause | 7,160,002,725,044,621,000 | 30.980545 | 111 | 0.608103 | false |
chibisov/drf-extensions | rest_framework_extensions/etag/mixins.py | 1 | 2855 | from rest_framework_extensions.etag.decorators import etag, api_etag
from rest_framework_extensions.settings import extensions_api_settings
class BaseETAGMixin:
# todo: test me. Create generic test like test_etag(view_instance,
# method, should_rebuild_after_method_evaluation)
object_etag_func = extensions_api_settings.DEFAULT_OBJECT_ETAG_FUNC
list_etag_func = extensions_api_settings.DEFAULT_LIST_ETAG_FUNC
class ListETAGMixin(BaseETAGMixin):
@etag(etag_func='list_etag_func')
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
class RetrieveETAGMixin(BaseETAGMixin):
@etag(etag_func='object_etag_func')
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
class UpdateETAGMixin(BaseETAGMixin):
@etag(etag_func='object_etag_func', rebuild_after_method_evaluation=True)
def update(self, request, *args, **kwargs):
return super().update(request, *args, **kwargs)
class DestroyETAGMixin(BaseETAGMixin):
@etag(etag_func='object_etag_func')
def destroy(self, request, *args, **kwargs):
return super().destroy(request, *args, **kwargs)
class ReadOnlyETAGMixin(RetrieveETAGMixin,
ListETAGMixin):
pass
class ETAGMixin(RetrieveETAGMixin,
UpdateETAGMixin,
DestroyETAGMixin,
ListETAGMixin):
pass
class APIBaseETAGMixin:
# todo: test me. Create generic test like test_etag(view_instance,
# method, should_rebuild_after_method_evaluation)
api_object_etag_func = extensions_api_settings.DEFAULT_API_OBJECT_ETAG_FUNC
api_list_etag_func = extensions_api_settings.DEFAULT_API_LIST_ETAG_FUNC
class APIListETAGMixin(APIBaseETAGMixin):
@api_etag(etag_func='api_list_etag_func')
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
class APIRetrieveETAGMixin(APIBaseETAGMixin):
@api_etag(etag_func='api_object_etag_func')
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
class APIUpdateETAGMixin(APIBaseETAGMixin):
@api_etag(etag_func='api_object_etag_func', rebuild_after_method_evaluation=True)
def update(self, request, *args, **kwargs):
return super().update(request, *args, **kwargs)
class APIDestroyETAGMixin(APIBaseETAGMixin):
@api_etag(etag_func='api_object_etag_func')
def destroy(self, request, *args, **kwargs):
return super().destroy(request, *args, **kwargs)
class APIReadOnlyETAGMixin(APIRetrieveETAGMixin,
APIListETAGMixin):
pass
class APIETAGMixin(APIRetrieveETAGMixin,
APIUpdateETAGMixin,
APIDestroyETAGMixin,
APIListETAGMixin):
pass
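# Usage sketch (not part of the library): list a mixin before the DRF viewset
# base class so its decorated list/retrieve/update/destroy methods win in the
# MRO. "MyModel" and "MyModelSerializer" are hypothetical placeholders.
#
#     from rest_framework import viewsets
#
#     class MyModelViewSet(ETAGMixin, viewsets.ModelViewSet):
#         queryset = MyModel.objects.all()
#         serializer_class = MyModelSerializer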
| mit | 6,974,626,111,315,781,000 | 31.443182 | 85 | 0.686865 | false |
SimonKohl/lasagne_visualizer | lasagne_visualizer/lasagne_visualizer.py | 1 | 6662 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from copy import deepcopy
def get_currently_trainable_layers(net):
layers = [l for l in net.keys() if hasattr(net[l], 'W') if 'trainable' in list(net[l].params[net[l].W])]
return layers
def get_all_trainable_layers(net):
layers = [l for l in net.keys() if hasattr(net[l], 'W')]
return layers
def mean(data):
"""Return the sample arithmetic mean of data."""
n = len(data)
if n < 1:
raise ValueError('mean requires at least one data point')
return sum(data)/n # in Python 2 use sum(data)/float(n)
def _ss(data):
"""Return sum of square deviations of sequence data."""
c = mean(data)
ss = sum((x-c)**2 for x in data)
return ss
def pstdev(data):
"""Calculates the population standard deviation."""
n = len(data)
if n < 2:
raise ValueError('variance requires at least two data points')
ss = _ss(data)
pvar = ss/n # the population variance
return pvar**0.5
class weight_supervisor():
"""
Class that lets you live-monitor the weights of an arbitrary number of layers.
Example: PLOT ALL CURRENTLY TRAINABLE WEIGHTS IN LASAGNE NETWORK
from lasagne_visualizer import lasagne_visualizer
...import <libraries>
...%matplotlib notebook
...define net
...define no_epochs
f = plt.figure()
    weight_supervisor = lasagne_visualizer.weight_supervisor(net, no_epochs, mode='currently_trainable', custom_weight_ranges={'conv1_2': [-2., 2.]})
weight_supervisor.initialize_grid()
for epoch in range(no_epochs):
...train
...evaluate
weight_supervisor.accumulate_weight_stats()
weight_supervisor.live_plot()
f.canvas.draw()
"""
def __init__(self, net, no_epochs, mode='currently_trainable', layer_names=[], custom_weight_ranges={}):
"""
Initialize the weight_supervisor class.
:param net: dictionary with layer names as keys and lasagne layers as values.
:param no_epochs: integer number of epochs to supervise for.
:param mode: one in 'currently_trainable', 'all_trainable', 'custom'; if 'custom', @param layer_names needs to be given
:param layer_names: list of names of layers to supervise, used only if @param mode equals 'custom'
:param custom_weight_ranges: a dictionary with layer names as keys and lists specifying the custom max/min values of the layers' weights as values.
"""
if mode == 'currently_trainable':
self.layer_names = get_currently_trainable_layers(net)
elif mode == 'all_trainable':
self.layer_names = get_all_trainable_layers(net)
elif mode == 'custom':
self.layer_names = layer_names
else:
raise Exception("Give a @param mode in ['currently_trainable', 'all_trainable', 'custom']!")
self.net = net
self.no_epochs = no_epochs
self.weight_ranges = {l:[-1.,1.] if l not in custom_weight_ranges.keys() else custom_weight_ranges[l] for l in self.layer_names}
init_dict = {l: [] for l in self.layer_names}
self.max_weights, self.min_weights, self.mean_weights, self.err_band_lo_weights, self.err_band_hi_weights = \
deepcopy(init_dict), deepcopy(init_dict), deepcopy(init_dict), deepcopy(init_dict), deepcopy(init_dict)
self.epochs = []
self.curr_epoch = 1
def initialize_grid(self):
no_layers = len(self.layer_names)
gs = gridspec.GridSpec(no_layers, 1)
self.axis = []
for l in range(no_layers):
self.axis.append(plt.subplot(gs[l, 0]))
y_min = self.weight_ranges[self.layer_names[l]][0]
y_max = self.weight_ranges[self.layer_names[l]][1]
self.axis[l].set_xlim(1, self.no_epochs)
self.axis[l].set_ylim(y_min, y_max)
            aspect_ratio = self.no_epochs // 5 / (y_max - y_min)
try:
assert aspect_ratio > 0.
except AssertionError:
raise Exception("aspect ratio must be > 0., was found {}".format(aspect_ratio))
self.axis[l].set_aspect(aspect_ratio)
self.axis[l].locator_params(axis='y', nbins=5)
self.axis[l].locator_params(axis='x', nbins=5)
self.axis[-1].set_xlabel('epochs')
def accumulate_weight_stats(self):
for l in self.layer_names:
weights = self.net[l].W.get_value()[0]
total_weights = 1
for w in weights.shape:
total_weights *= w
weights = weights.reshape(total_weights)
weights = [weights[i] for i in range(len(weights))]
self.max_weights[l].append(max(weights))
self.min_weights[l].append(min(weights))
self.mean_weights[l].append(mean(weights))
self.err_band_lo_weights[l].append(mean(weights) - pstdev(weights) / 2.)
self.err_band_hi_weights[l].append(mean(weights) + pstdev(weights) / 2.)
self.epochs.append(self.curr_epoch)
self.curr_epoch += 1
def live_plot(self):
fs = 12
for l_ix, l in enumerate(self.layer_names):
if self.axis[l_ix].lines:
self.axis[l_ix].lines[0].set_xdata(self.epochs)
self.axis[l_ix].lines[0].set_ydata(self.mean_weights[l])
else:
self.axis[l_ix].plot(self.epochs, self.mean_weights[l], 'r', label='mean')
### remove collection objects to avoid stacked redrawing
self.axis[l_ix].collections[:] = []
self.axis[l_ix].fill_between(self.epochs, self.min_weights[l], self.err_band_lo_weights[l], facecolor='green', edgecolor='green',alpha=0.5, label='extremata')
self.axis[l_ix].fill_between(self.epochs, self.max_weights[l], self.err_band_hi_weights[l] , facecolor='green', edgecolor='green', alpha=0.5)
self.axis[l_ix].fill_between(self.epochs, self.err_band_lo_weights[l] , self.err_band_hi_weights[l], facecolor='blue', edgecolor='blue',alpha=0.5, label='std. dev.')
### remove previous text objects to avoid stacked redrawing
self.axis[l_ix].texts[:] = []
y_max = self.weight_ranges[l][1]
self.axis[l_ix].text(1., y_max, l, color='black', fontsize=fs, bbox=dict(facecolor='white', alpha=1))
handles, labels = self.axis[0].get_legend_handles_labels()
leg = self.axis[0].legend(handles, labels, ncol=3, loc=1, fontsize=fs)
leg.get_frame().set_alpha(0.5)
| mit | 247,173,798,034,820,320 | 37.287356 | 177 | 0.606575 | false |
spivachuk/sovrin-node | indy_node/test/anon_creds/test_get_revoc_reg_delta_with_none_results.py | 1 | 1588 | import copy
import json
from indy_common.constants import REVOC_REG_DEF_ID, TO, CRED_DEF_ID, REVOC_TYPE, TAG, VALUE
from indy_common.state import domain
from plenum.common.constants import DATA, STATE_PROOF
from plenum.common.types import f, OPERATION
from plenum.common.util import get_utc_epoch
from plenum.test.helper import sdk_send_and_check
def test_send_reg_def_and_get_delta_then(
looper,
txnPoolNodeSet,
sdk_pool_handle,
send_revoc_reg_def_by_default,
build_get_revoc_reg_delta):
rev_def_req, _ = send_revoc_reg_def_by_default
get_revoc_reg_delta = copy.deepcopy(build_get_revoc_reg_delta)
get_revoc_reg_delta['operation'][REVOC_REG_DEF_ID] = domain.make_state_path_for_revoc_def(authors_did=rev_def_req[f.IDENTIFIER.nm],
cred_def_id=rev_def_req[OPERATION][CRED_DEF_ID],
revoc_def_type=rev_def_req[OPERATION][REVOC_TYPE],
revoc_def_tag=rev_def_req[OPERATION][TAG]).decode()
get_revoc_reg_delta['operation'][TO] = get_utc_epoch()
sdk_reply = sdk_send_and_check([json.dumps(get_revoc_reg_delta)], looper, txnPoolNodeSet, sdk_pool_handle)
reply = sdk_reply[0][1]
assert DATA in reply['result']
assert reply['result'][DATA] is None
assert STATE_PROOF in reply['result']
assert reply['result'][STATE_PROOF] is not None
| apache-2.0 | 550,678,674,313,616,640 | 51.933333 | 145 | 0.588791 | false |
aktiur/votes | votes/spiders/votes_spider.py | 1 | 7106 | # -*- coding: utf-8 -*-
import logging
import re
import scrapy
from votes import items
__author__ = 'Arthur Cheysson <[email protected]>'
logger = logging.getLogger(__file__)
RE_VOTE = r'([0-9]+)(\*?)'
RE_SCRUTIN_URL = r'\(legislature\)/(?P<legislature>[0-9]+)/\(num\)/(?P<scrutin>[0-9]+)$'
RE_SCRUTIN_DATE = r'[0-9]{2}/[0-9]{2}/[0-9]{4}$'
RE_GROUPE_MEMBRES = r'\(([0-9]+) membres\)$'
NBSP = u"\u00A0"
class VotesSpider(scrapy.Spider):
name = "votes"
allowed_domains = ['assemblee-nationale.fr']
start_urls = ['http://www2.assemblee-nationale.fr/scrutins/liste/(legislature)/14']
def __init__(self):
super(VotesSpider, self).__init__()
self.re_vote = re.compile(RE_VOTE)
self.re_scrutin_url = re.compile(RE_SCRUTIN_URL)
self.re_scrutin_date = re.compile(RE_SCRUTIN_DATE)
self.re_groupe_membres = re.compile(RE_GROUPE_MEMBRES)
def parse(self, response):
logger.info(u'Récupéré sommaire (%d) <%s>', response.status, response.url)
for analyse in response.xpath('//table[@id="listeScrutins"]/tbody/tr/td[3]/a[contains(., "analyse")]/@href').extract():
yield scrapy.Request(response.urljoin(analyse), self.parse_analyse)
next_link = response.css('#contenu-page .pagination-bootstrap:first-child li:last-child')\
.xpath('a[contains(., "Suivant")]/@href').extract_first()
if next_link:
yield scrapy.Request(response.urljoin(next_link), self.parse)
def parse_analyse(self, response):
logger.info(u'Récupéré scrutin (%d) <%s>', response.status, response.url)
url_match = self.re_scrutin_url.search(response.url)
legislature = url_match.group('legislature')
identifiant = url_match.group('scrutin')
scrutin = self.extraire_scrutin(response)
scrutin['legislature'] = legislature
scrutin['identifiant'] = identifiant
yield scrutin
position = None
for position in self.extraire_positions(response):
position['legislature'] = legislature
position['identifiant'] = identifiant
yield position
if position is None:
logger.warning('Pas de position de groupe pour <%s>', response.url)
vote = None
for vote in self.extraire_votes(response):
vote['legislature'] = legislature
vote['identifiant'] = identifiant
yield vote
if vote is None:
logger.warning('Pas de position personnelle pour <%s>', response.url)
def extraire_scrutin(self, response):
s = items.Scrutin()
titre_page = response.xpath('//h1/text()[last()]').extract_first()
titre_page_match = self.re_scrutin_date.search(titre_page)
s["date"] = titre_page_match.group(0)
sujet_scrutin = response.xpath('//h3[@class="president-title"]//text()').extract_first()
s["intitule"] = sujet_scrutin[len('Scrutin public sur '):]
elements_votes = response.css('.interieur-media-moyenne-colonne-scrutin')
nombre_votes = map(int, elements_votes.xpath('p/b/text()').extract())
if len(nombre_votes) == 1:
# motion de censure
s['votes_pour'] = nombre_votes
else:
s["nombres_votants"], s["nombres_exprimes"], s["nombres_majorite"], s["votes_pour"], s["votes_contre"] = nombre_votes
s["abstention"] = s["nombres_votants"] - s["nombres_exprimes"]
resultat_brut = elements_votes.css('.annoncevote::text').extract_first()
if u"n'a pas adopté" in resultat_brut:
s["resultat"] = u"non adopté"
elif u"a adopté" in resultat_brut:
s["resultat"] = u"adopté"
else:
s["resultat"] = u"Inattendu : " + resultat_brut
return s
def extraire_positions(self, response):
elem_groupes = response.css('#index-groupe li > a')
noms_groupe_avec_nombre = response.css('#analyse .TTgroupe .nomgroupe::text').extract()
for groupe, nb_membres in zip(elem_groupes, noms_groupe_avec_nombre):
p = items.Position()
p['groupe'] = groupe.css('.nom-groupe::text').extract_first()
p['membres'] = self.re_groupe_membres.search(nb_membres).group(1)
for res_groupe in groupe.css('.res-groupe'):
position = res_groupe.xpath('text()[1]').extract_first().split(':')[0].lower()
if position in ['non-votant', 'non-votants']:
position = 'nonvotants'
p[position] = int(res_groupe.xpath('b/text()').extract_first())
yield p
def extraire_votes(self, response):
for groupe in response.css('#analyse .TTgroupe'):
nom_groupe = groupe.xpath('a/@name').extract_first()
for position in groupe.xpath('div'):
nom_position = position.xpath('@class').extract_first().lower()
if nom_position in ['non-votants', 'non-votant']:
nom_position = 'non-votant'
for depute in position.xpath('ul/li'):
prenom_potentiel = depute.xpath('text()').extract_first().strip()
if prenom_potentiel in [u"membres du groupe",
u"membre du groupe",
u"présent ou ayant délégué son droit de vote",
u"présents ou ayant délégué leur droit de vote"]:
continue
if prenom_potentiel.split()[0] in [u'M.', u'Mme']:
prenom_potentiel = ' '.join(prenom_potentiel.split()[1:])
v = items.Vote()
v['groupe'] = nom_groupe
v['position'] = nom_position
v['prenom'] = prenom_potentiel
v['nom'] = depute.xpath('b/text()').extract_first().strip().replace(NBSP, ' ')
yield v
else:
                    # handle the case where the deputies "ul" contains no "li" elements
liste = position.xpath('ul')
for elem_nom in liste.xpath('b'):
chaine_prenom = elem_nom.xpath('preceding-sibling::node()[1]').extract_first()
prenom_potentiel = chaine_prenom.split(' ')[-1]
                        composants_prenom = prenom_potentiel.split()  # split on the non-breaking space
if composants_prenom[0] in [u'M.', u'Mme', u'MM.', u'Mmes']:
prenom = ' '.join(composants_prenom[1:])
else:
prenom = ' '.join(composants_prenom)
v = items.Vote()
v['groupe'] = nom_groupe
v['position'] = nom_position
v['prenom'] = prenom
v['nom'] = elem_nom.xpath('text()').extract_first().strip().replace(NBSP, ' ')
yield v
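# Illustrative way (not part of the project) to run this spider outside the
# "scrapy crawl votes" command line; the feed settings are an assumption about
# how one might export the scraped items.
def _run_votes_spider():
    from scrapy.crawler import CrawlerProcess
    process = CrawlerProcess({'FEED_FORMAT': 'json', 'FEED_URI': 'votes.json'})
    process.crawl(VotesSpider)
    process.start()  # blocks until the crawl finishes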
| gpl-3.0 | 5,991,381,271,288,323,000 | 39.959538 | 129 | 0.548123 | false |
higebu/pyvmomi | pyVim/connect.py | 1 | 29200 | # VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## @file connect.py
## @brief Connect to a VMOMI ServiceInstance.
##
## Detailed description (for Doxygen goes here)
"""
Connect to a VMOMI ServiceInstance.
Detailed description (for [e]pydoc goes here).
"""
from six import reraise
import sys
import re
import ssl
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
from six.moves import http_client
import requests
from requests.auth import HTTPBasicAuth
from pyVmomi import vim, vmodl, SoapStubAdapter, SessionOrientedStub
from pyVmomi.SoapAdapter import CONNECTION_POOL_IDLE_TIMEOUT_SEC
from pyVmomi.VmomiSupport import nsMap, versionIdMap, versionMap, IsChildVersion
from pyVmomi.VmomiSupport import GetServiceVersions
"""
Global regular expression for parsing host and port connection
See http://www.ietf.org/rfc/rfc3986.txt sec 3.2.2
"""
_rx = re.compile(r"(^\[.+\]|[^:]+)(:\d+)?$")
_si = None
"""
Global (thread-shared) ServiceInstance
@todo: Get rid of me?
"""
class closing(object):
"""
Helper class for using closable objects in a 'with' statement,
similar to the one provided by contextlib.
"""
def __init__(self, obj):
self.obj = obj
def __enter__(self):
return self.obj
def __exit__(self, *exc_info):
self.obj.close()
class VimSessionOrientedStub(SessionOrientedStub):
'''A vim-specific SessionOrientedStub. See the SessionOrientedStub class
in pyVmomi/SoapAdapter.py for more information.'''
# The set of exceptions that should trigger a relogin by the session stub.
SESSION_EXCEPTIONS = (
vim.fault.NotAuthenticated,
)
@staticmethod
def makeUserLoginMethod(username, password, locale=None):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.Login(username, password, locale)
return _doLogin
@staticmethod
def makeExtensionLoginMethod(extensionKey):
'''Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.'''
def _doLogin(soapStub):
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
si.content.sessionManager.LoginExtensionByCertificate(extensionKey)
return _doLogin
@staticmethod
def makeCertHokTokenLoginMethod(stsUrl, stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a HoK SAML token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(stsUrl)
def _doLogin(soapStub):
import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_hok_saml_assertion(cert,key)
def _requestModifier(request):
return sso.add_saml_context(request, samlAssertion, key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
with soapStub.requestModifier(_requestModifier):
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
@staticmethod
def makeCredBearerTokenLoginMethod(username,
password,
stsUrl,
stsCert=None):
'''Return a function that will call the vim.SessionManager.LoginByToken()
after obtaining a Bearer token from the STS. The result of this function
can be passed as the "loginMethod" to a SessionOrientedStub constructor.
@param username: username of the user/service registered with STS.
@param password: password of the user/service registered with STS.
        @param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
@param stsCert: public key of the STS service.
'''
assert(username)
assert(password)
assert(stsUrl)
def _doLogin(soapStub):
import sso
cert = soapStub.schemeArgs['cert_file']
key = soapStub.schemeArgs['key_file']
authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
sts_cert=stsCert)
samlAssertion = authenticator.get_bearer_saml_assertion(username,
password,
cert,
key)
si = vim.ServiceInstance("ServiceInstance", soapStub)
sm = si.content.sessionManager
if not sm.currentSession:
try:
soapStub.samlToken = samlAssertion
si.content.sessionManager.LoginByToken()
finally:
soapStub.samlToken = None
return _doLogin
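# Minimal sketch (not part of the library) of how the login helpers above are
# meant to be combined with a session-oriented stub; host, credentials and the
# certificate paths are placeholders.
def _example_session_stub(host, username, password, key_file, cert_file):
    soap_stub = SoapStubAdapter(host=host, port=443, version="vim.version.version6",
                                certKeyFile=key_file, certFile=cert_file)
    login = VimSessionOrientedStub.makeUserLoginMethod(username, password)
    session_stub = VimSessionOrientedStub(soap_stub, login)
    # Calls made through this service instance re-login automatically when the
    # session expires (see SESSION_EXCEPTIONS above).
    return vim.ServiceInstance("ServiceInstance", session_stub)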
def Connect(host='localhost', port=443, user='root', pwd='',
service="hostd", adapter="SOAP", namespace=None, path="/sdk",
version=None, keyFile=None, certFile=None, thumbprint=None,
sslContext=None, b64token=None, mechanism='userpass'):
"""
Connect to the specified server, login and return the service
instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param namespace: Namespace *** Deprecated: Use version instead ***
@type namespace: string
@param path: Path
@type path: string
@param version: Version
@type version: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param b64token: base64 encoded token
@type b64token: string
@param mechanism: authentication mechanism: userpass or sspi
@type mechanism: string
"""
try:
info = re.match(_rx, host)
if info is not None:
host = info.group(1)
if host[0] == '[':
host = info.group(1)[1:-1]
if info.group(2) is not None:
port = int(info.group(2)[1:])
except ValueError as ve:
pass
if namespace:
assert(version is None)
version = versionMap[namespace]
elif not version:
version = "vim.version.version6"
si, stub = None, None
if mechanism == 'userpass':
si, stub = __Login(host, port, user, pwd, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext)
elif mechanism == 'sspi':
si, stub = __LoginBySSPI(host, port, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, b64token)
else:
raise Exception('''The provided connection mechanism is not available, the
supported mechanisms are userpass or sspi''')
SetSi(si)
return si
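# Basic usage sketch (hypothetical host and credentials): connect, use the
# returned service instance, and always log out so the session is not leaked.
def _example_connect(host, user, pwd):
    si = Connect(host=host, user=user, pwd=pwd)
    try:
        return si.content.about.fullName
    finally:
        Disconnect(si)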
def Disconnect(si):
"""
Disconnect (logout) service instance
@param si: Service instance (returned from Connect)
"""
# Logout
__Logout(si)
SetSi(None)
## Method that gets a local ticket for the specified user
def GetLocalTicket(si, user):
try:
sessionManager = si.content.sessionManager
except Exception as e:
if type(e).__name__ == 'ExpatError':
msg = 'Malformed response while querying for local ticket: "%s"' % e
raise vim.fault.HostConnectFault(msg=msg)
else:
msg = 'Failed to query for local ticket: "%s"' % e
raise vim.fault.HostConnectFault(msg=msg)
localTicket = sessionManager.AcquireLocalTicket(userName=user)
    return (localTicket.userName, open(localTicket.passwordFilePath).read())
## Private method that performs the actual Connect and returns a
## connected service instance object.
def __Login(host, port, user, pwd, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext):
"""
Private method that performs the actual Connect and returns a
connected service instance object.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
content, si, stub = __RetrieveContent(host, port, adapter, version, path,
keyFile, certFile, thumbprint, sslContext)
# Get a ticket if we're connecting to localhost and password is not specified
if host == 'localhost' and not pwd:
try:
(user, pwd) = GetLocalTicket(si, user)
except:
pass # This is not supported against vCenter, and connecting
# with an empty password is fine in debug builds
# Login
try:
x = content.sessionManager.Login(user, pwd, None)
except vim.fault.InvalidLogin:
raise
except Exception as e:
raise
return si, stub
## Private method that performs LoginBySSPI and returns a
## connected service instance object.
## Copyright (c) 2015 Morgan Stanley. All rights reserved.
def __LoginBySSPI(host, port, service, adapter, version, path,
keyFile, certFile, thumbprint, sslContext, b64token):
"""
Private method that performs the actual Connect and returns a
connected service instance object.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param service: Service
@type service: string
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
@param b64token: base64 encoded token
@type b64token: string
"""
content, si, stub = __RetrieveContent(host, port, adapter, version, path,
keyFile, certFile, thumbprint, sslContext)
if b64token is None:
raise Exception('Token is not defined for sspi login')
# Login
try:
x = content.sessionManager.LoginBySSPI(b64token)
except vim.fault.InvalidLogin:
raise
except Exception as e:
raise
return si, stub
## Private method that performs the actual Disonnect
def __Logout(si):
"""
Disconnect (logout) service instance
@param si: Service instance (returned from Connect)
"""
try:
if si:
content = si.RetrieveContent()
content.sessionManager.Logout()
except Exception as e:
pass
## Private method that returns the service content
def __RetrieveContent(host, port, adapter, version, path, keyFile, certFile,
thumbprint, sslContext):
"""
Retrieve service instance for connection.
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param adapter: Adapter
@type adapter: string
@param version: Version
@type version: string
@param path: Path
@type path: string
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
"""
# XXX remove the adapter and service arguments once dependent code is fixed
if adapter != "SOAP":
raise ValueError(adapter)
# Create the SOAP stub adapter
stub = SoapStubAdapter(host, port, version=version, path=path,
certKeyFile=keyFile, certFile=certFile,
thumbprint=thumbprint, sslContext=sslContext)
# Get Service instance
si = vim.ServiceInstance("ServiceInstance", stub)
content = None
try:
content = si.RetrieveContent()
except vmodl.MethodFault:
raise
except Exception as e:
# NOTE (hartsock): preserve the traceback for diagnostics
# pulling and preserving the traceback makes diagnosing connection
# failures easier since the fault will also include where inside the
# library the fault occurred. Without the traceback we have no idea
# why the connection failed beyond the message string.
(type, value, traceback) = sys.exc_info()
if traceback:
fault = vim.fault.HostConnectFault(msg=str(e))
reraise(vim.fault.HostConnectFault, fault, traceback)
else:
raise vim.fault.HostConnectFault(msg=str(e))
return content, si, stub
## Get the saved service instance.
def GetSi():
""" Get the saved service instance. """
return _si
## Set the saved service instance.
def SetSi(si):
""" Set the saved service instance. """
global _si
_si = si
## Get the global saved stub
def GetStub():
""" Get the global saved stub. """
si = GetSi()
if si:
return si._GetStub()
    return None
## RAII-style class for managing connections
class Connection(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.si = None
def __enter__(self):
self.si = Connect(*self.args, **self.kwargs)
return self.si
def __exit__(self, *exc_info):
if self.si:
Disconnect(self.si)
self.si = None
class SmartConnection(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.si = None
def __enter__(self):
self.si = SmartConnect(*self.args, **self.kwargs)
return self.si
def __exit__(self, *exc_info):
if self.si:
Disconnect(self.si)
self.si = None
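# Context-manager sketch (hypothetical credentials): SmartConnection negotiates
# the API version and guarantees Disconnect() on exit, even when an exception
# is raised inside the block.
def _example_smart_connection(host, user, pwd):
    with SmartConnection(host=host, user=user, pwd=pwd) as si:
        return [entity.name for entity in si.content.rootFolder.childEntity]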
def __GetElementTree(protocol, server, port, path, sslContext):
"""
Private method that returns a root from ElementTree for a remote XML document.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
if protocol == "https":
kwargs = {"context": sslContext} if sslContext else {}
conn = http_client.HTTPSConnection(server, port=port, **kwargs)
elif protocol == "http":
conn = http_client.HTTPConnection(server, port=port)
else:
raise Exception("Protocol " + protocol + " not supported.")
conn.request("GET", path)
response = conn.getresponse()
if response.status == 200:
try:
tree = ElementTree.fromstring(response.read())
return tree
except ExpatError:
pass
return None
## Private method that returns an ElementTree describing the API versions
## supported by the specified server. The result will be vimServiceVersions.xml
## if it exists, otherwise vimService.wsdl if it exists, otherwise None.
def __GetServiceVersionDescription(protocol, server, port, path, sslContext):
"""
Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
tree = __GetElementTree(protocol, server, port,
path + "/vimServiceVersions.xml", sslContext)
if tree is not None:
return tree
tree = __GetElementTree(protocol, server, port,
path + "/vimService.wsdl", sslContext)
return tree
## Private method that returns true if the service version description document
## indicates that the desired version is supported
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
"""
Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
(eg. vim.version.version2.
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree
"""
root = serviceVersionDescription
if root.tag == 'namespaces':
# serviceVersionDescription appears to be a vimServiceVersions.xml document
if root.get('version') != '1.0':
raise RuntimeError('vimServiceVersions.xml has version %s,' \
' which is not understood' % (root.get('version')))
desiredVersionId = versionIdMap[desiredVersion]
supportedVersion = None
for namespace in root.findall('namespace'):
versionId = namespace.findtext('version')
if versionId == desiredVersionId:
return True
else:
for versionId in namespace.findall('priorVersions/version'):
if versionId.text == desiredVersionId:
return True
else:
# serviceVersionDescription must be a vimService.wsdl document
wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
supportedVersion = versionMap[importElement.get('namespace')[4:]]
if IsChildVersion(supportedVersion, desiredVersion):
return True
return False
## Private method that returns the most preferred API version supported by the
## specified server.
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions, sslContext):
"""
Private method that returns the most preferred API version supported by the
    specified server.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred.
@type preferredApiVersions: string or string list
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
serviceVersionDescription = __GetServiceVersionDescription(protocol,
server,
port,
path,
sslContext)
if serviceVersionDescription is None:
return None
if not isinstance(preferredApiVersions, list):
preferredApiVersions = [ preferredApiVersions ]
for desiredVersion in preferredApiVersions:
if __VersionIsSupported(desiredVersion, serviceVersionDescription):
return desiredVersion
return None
def SmartStubAdapter(host='localhost', port=443, path='/sdk',
url=None, sock=None, poolSize=5,
certFile=None, certKeyFile=None,
httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
thumbprint=None, cacertsFile=None, preferredApiVersions=None,
acceptCompressedResponses=True,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
samlToken=None, sslContext=None):
"""
Determine the most preferred API version supported by the specified server,
    then create a SOAP stub adapter using that version.
    The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version, which is renamed to preferredApiVersions.
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
supportedVersion = __FindSupportedVersion('https' if port > 0 else 'http',
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
return SoapStubAdapter(host=host, port=port, path=path,
url=url, sock=sock, poolSize=poolSize,
certFile=certFile, certKeyFile=certKeyFile,
httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort,
sslProxyPath=sslProxyPath, thumbprint=thumbprint,
cacertsFile=cacertsFile, version=supportedVersion,
acceptCompressedResponses=acceptCompressedResponses,
connectionPoolTimeout=connectionPoolTimeout,
samlToken=samlToken, sslContext=sslContext)
def SmartConnect(protocol='https', host='localhost', port=443, user='root', pwd='',
service="hostd", path="/sdk",
preferredApiVersions=None, keyFile=None, certFile=None,
thumbprint=None, sslContext=None, b64token=None, mechanism='userpass'):
"""
Determine the most preferred API version supported by the specified server,
then connect to the specified server using that API version, login and return
the service instance object.
Throws any exception back to caller. The service instance object is
also saved in the library for easy access.
Clients should modify the service parameter only when connecting to
a VMOMI server other than hostd/vpxd. For both of the latter, the
default value is fine.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param host: Which host to connect to.
@type host: string
@param port: Port
@type port: int
@param user: User
@type user: string
@param pwd: Password
@type pwd: string
@param service: Service
@type service: string
@param path: Path
@type path: string
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
@param keyFile: ssl key file path
@type keyFile: string
@param certFile: ssl cert file path
@type certFile: string
@param thumbprint: host cert thumbprint
@type thumbprint: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
supportedVersion = __FindSupportedVersion(protocol,
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
portNumber = protocol == "http" and -int(port) or int(port)
return Connect(host=host,
port=portNumber,
user=user,
pwd=pwd,
service=service,
adapter='SOAP',
version=supportedVersion,
path=path,
keyFile=keyFile,
certFile=certFile,
thumbprint=thumbprint,
sslContext=sslContext,
b64token=b64token,
mechanism=mechanism)
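# Sketch for lab setups that use self-signed certificates (Python 2.7.9+):
# pass an unverified SSL context. This disables certificate verification, so
# it should never be used against production vCenter/ESXi hosts.
def _example_smart_connect_no_verify(host, user, pwd):
    context = ssl._create_unverified_context()
    return SmartConnect(host=host, user=user, pwd=pwd, sslContext=context)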
def OpenUrlWithBasicAuth(url, user='root', pwd=''):
"""
Open the specified URL, using HTTP basic authentication to provide
the specified credentials to the server as part of the request.
Returns the response as a file-like object.
"""
return requests.get(url, auth=HTTPBasicAuth(user, pwd), verify=False)
def OpenPathWithStub(path, stub):
"""
Open the specified path using HTTP, using the host/port/protocol
associated with the specified stub. If the stub has a session cookie,
it is included with the HTTP request. Returns the response as a
file-like object.
"""
    if not hasattr(stub, 'scheme'):
        raise vmodl.fault.NotSupported()
    elif stub.scheme == http_client.HTTPConnection:
        protocol = 'http'
    elif stub.scheme == http_client.HTTPSConnection:
        protocol = 'https'
else:
raise vmodl.fault.NotSupported()
hostPort = stub.host
url = '%s://%s%s' % (protocol, hostPort, path)
headers = {}
if stub.cookie:
headers["Cookie"] = stub.cookie
return requests.get(url, headers=headers, verify=False)
| apache-2.0 | 266,203,138,452,200,600 | 34.784314 | 91 | 0.637637 | false |
pombredanne/bioneb | bioneb/sequence/data.py | 1 | 3729 | # Copyright 2009 New England Biolabs <[email protected]>
#
# This file is part of the BioNEB package released
# under the MIT license.
#
import string
DEGENERATES = {
"A": "A", "C": "C", "G": "G", "T": "T", "U": "U",
"W": "AT", "S": "CG", "M": "AC", "K": "GT", "R": "AG", "Y": "CT",
"B": "AGT", "D": "ACT", "H": "ACT", "V": "ACG", "N": "ACGT"
}
COMPLEMENTS = [
"ACGTUNSWMKRYVDHBacgtunswmkryvdhb",
"TGCAANSWKMYRBHDVtgcaanswkmyrbhdv"
]
TRANSTABLE = string.maketrans(COMPLEMENTS[0], COMPLEMENTS[1])
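# Example (not part of the original module) of what TRANSTABLE is for:
# reverse-complementing a nucleotide string with str.translate.
def reverse_complement(seq):
    """Return the reverse complement of seq, honouring degenerate codes."""
    return seq.translate(TRANSTABLE)[::-1]
# e.g. reverse_complement("ATGCN") == "NGCAT"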
# Three letter codes
AMINO_ACID_TLC = {
"ALA": "A",
"ASX": "B",
"CYS": "C",
"ASP": "D",
"GLU": "E",
"PHE": "F",
"GLY": "G",
"HIS": "H",
"ILE": "I",
"LYS": "K",
"LEU": "L",
"MET": "M",
"ASN": "N",
"PYL": "O",
"PRO": "P",
"GLN": "Q",
"ARG": "R",
"SER": "S",
"THR": "T",
"SEC": "U",
"VAL": "V",
"TRP": "W",
"XAA": "X",
"TYR": "Y",
"GLX": "Z",
# Due to Genbank awesomeness
"OTHER": "X",
"TERM": "*"
}
CODONS = ["%s%s%s" % (b1, b2, b3)
for b1 in "TCAG" for b2 in "TCAG" for b3 in "TCAG"]
CODON_TABLE_DATA = [
"""1
FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
---M---------------M---------------M----------------------------""",
"""2
FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSS**VVVVAAAADDEEGGGG
--------------------------------MMMM---------------M------------""",
"""3
FFLLSSSSYY**CCWWTTTTPPPPHHQQRRRRIIMMTTTTNNKKSSRRVVVVAAAADDEEGGGG
----------------------------------MM----------------------------""",
"""4
FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
--MM---------------M------------MMMM---------------M------------""",
"""5
FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSSSVVVVAAAADDEEGGGG
---M----------------------------MMMM---------------M------------""",
"""6
FFLLSSSSYYQQCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
-----------------------------------M----------------------------""",
"""9
FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG
-----------------------------------M---------------M------------""",
"""10
FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
---M---------------M------------MMMM---------------M------------""",
"""11
FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
---M---------------M------------MMMM---------------M------------""",
"""12
FFLLSSSSYY**CC*WLLLSPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
-------------------M---------------M----------------------------""",
"""13
FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNKKSSGGVVVVAAAADDEEGGGG
---M------------------------------MM---------------M------------""",
"""14
FFLLSSSSYYY*CCWWLLLLPPPPHHQQRRRRIIIMTTTTNNNKSSSSVVVVAAAADDEEGGGG
-----------------------------------M----------------------------""",
"""15
FFLLSSSSYY*QCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
-----------------------------------M----------------------------""",
"""16
FFLLSSSSYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
-----------------------------------M----------------------------""",
"""21
FFLLSSSSYY**CCWWLLLLPPPPHHQQRRRRIIMMTTTTNNNKSSSSVVVVAAAADDEEGGGG
-----------------------------------M---------------M------------""",
"""22
FFLLSS*SYY*LCC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
-----------------------------------M----------------------------""",
"""23
FF*LSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG
--------------------------------M--M---------------M------------"""
] | mit | 3,675,023,044,940,320,300 | 33.859813 | 74 | 0.44221 | false |
eharney/cinder | cinder/volume/drivers/tegile.py | 1 | 26097 | # Copyright (c) 2015 by Tegile Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Tegile storage.
"""
import ast
import json
import requests
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import utils as volume_utils
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
default_api_service = 'openstack'
TEGILE_API_PATH = 'zebi/api'
TEGILE_DEFAULT_BLOCK_SIZE = '32KB'
TEGILE_LOCAL_CONTAINER_NAME = 'Local'
DEBUG_LOGGING = False
tegile_opts = [
cfg.StrOpt('tegile_default_pool',
help='Create volumes in this pool'),
cfg.StrOpt('tegile_default_project',
help='Create volumes in this project')]
CONF = cfg.CONF
CONF.register_opts(tegile_opts, group=configuration.SHARED_CONF_GROUP)
def debugger(func):
"""Returns a wrapper that wraps func.
The wrapper will log the entry and exit points of the function
"""
def wrapper(*args, **kwds):
if DEBUG_LOGGING:
LOG.debug('Entering %(classname)s.%(funcname)s',
{'classname': args[0].__class__.__name__,
'funcname': func.__name__})
LOG.debug('Arguments: %(args)s, %(kwds)s',
{'args': args[1:],
'kwds': kwds})
f_result = func(*args, **kwds)
if DEBUG_LOGGING:
LOG.debug('Exiting %(classname)s.%(funcname)s',
{'classname': args[0].__class__.__name__,
'funcname': func.__name__})
LOG.debug('Results: %(result)s',
{'result': f_result})
return f_result
return wrapper
class TegileAPIExecutor(object):
def __init__(self, classname, hostname, username, password):
self._classname = classname
self._hostname = hostname
self._username = username
self._password = password
@debugger
@utils.retry(exceptions=(requests.ConnectionError, requests.Timeout))
def send_api_request(self, method, params=None,
request_type='post',
api_service=default_api_service,
fine_logging=DEBUG_LOGGING):
if params is not None:
params = json.dumps(params)
url = 'https://%s/%s/%s/%s' % (self._hostname,
TEGILE_API_PATH,
api_service,
method)
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, '
'url: %(url)s', {'classname': self._classname,
'method': method,
'url': url})
if request_type == 'post':
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) '
'method: %(method)s, payload: %(payload)s',
{'classname': self._classname,
'method': method,
'payload': params})
req = requests.post(url,
data=params,
auth=(self._username, self._password),
verify=False)
else:
req = requests.get(url,
auth=(self._username, self._password),
verify=False)
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, '
'return code: %(retcode)s',
{'classname': self._classname,
'method': method,
'retcode': req})
try:
response = req.json()
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) '
'method: %(method)s, response: %(response)s',
{'classname': self._classname,
'method': method,
'response': response})
except ValueError:
response = ''
req.close()
if req.status_code != 200:
msg = _('API response: %(response)s') % {'response': response}
raise exception.TegileAPIException(msg)
return response
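# Illustrative usage sketch (the address and credentials below are
# placeholders): the driver classes construct one executor per backend and
# reuse it for every call, e.g.
#     executor = TegileAPIExecutor('TegileISCSIDriver', '10.0.0.5',
#                                  'admin', 'secret')
#     stats = executor.send_api_request(method='getArrayStats',
#                                       request_type='get')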
class TegileIntelliFlashVolumeDriver(san.SanDriver):
"""Tegile IntelliFlash Volume Driver."""
VENDOR = 'Tegile Systems Inc.'
VERSION = '1.0.0'
REQUIRED_OPTIONS = ['san_ip', 'san_login',
'san_password', 'tegile_default_pool']
SNAPSHOT_PREFIX = 'Manual-V-'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Tegile_Storage_CI"
# TODO(smcginnis) Remove driver in Queens if CI issues not fixed
SUPPORTED = False
_api_executor = None
def __init__(self, *args, **kwargs):
self._context = None
super(TegileIntelliFlashVolumeDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(tegile_opts)
self._protocol = 'iSCSI' # defaults to iscsi
hostname = getattr(self.configuration, 'san_ip')
username = getattr(self.configuration, 'san_login')
password = getattr(self.configuration, 'san_password')
self._default_pool = getattr(self.configuration, 'tegile_default_pool')
self._default_project = (
getattr(self.configuration, 'tegile_default_project') or
'openstack')
self._api_executor = TegileAPIExecutor(self.__class__.__name__,
hostname,
username,
password)
@debugger
def do_setup(self, context):
super(TegileIntelliFlashVolumeDriver, self).do_setup(context)
self._context = context
self._check_ops(self.REQUIRED_OPTIONS, self.configuration)
@debugger
def create_volume(self, volume):
pool = volume_utils.extract_host(volume['host'], level='pool',
default_pool_name=self._default_pool)
tegile_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE,
'datasetPath': '%s/%s/%s' %
(pool,
TEGILE_LOCAL_CONTAINER_NAME,
self._default_project),
'local': 'true',
'name': volume['name'],
'poolName': '%s' % pool,
'projectName': '%s' % self._default_project,
'protocol': self._protocol,
'thinProvision': 'true',
'volSize': volume['size'] * units.Gi}
params = list()
params.append(tegile_volume)
params.append(True)
self._api_executor.send_api_request(method='createVolume',
params=params)
LOG.info("Created volume %(volname)s, volume id %(volid)s.",
{'volname': volume['name'], 'volid': volume['id']})
return self.get_additional_info(volume, pool, self._default_project)
@debugger
def delete_volume(self, volume):
"""Deletes a snapshot."""
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(volume)
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name))
params.append(True)
params.append(False)
self._api_executor.send_api_request('deleteVolume', params)
@debugger
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_name = snapshot['name']
display_list = [getattr(snapshot, 'display_name', ''),
getattr(snapshot, 'display_description', '')]
snap_description = ':'.join(filter(None, display_list))
# Limit to 254 characters
snap_description = snap_description[:254]
pool, project, volume_name = self._get_pool_project_volume_name(
snapshot['volume'])
volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE,
'datasetPath': '%s/%s/%s' %
(pool,
TEGILE_LOCAL_CONTAINER_NAME,
project),
'local': 'true',
'name': volume_name,
'poolName': '%s' % pool,
'projectName': '%s' % project,
'protocol': self._protocol,
'thinProvision': 'true',
'volSize': snapshot['volume']['size'] * units.Gi}
params = list()
params.append(volume)
params.append(snap_name)
params.append(False)
LOG.info('Creating snapshot for volume_name=%(vol)s'
' snap_name=%(name)s snap_description=%(desc)s',
{'vol': volume_name,
'name': snap_name,
'desc': snap_description})
self._api_executor.send_api_request('createVolumeSnapshot', params)
@debugger
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(
snapshot['volume'])
params.append('%s/%s/%s/%s@%s%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name,
self.SNAPSHOT_PREFIX,
snapshot['name']))
params.append(False)
self._api_executor.send_api_request('deleteVolumeSnapshot', params)
@debugger
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from snapshot."""
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(
snapshot['volume'])
params.append('%s/%s/%s/%s@%s%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name,
self.SNAPSHOT_PREFIX,
snapshot['name']))
params.append(volume['name'])
params.append(True)
params.append(True)
self._api_executor.send_api_request('cloneVolumeSnapshot', params)
return self.get_additional_info(volume, pool, project)
@debugger
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
pool, project, volume_name = self._get_pool_project_volume_name(
src_vref)
data_set_path = '%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project)
source_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE,
'datasetPath': data_set_path,
'local': 'true',
'name': volume_name,
'poolName': '%s' % pool,
'projectName': '%s' % project,
'protocol': self._protocol,
'thinProvision': 'true',
'volSize': src_vref['size'] * units.Gi}
dest_volume = {'blockSize': TEGILE_DEFAULT_BLOCK_SIZE,
'datasetPath': data_set_path,
# clone can reside only in the source project
'local': 'true',
'name': volume['name'],
'poolName': '%s' % pool,
'projectName': '%s' % project,
'protocol': self._protocol,
'thinProvision': 'true',
'volSize': volume['size'] * units.Gi}
params = list()
params.append(source_volume)
params.append(dest_volume)
self._api_executor.send_api_request(method='createClonedVolume',
params=params)
return self.get_additional_info(volume, pool, project)
@debugger
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data
"""
if refresh:
try:
self._update_volume_stats()
except Exception:
pass
return self._stats
@debugger
def _update_volume_stats(self):
"""Retrieves stats info from volume group."""
try:
data = self._api_executor.send_api_request(method='getArrayStats',
request_type='get',
fine_logging=False)
# fixing values coming back here as String to float
data['total_capacity_gb'] = float(data.get('total_capacity_gb', 0))
data['free_capacity_gb'] = float(data.get('free_capacity_gb', 0))
for pool in data.get('pools', []):
pool['total_capacity_gb'] = float(
pool.get('total_capacity_gb', 0))
pool['free_capacity_gb'] = float(
pool.get('free_capacity_gb', 0))
pool['allocated_capacity_gb'] = float(
pool.get('allocated_capacity_gb', 0))
data['volume_backend_name'] = getattr(self.configuration,
'volume_backend_name')
data['vendor_name'] = self.VENDOR
data['driver_version'] = self.VERSION
data['storage_protocol'] = self._protocol
self._stats = data
except Exception as e:
LOG.warning('TegileIntelliFlashVolumeDriver(%(clsname)s) '
'_update_volume_stats failed: %(error)s',
{'clsname': self.__class__.__name__,
'error': e})
@debugger
def get_pool(self, volume):
"""Returns pool name where volume resides.
:param volume: The volume hosted by the driver.
:return: Name of the pool where given volume is hosted.
"""
pool = volume_utils.extract_host(volume['host'], level='pool',
default_pool_name=self._default_pool)
return pool
@debugger
def extend_volume(self, volume, new_size):
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(volume)
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name))
vol_size = six.text_type(new_size)
params.append(vol_size)
params.append('GB')
self._api_executor.send_api_request(method='resizeVolume',
params=params)
@debugger
def manage_existing(self, volume, existing_ref):
volume['name_id'] = existing_ref['name']
pool, project, volume_name = self._get_pool_project_volume_name(volume)
additional_info = self.get_additional_info(volume, pool, project)
        additional_info['_name_id'] = existing_ref['name']
return additional_info
@debugger
def manage_existing_get_size(self, volume, existing_ref):
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(volume)
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
existing_ref['name']))
volume_size = self._api_executor.send_api_request(
method='getVolumeSizeinGB',
params=params)
return volume_size
@debugger
def _get_pool_project_volume_name(self, volume):
pool = volume_utils.extract_host(volume['host'], level='pool',
default_pool_name=self._default_pool)
try:
project = volume['metadata']['project']
except (AttributeError, TypeError, KeyError):
project = self._default_project
if volume['_name_id'] is not None:
volume_name = volume['_name_id']
else:
volume_name = volume['name']
return pool, project, volume_name
@debugger
def get_additional_info(self, volume, pool, project):
try:
metadata = self._get_volume_metadata(volume)
except Exception:
metadata = dict()
metadata['pool'] = pool
metadata['project'] = project
return {'metadata': metadata}
@debugger
def _get_volume_metadata(self, volume):
volume_metadata = {}
if 'volume_metadata' in volume:
for metadata in volume['volume_metadata']:
volume_metadata[metadata['key']] = metadata['value']
if 'metadata' in volume:
metadata = volume['metadata']
for key in metadata:
volume_metadata[key] = metadata[key]
return volume_metadata
@debugger
def _check_ops(self, required_ops, configuration):
"""Ensures that the options we care about are set."""
for attr in required_ops:
if not getattr(configuration, attr, None):
raise exception.InvalidInput(reason=_('%(attr)s is not '
'set.') % {'attr': attr})
@interface.volumedriver
class TegileISCSIDriver(TegileIntelliFlashVolumeDriver, san.SanISCSIDriver):
"""Tegile ISCSI Driver."""
def __init__(self, *args, **kwargs):
super(TegileISCSIDriver, self).__init__(*args, **kwargs)
self._protocol = 'iSCSI'
@debugger
def do_setup(self, context):
super(TegileISCSIDriver, self).do_setup(context)
@debugger
def initialize_connection(self, volume, connector):
"""Driver entry point to attach a volume to an instance."""
if getattr(self.configuration, 'use_chap_auth', False):
chap_username = getattr(self.configuration, 'chap_username', '')
chap_password = getattr(self.configuration, 'chap_password', '')
else:
chap_username = ''
chap_password = ''
if volume['provider_location'] is None:
params = list()
pool, project, volume_name = (
self._get_pool_project_volume_name(volume))
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name))
initiator_info = {
'initiatorName': connector['initiator'],
'chapUserName': chap_username,
'chapSecret': chap_password
}
params.append(initiator_info)
mapping_info = self._api_executor.send_api_request(
method='getISCSIMappingForVolume',
params=params)
target_portal = mapping_info['target_portal']
target_iqn = mapping_info['target_iqn']
target_lun = mapping_info['target_lun']
else:
(target_portal, target_iqn, target_lun) = (
volume['provider_location'].split())
connection_data = dict()
connection_data['target_portal'] = target_portal
connection_data['target_iqn'] = target_iqn
connection_data['target_lun'] = int(target_lun)
        connection_data['target_discovered'] = False
        connection_data['volume_id'] = volume['id']
connection_data['discard'] = False
if getattr(self.configuration, 'use_chap_auth', False):
connection_data['auth_method'] = 'CHAP'
connection_data['auth_username'] = chap_username
connection_data['auth_password'] = chap_password
return {
'driver_volume_type': 'iscsi',
'data': connection_data
}
@debugger
def terminate_connection(self, volume, connector, **kwargs):
pass
@debugger
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(volume)
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name))
if getattr(self.configuration, 'use_chap_auth', False):
chap_username = getattr(self.configuration, 'chap_username', '')
chap_password = getattr(self.configuration, 'chap_password', '')
else:
chap_username = ''
chap_password = ''
initiator_info = {
'initiatorName': connector['initiator'],
'chapUserName': chap_username,
'chapSecret': chap_password
}
params.append(initiator_info)
mapping_info = self._api_executor.send_api_request(
method='getISCSIMappingForVolume',
params=params)
target_portal = mapping_info['target_portal']
target_iqn = mapping_info['target_iqn']
target_lun = int(mapping_info['target_lun'])
provider_location = '%s %s %s' % (target_portal,
target_iqn,
target_lun)
if getattr(self.configuration, 'use_chap_auth', False):
provider_auth = ('CHAP %s %s' % (chap_username,
chap_password))
else:
provider_auth = None
return (
{'provider_location': provider_location,
'provider_auth': provider_auth})
@interface.volumedriver
class TegileFCDriver(TegileIntelliFlashVolumeDriver,
driver.FibreChannelDriver):
"""Tegile FC driver."""
def __init__(self, *args, **kwargs):
super(TegileFCDriver, self).__init__(*args, **kwargs)
self._protocol = 'FC'
@debugger
def do_setup(self, context):
super(TegileFCDriver, self).do_setup(context)
@fczm_utils.add_fc_zone
@debugger
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info."""
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(volume)
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name))
wwpns = connector['wwpns']
connectors = ','.join(wwpns)
params.append(connectors)
target_info = self._api_executor.send_api_request(
method='getFCPortsForVolume',
params=params)
initiator_target_map = target_info['initiator_target_map']
connection_data = {
'driver_volume_type': 'fibre_channel',
'data': {
'encrypted': False,
'target_discovered': False,
'target_lun': int(target_info['target_lun']),
'target_wwn': ast.literal_eval(target_info['target_wwn']),
'initiator_target_map': ast.literal_eval(initiator_target_map)
}
}
return connection_data
@fczm_utils.remove_fc_zone
@debugger
def terminate_connection(self, volume, connector, force=False, **kwargs):
params = list()
pool, project, volume_name = self._get_pool_project_volume_name(volume)
params.append('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
volume_name))
wwpns = connector['wwpns']
connectors = ','.join(wwpns)
params.append(connectors)
target_info = self._api_executor.send_api_request(
method='getFCPortsForVolume',
params=params)
initiator_target_map = target_info['initiator_target_map']
connection_data = {
'data': {
'target_wwn': ast.literal_eval(target_info['target_wwn']),
'initiator_target_map': ast.literal_eval(initiator_target_map)
}
}
return connection_data
| apache-2.0 | 6,672,828,031,298,610,000 | 38.243609 | 79 | 0.515921 | false |
gkioxari/RstarCNN | lib/fast_rcnn/test_scene.py | 1 | 8794 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# --------------------------------------------------------
# R*CNN
# Written by Georgia Gkioxari, 2015.
# See LICENSE in the project root for license information.
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
import scipy.io as sio
import utils.cython_bbox
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
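# Worked example (illustrative): with scales = [0.5, 1.0] and a roughly
# 300x200 px RoI, the scaled areas are about 60000 * 0.25 = 15000 and
# 60000 * 1.0 = 60000; the level whose area is closest to 224 * 224 = 50176
# is picked, here level 1.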
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None, 'secondary_rois': None}
im_size = im.shape
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
scene_roi = np.array([0, 0, im_size[1]-1, im_size[0]-1], ndmin=2, dtype=np.float32)
scene_roi = np.tile(scene_roi, [rois.shape[0], 1])
blobs['secondary_rois'] = _get_rois_blob(scene_roi, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
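# Worked example (illustrative): for the box (0, 0, 10, 10) with deltas
# (dx, dy, dw, dh) = (0.1, 0, 0, 0), the centre moves from x = 5 to
# 5 + 0.1 * 10 = 6 while width/height stay exp(0) * 10 = 10, so the
# predicted box is (1, 0, 11, 10) before clipping.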
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes):
"""Detect classes in an image given object proposals.
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
base_shape = blobs['data'].shape
blobs_rois = blobs['rois'].astype(np.float32, copy=False)
blobs_rois = blobs_rois[:, :, np.newaxis, np.newaxis]
blobs_sec_rois = blobs['secondary_rois'].astype(np.float32, copy=False)
blobs_sec_rois = blobs_sec_rois[:, :, np.newaxis, np.newaxis]
num_rois = blobs_rois.shape[0]
num_sec_rois = blobs_sec_rois.shape[0]
# reshape network inputs
net.blobs['data'].reshape(base_shape[0], base_shape[1],
base_shape[2], base_shape[3])
net.blobs['rois'].reshape(num_rois, 5, 1, 1)
net.blobs['secondary_rois'].reshape(num_sec_rois, 5, 1, 1)
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs_rois,
secondary_rois = blobs_sec_rois)
scores = blobs_out['cls_prob']
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
return scores
def vis_detections(im, boxes, scores, classes):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(1):
bbox = boxes[i, :4]
sscore = scores[i, :]
cls_ind = sscore.argmax()
sscore = sscore.max()
#plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.title('{} {:.3f}'.format(classes[cls_ind], sscore))
plt.show()
def test_net(net, imdb):
"""Test a R*CNN network on an image database."""
num_images = len(imdb.image_index)
num_classes = imdb.num_classes
all_boxes = np.zeros((0, 2+num_classes), dtype = np.float32)
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# timers
_t = {'im_detect' : Timer()}
roidb = imdb.roidb
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(i))
gt = np.where(roidb[i]['gt_classes']>-1)[0]
gt_boxes = roidb[i]['boxes'][gt]
_t['im_detect'].tic()
scores = im_detect(net, im, gt_boxes)
_t['im_detect'].toc()
# Visualize detections
# vis_detections(im, gt_boxes, scores, imdb.classes)
for j in xrange(gt_boxes.shape[0]):
# store image id and voc_id (1-indexed)
temp = np.array([i+1, j+1], ndmin=2)
temp = np.concatenate((temp, np.array(scores[j,:],ndmin=2)), axis=1)
all_boxes = np.concatenate((all_boxes, temp), axis=0)
print 'im_detect: {:d}/{:d} {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time)
print 'Writing VOC results'
imdb._write_voc_results_file(all_boxes)
| bsd-2-clause | 2,369,762,761,901,376,500 | 33.085271 | 87 | 0.576984 | false |
Hammerite/LPPGP | lex.py | 1 | 8816 | import ply.lex
import re
from .customised_lexer import CustomisedLexer
from ._lex_utilities import *
maximum_identifier_length = 99
states = (
('singleQuotedString', 'exclusive'),
('doubleQuotedString', 'exclusive'),
)
reserved_words = (
'boolean', 'number', 'vector', 'point', 'line', 'plane',
'horizontal', 'vertical', 'diagonal', 'antidiagonal',
'declare', 'variable', 'constant', 'with',
'put',
'on', 'through',
'set', 'of',
'move', 'along',
'rotate', 'clockwise', 'anticlockwise', 'degrees', 'about',
'forget',
'message', 'error',
'assert',
'if', 'else', 'end', 'scope',
'stop',
'and', 'or', 'xor', 'not',
'uv', 'uvzs',
'abs', 'length',
'displacement', 'distance', 'from', 'to',
'intersection',
'locus',
'true', 'false',
)
tokens = (
'semicolon', 'colon',
'comment',
'dollarSign',
'fractionSlash', 'atSign',
'questionMark',
'openingBracket', 'closingBracket',
'openingSquareBracket', 'closingSquareBracket',
'singleQuote', 'doubleQuote',
'escapedSingleQuote', 'escapedDoubleQuote', 'escapedBackslash',
'unpartneredBackslash',
'genericCharacters',
'quotedString',
'plus', 'minus', 'times', 'dividedBy',
'equals',
'greaterThan', 'greaterThanOrEqual', 'lessThan', 'lessThanOrEqual',
'elbowHorizontalVertical', 'elbowHorizontalDiagonal',
'elbowHorizontalAntidiagonal', 'elbowVerticalHorizontal',
'elbowVerticalDiagonal', 'elbowVerticalAntidiagonal',
'elbowDiagonalHorizontal', 'elbowDiagonalVertical',
'elbowDiagonalAntidiagonal', 'elbowAntidiagonalHorizontal',
'elbowAntidiagonalVertical', 'elbowAntidiagonalDiagonal',
'projectionOntoHorizontal', 'projectionOntoVertical',
'projectionOntoDiagonal', 'projectionOntoAntidiagonal',
'orthogonalProjectionOnto',
'identifier',
'numberDecimal', 'numberInteger',
'errorWithAFirstCharacterThatMightOtherwiseBeValid'
) + reserved_words
t_ignore = ' \t'
def t_newlines (t):
r'[\r\n]+'
t.lexer.lineno += count_newlines(t.value)
t.lexer.line_start_positions.append(t.lexer.lexpos)
def t_error (t):
t.lexer.character_error(t.value)
t_semicolon = r';'
t_colon = r':'
t_dollarSign = r'\$'
t_fractionSlash = r'/'
t_atSign = r'@'
t_questionMark = r'\?'
t_openingBracket = r'\('
t_closingBracket = r'\)'
t_openingSquareBracket = r'\['
t_closingSquareBracket = r'\]'
def t_comment (t):
r'\#[^\r\n]*'
pass
def t_singleQuote (t):
"'"
t.lexer.begin('singleQuotedString')
t.lexer.current_string = ''
t.lexer.current_string_start_position = t.lexpos
def t_doubleQuote (t):
'"'
t.lexer.begin('doubleQuotedString')
t.lexer.current_string = ''
t.lexer.current_string_start_position = t.lexpos
def t_singleQuotedString_singleQuote (t):
"'"
t.lexer.begin('INITIAL')
t.type = 'quotedString'
t.value = t.lexer.current_string.strip()
t.lexpos = t.lexer.current_string_start_position
return t
def t_doubleQuotedString_doubleQuote (t):
'"'
t.lexer.begin('INITIAL')
t.type = 'quotedString'
t.value = t.lexer.current_string.strip()
t.lexpos = t.lexer.current_string_start_position
return t
def t_singleQuotedString_escapedSingleQuote (t):
r"\'"
t.lexer.current_string += "'"
def t_doubleQuotedString_escapedDoubleQuote (t):
r'\"'
t.lexer.current_string += '"'
def t_singleQuotedString_escapedBackslash (t):
r'\\\\'
t.lexer.current_string += '\\'
t_doubleQuotedString_escapedBackslash = t_singleQuotedString_escapedBackslash
def t_singleQuotedString_unpartneredBackslash (t):
r'\\'
t.lexer.add_error(
'Quoted strings may not contain unpartnered backslashes; '
'if you want a backslash, then escape it with another backslash',
t.lexer.lineno,
t.lexer.lexpos
)
t_doubleQuotedString_unpartneredBackslash = t_singleQuotedString_unpartneredBackslash
@ply.lex.TOKEN('[%s"]+' % generic_characters)
def t_singleQuotedString_genericCharacters (t):
t.lexer.current_string += t.value
@ply.lex.TOKEN("[%s']+" % generic_characters)
def t_doubleQuotedString_genericCharacters (t):
t.lexer.current_string += t.value
t_doubleQuotedString_ignore = t_singleQuotedString_ignore = ''
def t_singleQuotedString_newline (t):
r'[\r\n]+'
t.lexer.add_error('Quoted strings may not contain newlines', t.lexer.lineno, None)
t.lexer.current_string += t.value
t.lexer.lineno += count_newlines(t.value)
t.lexer.line_start_positions.append(t.lexer.lexpos)
t_doubleQuotedString_newline = t_singleQuotedString_newline
def t_singleQuotedString_error (t):
try:
t.lexer.add_error(
character_error_message(
t.value[0],
'Quoted strings may contain only ASCII characters; '
'the character %s is not allowed',
'Quoted strings may not contain control characters; '
'the character %s is not allowed'
),
t.lexer.lineno,
t.lexer.lexpos
)
finally:
t.lexer.skip_past_error(t.value)
t_doubleQuotedString_error = t_singleQuotedString_error
t_plus = r'\+'
t_minus = r'-'
t_times = r'\*'
t_dividedBy = r'//'
t_equals = '='
t_greaterThan = '>'
t_greaterThanOrEqual = '>='
t_lessThan = '<'
t_lessThanOrEqual = '<='
t_elbowHorizontalVertical = literal_hyphen_minus + literal_vertical_pipe
t_elbowHorizontalDiagonal = literal_hyphen_minus + literal_backslash
t_elbowHorizontalAntidiagonal = literal_hyphen_minus + literal_slash
t_elbowVerticalHorizontal = literal_vertical_pipe + literal_hyphen_minus
t_elbowVerticalDiagonal = literal_vertical_pipe + literal_backslash
t_elbowVerticalAntidiagonal = literal_vertical_pipe + literal_slash
t_elbowDiagonalHorizontal = literal_backslash + literal_hyphen_minus
t_elbowDiagonalVertical = literal_backslash + literal_vertical_pipe
t_elbowDiagonalAntidiagonal = literal_backslash + literal_slash
t_elbowAntidiagonalHorizontal = literal_slash + literal_hyphen_minus
t_elbowAntidiagonalVertical = literal_slash + literal_vertical_pipe
t_elbowAntidiagonalDiagonal = literal_slash + literal_backslash
t_projectionOntoHorizontal = literal_hyphen_minus + '>'
t_projectionOntoVertical = literal_vertical_pipe + '>'
t_projectionOntoDiagonal = literal_backslash + '>'
t_projectionOntoAntidiagonal = literal_slash + '>'
t_orthogonalProjectionOnto = '!>'
def t_identifier (t):
'[a-z][a-z0-9_]*'
t.value = t.value.lower()
if t.value in reserved_words:
t.type = t.value
elif (
len(t.value) > maximum_identifier_length and
t.value not in t.lexer.long_identifiers
):
t.lexer.long_identifiers.append(t.value)
t.lexer.add_error(
'The identifier "%s" is too long at %d characters - '
'the maximum allowed is %d characters'
% (t.value, len(t.value), maximum_identifier_length),
t.lexer.lineno,
t.lexer.lexpos
)
return t
def t_numberDecimal (t):
'0|-?[1-9][0-9]*\.[0-9]+'
t.value = float(t.value)
return t
def t_numberInteger (t):
'0|-?[1-9][0-9]*'
t.value = int(t.value)
return t
def t_errorWithAFirstCharacterThatMightOtherwiseBeValid (t):
r'(\|(?![\-\\/>])|\\(?![\-\|/>])|/(?![\-\|\\>])|!(?!>))(.|[\r\n])?'
if len(t.value) == 1:
t.lexer.add_error(
'Unexpected end of input after "%s"' % t.value[0],
t.lexer.lineno,
None,
-1
)
elif t.value[1] in '\r\n\t ':
# We don't use string.whitespace here because \r, \n, \t and space are the only
# whitespace characters that can legally appear in the program outside of
        # comments, and if t.value[1] is some other whitespace character then we want to
# generate the same error message for it as we would if it occurred as the
# "leading" character in an error.
try:
t.lexer.add_error(
'Unexpected whitespace after "%s"' % t.value[0],
t.lexer.lineno,
t.lexer.lexpos,
-1
)
finally:
t.lexer.lexpos -= 1
# Otherwise the whitespace character after the error will be skipped over,
# which would be bad if it's \r or \n, as we are trying to track newlines
# accurately.
else:
t.lexer.character_error(t.lexer.lexdata[t.lexer.lexpos - 1 : ], -1)
lexer = CustomisedLexer.from_lexer(ply.lex.lex(reflags = re.IGNORECASE))
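# Illustrative usage sketch (assumes CustomisedLexer keeps the standard PLY
# lexer interface; the input string below is made up):
#     lexer.input("declare variable p : point ;")
#     for token in iter(lexer.token, None):
#         print(token.type, token.value)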
| mit | -6,751,100,139,000,333,000 | 31.531365 | 87 | 0.626928 | false |
bmedx/modulestore | xmodule/modulestore/xml_importer.py | 1 | 47786 | """
Each store has slightly different semantics wrt draft v published. XML doesn't officially recognize draft
but does hold it in a subdir. Old mongo has a virtual but not physical draft for every unit in published state.
Split mongo has a physical for every unit in every state.
Given that, here's a table of semantics and behaviors where - means no record and letters indicate values.
For xml, (-, x) means the item is published and can be edited. For split, it means the item's
been deleted from draft and will be deleted from published the next time it gets published. old mongo
can't represent that virtual state (2nd row in table)
In the table body, the tuples represent virtual modulestore result. The row headers represent the pre-import
modulestore state.
Modulestore virtual | XML physical (draft, published)
(draft, published) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
----------------------+--------------------------------------------
(-, -) | (-, -) | (x, -) | (x, x) | (x, y) | (-, x)
(-, a) | (-, a) | (x, a) | (x, x) | (x, y) | (-, x) : deleted from draft before import
(a, -) | (a, -) | (x, -) | (x, x) | (x, y) | (a, x)
(a, a) | (a, a) | (x, a) | (x, x) | (x, y) | (a, x)
(a, b) | (a, b) | (x, b) | (x, x) | (x, y) | (a, x)
"""
import logging
from abc import abstractmethod
from opaque_keys.edx.locator import LibraryLocator
import os
import mimetypes
from path import Path as path
import json
import re
from lxml import etree
# from xmodule.library_tools import LibraryToolsService
from xmodule.modulestore.xml import XMLModuleStore, LibraryXMLModuleStore, ImportSystem
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor, XModuleMixin
from opaque_keys.edx.keys import UsageKey
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.util.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.django import ASSET_IGNORE_REGEX
from xmodule.modulestore.exceptions import DuplicateCourseError
from xmodule.modulestore.mongo.base import MongoRevisionKey
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.store_utilities import draft_node_constructor, get_draft_subtree_roots
from xmodule.modulestore.tests.utils import LocationMixin
from xmodule.util.misc import escape_invalid_characters
log = logging.getLogger(__name__)
def import_static_content(
course_data_path, static_content_store,
target_id, subpath='static', verbose=False):
remap_dict = {}
# now import all static assets
static_dir = course_data_path / subpath
try:
with open(course_data_path / 'policies/assets.json') as f:
policy = json.load(f)
except (IOError, ValueError) as err:
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
policy = {}
verbose = True
mimetypes.add_type('application/octet-stream', '.sjson')
mimetypes.add_type('application/octet-stream', '.srt')
mimetypes_list = mimetypes.types_map.values()
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
content_path = os.path.join(dirname, filename)
if re.match(ASSET_IGNORE_REGEX, filename):
if verbose:
log.debug('skipping static content %s...', content_path)
continue
if verbose:
log.debug('importing static content %s...', content_path)
try:
with open(content_path, 'rb') as f:
data = f.read()
except IOError:
if filename.startswith('._'):
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
continue
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
fullname_with_subpath = content_path.replace(static_dir, '')
if fullname_with_subpath.startswith('/'):
fullname_with_subpath = fullname_with_subpath[1:]
asset_key = StaticContent.compute_location(target_id, fullname_with_subpath)
policy_ele = policy.get(asset_key.path, {})
# During export display name is used to create files, strip away slashes from name
displayname = escape_invalid_characters(
name=policy_ele.get('displayname', filename),
invalid_char_list=['/', '\\']
)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
asset_key, displayname, mime_type, data,
import_path=fullname_with_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
static_content_store.save(content)
except Exception as err:
log.exception(u'Error importing {0}, error={1}'.format(
fullname_with_subpath, err
))
# store the remapping information which will be needed
            # to substitute in the module data
remap_dict[fullname_with_subpath] = asset_key
return remap_dict
class ImportManager(object):
"""
Import xml-based courselikes from data_dir into modulestore.
Returns:
list of new courselike objects
Args:
store: a modulestore implementing ModuleStoreWriteBase in which to store the imported courselikes.
data_dir: the root directory from which to find the xml courselikes.
source_dirs: If specified, the list of data_dir subdirectories to load. Otherwise, load
all dirs
target_id: is the Locator that all modules should be remapped to
after import off disk. NOTE: this only makes sense if importing only
one courselike. If there are more than one courselike loaded from data_dir/source_dirs & you
supply this id, an AssertException will be raised.
static_content_store: the static asset store
do_import_static: if True, then import the courselike's static files into static_content_store
This can be employed for courselikes which have substantial
unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
create_if_not_present: If True, then a new courselike is created if it doesn't already exist.
Otherwise, it throws an InvalidLocationError if the courselike does not exist.
default_class, load_error_modules: are arguments for constructing the XMLModuleStore (see its doc)
"""
store_class = XMLModuleStore
def __init__(
self, store, user_id, data_dir, source_dirs=None,
default_class='xmodule.default_module.DefaultDescriptor',
load_error_modules=True, static_content_store=None,
target_id=None, verbose=False,
do_import_static=True, create_if_not_present=False,
raise_on_failure=False
):
self.store = store
self.user_id = user_id
self.data_dir = data_dir
self.source_dirs = source_dirs
self.load_error_modules = load_error_modules
self.static_content_store = static_content_store
self.target_id = target_id
self.verbose = verbose
self.do_import_static = do_import_static
self.create_if_not_present = create_if_not_present
self.raise_on_failure = raise_on_failure
self.xml_module_store = self.store_class(
data_dir,
default_class=default_class,
source_dirs=source_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
target_course_id=target_id,
)
self.logger, self.errors = make_error_tracker()
def preflight(self):
"""
Perform any pre-import sanity checks.
"""
# If we're going to remap the ID, then we can only do that with
# a single target
if self.target_id:
assert len(self.xml_module_store.modules) == 1
def import_static(self, data_path, dest_id):
"""
Import all static items into the content store.
"""
if self.static_content_store is not None and self.do_import_static:
# first pass to find everything in /static/
import_static_content(
data_path, self.static_content_store,
dest_id, subpath='static', verbose=self.verbose
)
elif self.verbose and not self.do_import_static:
log.debug(
"Skipping import of static content, "
"since do_import_static=%s", self.do_import_static
)
# no matter what do_import_static is, import "static_import" directory
# This is needed because the "about" pages (eg "overview") are
# loaded via load_extra_content, and do not inherit the lms
# metadata from the course module, and thus do not get
# "static_content_store" properly defined. Static content
# referenced in those extra pages thus need to come through the
# c4x:// contentstore, unfortunately. Tell users to copy that
# content into the "static_import" subdir.
simport = 'static_import'
if os.path.exists(data_path / simport):
import_static_content(
data_path, self.static_content_store,
dest_id, subpath=simport, verbose=self.verbose
)
def import_asset_metadata(self, data_dir, course_id):
"""
Read in assets XML file, parse it, and add all asset metadata to the modulestore.
"""
asset_dir = path(data_dir) / AssetMetadata.EXPORTED_ASSET_DIR
assets_filename = AssetMetadata.EXPORTED_ASSET_FILENAME
asset_xml_file = asset_dir / assets_filename
def make_asset_id(course_id, asset_xml):
"""
Construct an asset ID out of a complete asset XML section.
"""
asset_type = None
asset_name = None
for child in asset_xml.iterchildren():
if child.tag == AssetMetadata.ASSET_TYPE_ATTR:
asset_type = child.text
elif child.tag == AssetMetadata.ASSET_BASENAME_ATTR:
asset_name = child.text
return course_id.make_asset_key(asset_type, asset_name)
all_assets = []
try:
xml_data = etree.parse(asset_xml_file).getroot()
assert xml_data.tag == AssetMetadata.ALL_ASSETS_XML_TAG
for asset in xml_data.iterchildren():
if asset.tag == AssetMetadata.ASSET_XML_TAG:
# Construct the asset key.
asset_key = make_asset_id(course_id, asset)
asset_md = AssetMetadata(asset_key)
asset_md.from_xml(asset)
all_assets.append(asset_md)
except IOError:
logging.info('No %s file is present with asset metadata.', assets_filename)
return
except Exception: # pylint: disable=W0703
logging.exception('Error while parsing asset xml.')
if self.raise_on_failure:
raise
else:
return
# Now add all asset metadata to the modulestore.
if len(all_assets) > 0:
self.store.save_asset_metadata_list(all_assets, all_assets[0].edited_by, import_only=True)
def import_courselike(self, runtime, courselike_key, dest_id, source_courselike):
"""
Import the base module/block
"""
if self.verbose:
log.debug("Scanning %s for courselike module...", courselike_key)
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
course_data_path = path(self.data_dir) / source_courselike.data_dir
log.debug(u'======> IMPORTING courselike %s', courselike_key)
if not self.do_import_static:
# for old-style xblock where this was actually linked to kvs
source_courselike.static_asset_path = source_courselike.data_dir
source_courselike.save()
log.debug('course static_asset_path=%s', source_courselike.static_asset_path)
log.debug('course data_dir=%s', source_courselike.data_dir)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_id):
course = _update_and_import_module(
source_courselike, self.store, self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=runtime,
)
self.static_updater(course, source_courselike, courselike_key, dest_id, runtime)
self.store.update_item(course, self.user_id)
return course, course_data_path
@abstractmethod
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Updates any special static items, such as PDF coursebooks.
"""
pass
@abstractmethod
def get_dest_id(self, courselike_key):
"""
Given a courselike_key, get the version of the key that will actually be used in the modulestore
for import.
"""
raise NotImplementedError
@abstractmethod
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Given a key, a runtime, and an intended destination key, get the descriptor for the courselike
we'll be importing into.
"""
raise NotImplementedError
@abstractmethod
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
To be overloaded with a method that installs the child items into self.store.
"""
raise NotImplementedError
@abstractmethod
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
To be overloaded with a method that installs the draft items into self.store.
"""
raise NotImplementedError
def recursive_build(self, source_courselike, courselike, courselike_key, dest_id):
"""
Recursively imports all child blocks from the temporary modulestore into the
target modulestore.
"""
all_locs = set(self.xml_module_store.modules[courselike_key].keys())
all_locs.remove(source_courselike.location)
def depth_first(subtree):
"""
Import top down just so import code can make assumptions about parents always being available
"""
if subtree.has_children:
for child in subtree.get_children():
try:
all_locs.remove(child.location)
except KeyError:
# tolerate same child occurring under 2 parents such as in
# ContentStoreTest.test_image_import
pass
if self.verbose:
log.debug('importing module location %s', child.location)
_update_and_import_module(
child,
self.store,
self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=courselike.runtime,
)
depth_first(child)
depth_first(source_courselike)
for leftover in all_locs:
if self.verbose:
log.debug('importing module location %s', leftover)
_update_and_import_module(
self.xml_module_store.get_item(leftover),
self.store,
self.user_id,
courselike_key,
dest_id,
do_import_static=self.do_import_static,
runtime=courselike.runtime,
)
def run_imports(self):
"""
Iterate over the given directories and yield courses.
"""
self.preflight()
for courselike_key in self.xml_module_store.modules.keys():
try:
dest_id, runtime = self.get_dest_id(courselike_key)
except DuplicateCourseError:
continue
# This bulk operation wraps all the operations to populate the published branch.
with self.store.bulk_operations(dest_id):
# Retrieve the course itself.
source_courselike, courselike, data_path = self.get_courselike(courselike_key, runtime, dest_id)
# Import all static pieces.
self.import_static(data_path, dest_id)
# Import asset metadata stored in XML.
self.import_asset_metadata(data_path, dest_id)
# Import all children
self.import_children(source_courselike, courselike, courselike_key, dest_id)
# This bulk operation wraps all the operations to populate the draft branch with any items
# from the /drafts subdirectory.
# Drafts must be imported in a separate bulk operation from published items to import properly,
# due to the recursive_build() above creating a draft item for each course block
# and then publishing it.
with self.store.bulk_operations(dest_id):
# Import all draft items into the courselike.
courselike = self.import_drafts(courselike, courselike_key, data_path, dest_id)
yield courselike
class CourseImportManager(ImportManager):
"""
Import manager for Courses.
"""
store_class = XMLModuleStore
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Given a key, runtime, and target key, get the version of the course
from the temporary modulestore.
"""
source_course = self.xml_module_store.get_course(courselike_key)
# STEP 1: find and import course module
course, course_data_path = self.import_courselike(
runtime, courselike_key, dest_id, source_course,
)
return source_course, course, course_data_path
def get_dest_id(self, courselike_key):
"""
Get the course key that will be used for the target modulestore.
"""
if self.target_id is not None:
dest_id = self.target_id
else:
# Note that dest_course_id will be in the format for the default modulestore.
dest_id = self.store.make_course_key(courselike_key.org, courselike_key.course, courselike_key.run)
existing_id = self.store.has_course(dest_id, ignore_case=True)
# store.has_course will return the course_key in the format for the modulestore in which it was found.
# This may be different from dest_course_id, so correct to the format found.
if existing_id:
dest_id = existing_id
runtime = None
# Creates a new course if it doesn't already exist
if self.create_if_not_present and not existing_id:
try:
new_course = self.store.create_course(
dest_id.org, dest_id.course, dest_id.run, self.user_id
)
runtime = new_course.runtime
except DuplicateCourseError:
log.debug(
"Skipping import of course with id, %s, "
"since it collides with an existing one", dest_id
)
raise
return dest_id, runtime
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Update special static assets, such as PDF textbooks and wiki resources.
"""
for entry in course.pdf_textbooks:
for chapter in entry.get('chapters', []):
if StaticContent.is_c4x_path(chapter.get('url', '')):
asset_key = StaticContent.get_location_from_path(chapter['url'])
chapter['url'] = StaticContent.get_static_path_from_location(asset_key)
# Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
# If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
# values then remap it so that the wiki does not point to the old wiki.
if courselike_key != course.id:
original_unique_wiki_slug = u'{0}.{1}.{2}'.format(
courselike_key.org,
courselike_key.course,
courselike_key.run
)
if course.wiki_slug == original_unique_wiki_slug or course.wiki_slug == courselike_key.course:
course.wiki_slug = u'{0}.{1}.{2}'.format(
course.id.org,
course.id.course,
course.id.run,
)
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
Imports all children into the desired store.
"""
# The branch setting of published_only forces an overwrite of all draft modules
# during the course import.
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only, dest_id):
self.recursive_build(source_courselike, courselike, courselike_key, dest_id)
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
Imports all drafts into the desired store.
"""
# Import any draft items
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, dest_id):
_import_course_draft(
self.xml_module_store,
self.store,
self.user_id,
data_path,
courselike_key,
dest_id,
courselike.runtime
)
# Importing the drafts potentially triggered a new structure version.
# If so, the HEAD version_guid of the passed-in courselike will be out-of-date.
# Fetch the course to return the most recent course version.
return self.store.get_course(courselike.id.replace(branch=None, version_guid=None))
class LibraryImportManager(ImportManager):
"""
Import manager for Libraries
"""
store_class = LibraryXMLModuleStore
def get_dest_id(self, courselike_key):
"""
Get the LibraryLocator that will be used in the target modulestore.
"""
if self.target_id is not None:
dest_id = self.target_id
else:
            dest_id = LibraryLocator(courselike_key.org, courselike_key.library)
existing_lib = self.store.get_library(dest_id, ignore_case=True)
runtime = None
if existing_lib:
dest_id = existing_lib.location.library_key
runtime = existing_lib.runtime
if self.create_if_not_present and not existing_lib:
try:
library = self.store.create_library(
                    org=dest_id.org,
                    library=dest_id.library,
user_id=self.user_id,
fields={"display_name": ""},
)
runtime = library.runtime
except DuplicateCourseError:
log.debug(
"Skipping import of Library with id %s, "
"since it collides with an existing one", dest_id
)
raise
return dest_id, runtime
def get_courselike(self, courselike_key, runtime, dest_id):
"""
Get the descriptor of the library from the XML import modulestore.
"""
source_library = self.xml_module_store.get_library(courselike_key) # pylint: disable=no-member
library, library_data_path = self.import_courselike(
runtime, courselike_key, dest_id, source_library,
)
return source_library, library, library_data_path
def static_updater(self, course, source_courselike, courselike_key, dest_id, runtime):
"""
Libraries have no special static items to import.
"""
pass
def import_children(self, source_courselike, courselike, courselike_key, dest_id):
"""
Imports all children into the desired store.
"""
self.recursive_build(source_courselike, courselike, courselike_key, dest_id)
def import_drafts(self, courselike, courselike_key, data_path, dest_id):
"""
Imports all drafts into the desired store.
"""
return courselike
def import_course_from_xml(*args, **kwargs):
"""
Thin wrapper for the Course Import Manager. See ImportManager for details.
"""
manager = CourseImportManager(*args, **kwargs)
return list(manager.run_imports())
def import_library_from_xml(*args, **kwargs):
"""
Thin wrapper for the Library Import Manager. See ImportManager for details.
"""
manager = LibraryImportManager(*args, **kwargs)
return list(manager.run_imports())
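# Illustrative usage sketch (the store, user id and paths below are
# placeholders for whatever the caller already has):
#     courses = import_course_from_xml(
#         store, user_id, '/path/to/data_dir', source_dirs=['my-course'],
#         static_content_store=content_store, create_if_not_present=True,
#     )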
def _update_and_import_module(
module, store, user_id,
source_course_id, dest_course_id,
do_import_static=True, runtime=None):
"""
Update all the module reference fields to the destination course id,
then import the module into the destination course.
"""
logging.debug(u'processing import of module %s...', unicode(module.location))
def _update_module_references(module, source_course_id, dest_course_id):
"""
Move the module to a new course.
"""
def _convert_ref_fields_to_new_namespace(reference): # pylint: disable=invalid-name
"""
Convert a reference to the new namespace, but only
if the original namespace matched the original course.
Otherwise, returns the input value.
"""
assert isinstance(reference, UsageKey)
if source_course_id == reference.course_key:
return reference.map_into_course(dest_course_id)
else:
return reference
fields = {}
for field_name, field in module.fields.iteritems():
if field.scope != Scope.parent and field.is_set_on(module):
if isinstance(field, Reference):
value = field.read_from(module)
if value is None:
fields[field_name] = None
else:
fields[field_name] = _convert_ref_fields_to_new_namespace(field.read_from(module))
elif isinstance(field, ReferenceList):
references = field.read_from(module)
fields[field_name] = [_convert_ref_fields_to_new_namespace(reference) for reference in references]
elif isinstance(field, ReferenceValueDict):
reference_dict = field.read_from(module)
fields[field_name] = {
key: _convert_ref_fields_to_new_namespace(reference)
for key, reference
in reference_dict.iteritems()
}
elif field_name == 'xml_attributes':
value = field.read_from(module)
# remove any export/import only xml_attributes
# which are used to wire together draft imports
if 'parent_url' in value:
del value['parent_url']
if 'parent_sequential_url' in value:
del value['parent_sequential_url']
if 'index_in_children_list' in value:
del value['index_in_children_list']
fields[field_name] = value
else:
fields[field_name] = field.read_from(module)
return fields
if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
# we want to convert all 'non-portable' links in the module_data
# (if it is a string) to portable strings (e.g. /static/)
module.data = rewrite_nonportable_content_links(
source_course_id,
dest_course_id,
module.data
)
fields = _update_module_references(module, source_course_id, dest_course_id)
asides = module.get_asides() if isinstance(module, XModuleMixin) else None
block = store.import_xblock(
user_id, dest_course_id, module.location.category,
module.location.block_id, fields, runtime, asides=asides
)
return block
def _import_course_draft(
xml_module_store,
store,
user_id,
course_data_path,
source_course_id,
target_id,
mongo_runtime
):
"""
This method will import all the content inside of the 'drafts' folder, if content exists.
NOTE: This is not a full course import! In our current application, only verticals
(and blocks beneath) can be in draft. Therefore, different call points into the import
process_xml are used as the XMLModuleStore() constructor cannot simply be called
(as is done for importing public content).
"""
draft_dir = course_data_path + "/drafts"
if not os.path.exists(draft_dir):
return
# create a new 'System' object which will manage the importing
errorlog = make_error_tracker()
# The course_dir as passed to ImportSystem is expected to just be relative, not
# the complete path including data_dir. ImportSystem will concatenate the two together.
data_dir = xml_module_store.data_dir
# Whether or not data_dir ends with a "/" differs in production vs. test.
if not data_dir.endswith("/"):
data_dir += "/"
# Remove absolute path, leaving relative <course_name>/drafts.
draft_course_dir = draft_dir.replace(data_dir, '', 1)
system = ImportSystem(
xmlstore=xml_module_store,
course_id=source_course_id,
course_dir=draft_course_dir,
error_tracker=errorlog.tracker,
load_error_modules=False,
mixins=xml_module_store.xblock_mixins,
field_data=KvsFieldData(kvs=DictKeyValueStore()),
target_course_id=target_id,
)
def _import_module(module):
# IMPORTANT: Be sure to update the module location in the NEW namespace
module_location = module.location.map_into_course(target_id)
# Update the module's location to DRAFT revision
# We need to call this method (instead of updating the location directly)
# to ensure that pure XBlock field data is updated correctly.
_update_module_location(module, module_location.replace(revision=MongoRevisionKey.draft))
parent_url = get_parent_url(module)
index = index_in_children_list(module)
# make sure our parent has us in its list of children
# this is to make sure private only modules show up
# in the list of children since they would have been
# filtered out from the non-draft store export.
if parent_url is not None and index is not None:
course_key = descriptor.location.course_key
parent_location = course_key.make_usage_key_from_deprecated_string(parent_url)
# IMPORTANT: Be sure to update the parent in the NEW namespace
parent_location = parent_location.map_into_course(target_id)
parent = store.get_item(parent_location, depth=0)
non_draft_location = module.location.map_into_course(target_id)
if not any(child.block_id == module.location.block_id for child in parent.children):
parent.children.insert(index, non_draft_location)
store.update_item(parent, user_id)
_update_and_import_module(
module, store, user_id,
source_course_id,
target_id,
runtime=mongo_runtime,
)
for child in module.get_children():
_import_module(child)
# Now walk the /drafts directory.
# Each file in the directory will be a draft copy of the vertical.
# First it is necessary to order the draft items by their desired index in the child list,
# since the order in which os.walk() returns the files is not guaranteed.
drafts = []
for rootdir, __, filenames in os.walk(draft_dir):
for filename in filenames:
if filename.startswith('._'):
# Skip any OSX quarantine files, prefixed with a '._'.
continue
module_path = os.path.join(rootdir, filename)
with open(module_path, 'r') as f:
try:
xml = f.read().decode('utf-8')
# The process_xml() call below recursively processes all descendants. If
# we call this on all verticals in a course with verticals nested below
# the unit level, we try to import the same content twice, causing naming conflicts.
# Therefore only process verticals at the unit level, assuming that any other
# verticals must be descendants.
if 'index_in_children_list' in xml:
descriptor = system.process_xml(xml)
# HACK: since we are doing partial imports of drafts
# the vertical doesn't have the 'url-name' set in the
# attributes (they are normally in the parent object,
# aka sequential), so we have to replace the location.name
# with the XML filename that is part of the pack
filename, __ = os.path.splitext(filename)
descriptor.location = descriptor.location.replace(name=filename)
index = index_in_children_list(descriptor)
parent_url = get_parent_url(descriptor, xml)
draft_url = unicode(descriptor.location)
draft = draft_node_constructor(
module=descriptor, url=draft_url, parent_url=parent_url, index=index
)
drafts.append(draft)
except Exception: # pylint: disable=broad-except
logging.exception('Error while parsing course drafts xml.')
# Sort drafts by `index_in_children_list` attribute.
drafts.sort(key=lambda x: x.index)
for draft in get_draft_subtree_roots(drafts):
try:
_import_module(draft.module)
except Exception: # pylint: disable=broad-except
logging.exception('while importing draft descriptor %s', draft.module)
def allowed_metadata_by_category(category):
# should this be in the descriptors?!?
return {
'vertical': [],
'chapter': ['start'],
'sequential': ['due', 'format', 'start', 'graded']
}.get(category, ['*'])
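# Worked examples of the mapping above (derived directly from the dict):
#
#   allowed_metadata_by_category('vertical')   == []
#   allowed_metadata_by_category('sequential') == ['due', 'format', 'start', 'graded']
#   allowed_metadata_by_category('html')       == ['*']   # any category not listed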
def check_module_metadata_editability(module):
"""
Assert that there is no metadata within a particular module that
we can't support editing. However we always allow 'display_name'
and 'xml_attributes'
"""
allowed = allowed_metadata_by_category(module.location.category)
if '*' in allowed:
# everything is allowed
return 0
allowed = allowed + ['xml_attributes', 'display_name']
err_cnt = 0
illegal_keys = set(own_metadata(module).keys()) - set(allowed)
if len(illegal_keys) > 0:
err_cnt = err_cnt + 1
print(
": found non-editable metadata on {url}. "
"These metadata keys are not supported = {keys}".format(
url=unicode(module.location), keys=illegal_keys
)
)
return err_cnt
def get_parent_url(module, xml=None):
"""
Get the parent_url, if any, from module using xml as an alternative source. If it finds it in
xml but not on module, it modifies module so that the next call to this w/o the xml will get the parent url
"""
if hasattr(module, 'xml_attributes'):
return module.xml_attributes.get(
# handle deprecated old attr
'parent_url', module.xml_attributes.get('parent_sequential_url')
)
if xml is not None:
create_xml_attributes(module, xml)
return get_parent_url(module) # don't reparse xml b/c don't infinite recurse but retry above lines
return None
def index_in_children_list(module, xml=None):
"""
Get the index_in_children_list, if any, from module using xml
as an alternative source. If it finds it in xml but not on module,
it modifies module so that the next call to this w/o the xml
will get the field.
"""
if hasattr(module, 'xml_attributes'):
val = module.xml_attributes.get('index_in_children_list')
if val is not None:
return int(val)
return None
if xml is not None:
create_xml_attributes(module, xml)
return index_in_children_list(module) # don't reparse xml b/c don't infinite recurse but retry above lines
return None
def create_xml_attributes(module, xml):
"""
Make up for modules which don't define xml_attributes by creating them here and populating
"""
xml_attrs = {}
for attr, val in xml.attrib.iteritems():
if attr not in module.fields:
# translate obsolete attr
if attr == 'parent_sequential_url':
attr = 'parent_url'
xml_attrs[attr] = val
# now cache it on module where it's expected
module.xml_attributes = xml_attrs
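# Illustrative example (the element below is hypothetical, not taken from real
# course data). Given a draft vertical exported as
#
#   <vertical display_name="Unit 1"
#             parent_sequential_url="i4x://Org/Course/sequential/abc"
#             index_in_children_list="0"/>
#
# create_xml_attributes() would set module.xml_attributes to
#   {'parent_url': 'i4x://Org/Course/sequential/abc', 'index_in_children_list': '0'}
# -- 'parent_sequential_url' is translated to 'parent_url', values stay strings,
# and attributes that are real fields on the module (display_name) are skipped.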
def validate_no_non_editable_metadata(module_store, course_id, category):
err_cnt = 0
for module_loc in module_store.modules[course_id]:
module = module_store.modules[course_id][module_loc]
if module.location.category == category:
err_cnt = err_cnt + check_module_metadata_editability(module)
return err_cnt
def validate_category_hierarchy(
module_store, course_id, parent_category, expected_child_category):
err_cnt = 0
parents = []
# get all modules of parent_category
for module in module_store.modules[course_id].itervalues():
if module.location.category == parent_category:
parents.append(module)
for parent in parents:
for child_loc in parent.children:
if child_loc.category != expected_child_category:
err_cnt += 1
print(
"ERROR: child {child} of parent {parent} was expected to be "
"category of {expected} but was {actual}".format(
child=child_loc, parent=parent.location,
expected=expected_child_category,
actual=child_loc.category
)
)
return err_cnt
def validate_data_source_path_existence(path, is_err=True, extra_msg=None):
_cnt = 0
if not os.path.exists(path):
print(
"{type}: Expected folder at {path}. {extra}".format(
type='ERROR' if is_err else 'WARNING',
path=path,
extra=extra_msg or "",
)
)
_cnt = 1
return _cnt
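# Worked example (paths are illustrative):
#
#   validate_data_source_path_existence('/data/my_course/static')
#       -> prints "ERROR: Expected folder at /data/my_course/static. " and
#          returns 1 if the folder is missing; returns 0 silently if it exists.
#   validate_data_source_path_existence('/data/my_course/static/subs', is_err=False)
#       -> same check, but reported as a WARNING.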
def validate_data_source_paths(data_dir, course_dir):
# check that there is a '/static/' directory
course_path = data_dir / course_dir
err_cnt = 0
warn_cnt = 0
err_cnt += validate_data_source_path_existence(course_path / 'static')
warn_cnt += validate_data_source_path_existence(
course_path / 'static/subs', is_err=False,
extra_msg='Video captions (if they are used) will not work unless they are static/subs.'
)
return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
"""
Validate that the course explicitly sets values for any fields
whose defaults may have changed between the export and the import.
Does not add to error count as these are just warnings.
"""
# is there a reliable way to get the module location just given the course_id?
warn_cnt = 0
for module in module_store.modules[course_id].itervalues():
if module.location.category == 'course':
if not module._field_data.has(module, 'rerandomize'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"rerandomize" whose default is now "never". '
'The behavior of your course may change.'
)
if not module._field_data.has(module, 'showanswer'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"showanswer" whose default is now "finished". '
'The behavior of your course may change.'
)
return warn_cnt
def perform_xlint(
data_dir, source_dirs,
default_class='xmodule.default_module.DefaultDescriptor',
load_error_modules=True,
xblock_mixins=(LocationMixin, XModuleMixin)):
err_cnt = 0
warn_cnt = 0
module_store = XMLModuleStore(
data_dir,
default_class=default_class,
source_dirs=source_dirs,
load_error_modules=load_error_modules,
xblock_mixins=xblock_mixins
)
# check all data source path information
for course_dir in source_dirs:
_err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
err_cnt += _err_cnt
warn_cnt += _warn_cnt
# first count all errors and warnings as part of the XMLModuleStore import
for err_log in module_store._course_errors.itervalues(): # pylint: disable=protected-access
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
# then count outright all courses that failed to load at all
for err_log in module_store.errored_courses.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
            print(msg)
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
for course_id in module_store.modules.keys():
# constrain that courses only have 'chapter' children
err_cnt += validate_category_hierarchy(
module_store, course_id, "course", "chapter"
)
# constrain that chapters only have 'sequentials'
err_cnt += validate_category_hierarchy(
module_store, course_id, "chapter", "sequential"
)
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(
module_store, course_id, "sequential", "vertical"
)
# validate the course policy overrides any defaults
# which have changed over time
warn_cnt += validate_course_policy(module_store, course_id)
# don't allow metadata on verticals, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "vertical"
)
# don't allow metadata on chapters, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "chapter"
)
# don't allow metadata on sequences that we can't edit
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "sequential"
)
# check for a presence of a course marketing video
if not module_store.has_item(course_id.make_usage_key('about', 'video')):
print(
"WARN: Missing course marketing video. It is recommended "
"that every course have a marketing video."
)
warn_cnt += 1
    print("\n")
    print("------------------------------------------")
    print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
        err=err_cnt,
        warn=warn_cnt
    ))
if err_cnt > 0:
print(
"This course is not suitable for importing. Please fix courseware "
"according to specifications before importing."
)
elif warn_cnt > 0:
        print(
            "This course can be imported, but some errors may occur "
            "during the run of the course. It is recommended that you fix "
            "your courseware before importing."
        )
else:
        print("This course can be imported successfully.")
return err_cnt
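# Illustrative invocation (the data directory and course directory are
# assumptions for the example):
#
#   err_cnt = perform_xlint('/edx/var/edxapp/data', ['my_course_dir'])
#   if err_cnt > 0:
#       # refuse to import until the reported problems are fixed
#       ...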
def _update_module_location(module, new_location):
"""
Update a module's location.
If the module is a pure XBlock (not an XModule), then its field data
keys will need to be updated to include the new location.
Args:
module (XModuleMixin): The module to update.
new_location (Location): The new location of the module.
Returns:
None
"""
# Retrieve the content and settings fields that have been explicitly set
# to ensure that they are properly re-keyed in the XBlock field data.
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = (
module.get_explicitly_set_fields_by_scope(Scope.content).keys() +
module.get_explicitly_set_fields_by_scope(Scope.settings).keys() +
module.get_explicitly_set_fields_by_scope(Scope.children).keys()
)
module.location = new_location
# Pure XBlocks store the field data in a key-value store
# in which one component of the key is the XBlock's location (equivalent to "scope_ids").
# Since we've changed the XBlock's location, we need to re-save
# all the XBlock's fields so they will be stored using the new location in the key.
# However, since XBlocks only save "dirty" fields, we need to call
# XBlock's `force_save_fields_method`
if len(rekey_fields) > 0:
module.force_save_fields(rekey_fields)
| apache-2.0 | -6,630,822,637,302,118,000 | 39.223906 | 120 | 0.602227 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/rules/separate_multiple_signal_identifiers_into_individual_statements.py | 1 | 3128 |
import copy
from vsg import parser
from vsg import rule
from vsg import token
from vsg import violation
from vsg.vhdlFile import utils
class separate_multiple_signal_identifiers_into_individual_statements(rule.Rule):
'''
    Checks for signal declarations that define more than the allowed number of
    identifiers and splits them into individual declarations, one identifier
    per statement.

    Parameters
    ----------

    name : string
       The group the rule belongs to.

    identifier : string
       unique identifier.  Usually in the form of 00N.

    lTokens : list of parser object types
       object types used to locate the declarations to check

    iAllow : integer
       maximum number of identifiers allowed in a single declaration
'''
def __init__(self, name, identifier, lTokens, iAllow=2):
rule.Rule.__init__(self, name=name, identifier=identifier)
self.solution = 'Split signal declaration into individual declarations'
self.phase = 1
self.lTokens = lTokens
self.consecutive = iAllow
self.configuration.append('consecutive')
def _get_tokens_of_interest(self, oFile):
return oFile.get_tokens_bounded_by(token.signal_declaration.signal_keyword, token.signal_declaration.semicolon)
def _analyze(self, lToi):
for oToi in lToi:
lTokens = oToi.get_tokens()
iIdentifiers = 0
iStartIndex = None
iEndIndex = 0
bPreTokens = True
lPreTokens = []
lIdentifiers = []
for iToken, oToken in enumerate(lTokens):
if isinstance(oToken, token.signal_declaration.identifier):
lIdentifiers.append(oToken)
iIdentifiers += 1
if iStartIndex is None:
iStartIndex = iToken
iEndIndex = iToken
bPreTokens = False
if bPreTokens:
lPreTokens.append(oToken)
if iIdentifiers > self.consecutive:
oViolation = violation.New(oToi.get_line_number(), oToi, self.solution)
dAction = {}
dAction['start'] = iStartIndex
dAction['end'] = iEndIndex
dAction['number'] = iIdentifiers
dAction['identifiers'] = lIdentifiers
oViolation.set_action(dAction)
self.add_violation(oViolation)
def _fix_violation(self, oViolation):
lTokens = oViolation.get_tokens()
dAction = oViolation.get_action()
lFinalTokens = []
for oIdentifier in dAction['identifiers']:
lNewTokens = []
for iToken, oToken in enumerate(lTokens):
if iToken < dAction['start']:
lNewTokens.append(copy.deepcopy(oToken))
if iToken == dAction['start']:
lNewTokens.append(oIdentifier)
if iToken > dAction['end']:
lNewTokens.append(copy.deepcopy(oToken))
lNewTokens = utils.remove_carriage_returns_from_token_list(lNewTokens)
lFinalTokens.extend(lNewTokens)
lFinalTokens.append(parser.carriage_return())
lFinalTokens.pop()
oViolation.set_tokens(lFinalTokens)
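# Illustrative effect of this rule (VHDL shown in comments; the signal names are
# made up for the example). With this base class's default of two identifiers
# allowed, a declaration such as
#
#   signal sig_a, sig_b, sig_c : std_logic;
#
# is reported, and the fix rewrites it as individual statements:
#
#   signal sig_a : std_logic;
#   signal sig_b : std_logic;
#   signal sig_c : std_logic;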
| gpl-3.0 | -6,016,345,232,679,527,000 | 32.634409 | 119 | 0.58344 | false |
lino-framework/book | lino_book/projects/noi1e/maketour.py | 1 | 4300 | # -*- coding: UTF-8 -*-
# Copyright 2015-2021 Rumma & Ko Ltd
# License: BSD, see LICENSE for more details.
# Usage:
# $ go noi1e
# $ python manage.py run tours/make.py
# import time
from pathlib import Path
# from os.path import dirname
# import traceback
from django.conf import settings
from django.utils import translation
from lino.api import gettext as _ # not lazy
from lino.api import rt
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lino.api.selenium import Tour, runserver
def tour(app, user):
driver = app.driver
app.screenshot('login1', "Before signing in")
# note that "Sign in" button is in English even when user.language is
# something else because they haven't yet authenticated:
app.find_clickable("Sign in").click()
elem = driver.find_element(By.NAME, 'username')
elem.send_keys(user.username)
elem = driver.find_element(By.NAME, 'password')
elem.send_keys("1234")
app.screenshot('login2', "The login window")
elem.send_keys(Keys.RETURN)
app.screenshot('welcome', "The main screen")
app.find_clickable(_("Contacts")).click()
app.screenshot('menu_contacts', "The Contacts menu")
app.find_clickable(_("Organizations")).click()
# elem = driver.find_element(By.LINK_TEXT, _("Organizations"))
# elem.click()
app.screenshot('contacts.Companies.grid', "The list of organizations")
# this worked on 20210206 but after a firefox upgrade it caused
# selenium.common.exceptions.ElementClickInterceptedException: Message: Element <button id="ext-gen103" class=" x-btn-text x-tbar-database_gear" type="button"> is not clickable at point (172,69) because another element <div id="ext-gen195" class="ext-el-mask"> obscures it
# and other problems.
wait = WebDriverWait(driver, 10)
elem = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "x-tbar-database_gear")))
# app.stabilize()
# elem = driver.find_element(By.CLASS_NAME, "x-tbar-database_gear")
elem.click()
app.screenshot('contacts.Companies.grid.params', "Filter parameters")
# time.sleep(2)
# find the first row and doubleclick it:
app.stabilize()
found = False
for elem in driver.find_elements(By.CLASS_NAME, 'x-grid3-row'):
if app.is_stale(elem):
print("stale:", elem)
else:
found = True
app.doubleclick(elem)
app.screenshot('contacts.Companies.detail', "Detail window of an organization")
app.find_clickable(_("Contact")).click()
app.screenshot('contacts.Companies.detail2', "Detail window of an organization (tab 2)")
app.find_clickable(_("Sites")).click()
app.screenshot('contacts.Companies.detail3', "Detail window of an organization (tab 3)")
break
if not found:
print("Mysterious: Did not find any row to doubleclick")
# we can open the datail window programmatically using a permalink
if False: # TODO: stabilize fails when there is no dashboard
obj = rt.models.contacts.Person.objects.first()
ar = rt.login(user=user)
ba = obj.get_detail_action(ar)
url = ar.get_detail_url(ba.actor, obj.pk)
driver.get(app.server_url + url)
app.stabilize()
app.screenshot('contacts.Person.detail', "Detail window of a person")
# Log out before leaving so that the next user can enter
app.find_clickable(str(user)).click()
app.stabilize()
app.find_clickable(_("Sign out")).click()
def main(a):
"""The function to call when the server is running."""
for username in ("robin", "rolf"):
user = rt.models.users.User.objects.get(username=username)
a.set_language(user.language)
with translation.override(user.language):
tour(a, user)
if __name__ == '__main__':
pth = Path(__file__).resolve().parents[3] / "docs/specs/noi/tour"
print("Writing screenshots to {}...".format(pth))
pth.mkdir(exist_ok=True)
# pth.mkdir(exist_ok=True)
Tour(main, output_path=pth,
title="A tour of the Noi/ExtJS demo project", ref="noi1e.tour").make()
| bsd-2-clause | 9,220,006,689,325,919,000 | 34.53719 | 276 | 0.669535 | false |
snap-stanford/ogb | examples/linkproppred/biokg/run.py | 1 | 15371 | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from collections import defaultdict
from tqdm import tqdm
import time
from tensorboardX import SummaryWriter
import pdb
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Training and Testing Knowledge Graph Embedding Models',
usage='train.py [<args>] [-h | --help]'
)
parser.add_argument('--cuda', action='store_true', help='use GPU')
parser.add_argument('--do_train', action='store_true')
parser.add_argument('--do_valid', action='store_true')
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
parser.add_argument('--dataset', type=str, default='ogbl-biokg', help='dataset name, default to biokg')
parser.add_argument('--model', default='TransE', type=str)
parser.add_argument('-de', '--double_entity_embedding', action='store_true')
parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
parser.add_argument('-d', '--hidden_dim', default=500, type=int)
parser.add_argument('-g', '--gamma', default=12.0, type=float)
parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
parser.add_argument('-b', '--batch_size', default=1024, type=int)
parser.add_argument('-r', '--regularization', default=0.0, type=float)
parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
parser.add_argument('--uni_weight', action='store_true',
help='Otherwise use subsampling weighting like in word2vec')
parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
parser.add_argument('-save', '--save_path', default=None, type=str)
parser.add_argument('--max_steps', default=100000, type=int)
parser.add_argument('--warm_up_steps', default=None, type=int)
parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
parser.add_argument('--valid_steps', default=10000, type=int)
parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')
parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')
parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples')
return parser.parse_args(args)
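# Illustrative command line (the hyper-parameter values are assumptions for the
# example, not recommended settings):
#
#   python run.py --do_train --do_valid --do_test --evaluate_train --cuda \
#       --model TransE -n 128 -b 512 -d 2000 -g 12.0 -adv -a 1.0 -lr 0.0001 \
#       --max_steps 100000 --test_batch_size 32 --print_on_screen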
def override_config(args):
'''
Override model and data configuration
'''
with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
argparse_dict = json.load(fjson)
args.dataset = argparse_dict['dataset']
args.model = argparse_dict['model']
args.double_entity_embedding = argparse_dict['double_entity_embedding']
args.double_relation_embedding = argparse_dict['double_relation_embedding']
args.hidden_dim = argparse_dict['hidden_dim']
args.test_batch_size = argparse_dict['test_batch_size']
def save_model(model, optimizer, save_variable_list, args):
'''
Save the parameters of the model and the optimizer,
as well as some other variables such as step and learning_rate
'''
argparse_dict = vars(args)
with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
json.dump(argparse_dict, fjson)
torch.save({
**save_variable_list,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
os.path.join(args.save_path, 'checkpoint')
)
entity_embedding = model.entity_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'entity_embedding'),
entity_embedding
)
relation_embedding = model.relation_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'relation_embedding'),
relation_embedding
)
def set_logger(args):
'''
Write logs to checkpoint and console
'''
if args.do_train:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')
else:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
'''
Print the evaluation logs
'''
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
writer.add_scalar("_".join([mode, metric]), metrics[metric], step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
        raise ValueError('at least one of train/valid/test/evaluate_train modes must be chosen.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = 'ogbl-biokg')
split_edge = dataset.get_edge_split()
train_triples, valid_triples, test_triples = split_edge["train"], split_edge["valid"], split_edge["test"]
nrelation = int(max(train_triples['relation']))+1
entity_dict = dict()
cur_idx = 0
for key in dataset[0]['num_nodes_dict']:
entity_dict[key] = (cur_idx, cur_idx + dataset[0]['num_nodes_dict'][key])
cur_idx += dataset[0]['num_nodes_dict'][key]
nentity = sum(dataset[0]['num_nodes_dict'].values())
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
# train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
# valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
# test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
head_type, tail_type = train_triples['head_type'][i], train_triples['tail_type'][i]
train_count[(head, relation, head_type)] += 1
train_count[(tail, -relation-1, tail_type)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding,
evaluator=evaluator
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.init_checkpoint:
# Restore model from checkpoint directory
logging.info('Loading checkpoint %s...' % args.init_checkpoint)
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
entity_dict = checkpoint['entity_dict']
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader(
TrainDataset(train_triples, nentity, nrelation,
args.negative_sample_size, 'head-batch',
train_count, train_true_head, train_true_tail,
entity_dict),
batch_size=args.batch_size,
shuffle=True,
num_workers=max(1, args.cpu_num//2),
collate_fn=TrainDataset.collate_fn
)
train_dataloader_tail = DataLoader(
TrainDataset(train_triples, nentity, nrelation,
args.negative_sample_size, 'tail-batch',
train_count, train_true_head, train_true_tail,
entity_dict),
batch_size=args.batch_size,
shuffle=True,
num_workers=max(1, args.cpu_num//2),
collate_fn=TrainDataset.collate_fn
)
train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
# Set training configuration
current_learning_rate = args.learning_rate
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate
)
if args.warm_up_steps:
warm_up_steps = args.warm_up_steps
else:
warm_up_steps = args.max_steps // 2
if args.init_checkpoint:
# Restore model from checkpoint directory
# logging.info('Loading checkpoint %s...' % args.init_checkpoint)
# checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
init_step = checkpoint['step']
kge_model.load_state_dict(checkpoint['model_state_dict'])
# entity_dict = checkpoint['entity_dict']
if args.do_train:
current_learning_rate = checkpoint['current_learning_rate']
warm_up_steps = checkpoint['warm_up_steps']
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
else:
        logging.info('Randomly Initializing %s Model...' % args.model)
init_step = 0
step = init_step
logging.info('Start Training...')
logging.info('init_step = %d' % init_step)
logging.info('batch_size = %d' % args.batch_size)
logging.info('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling)
logging.info('hidden_dim = %d' % args.hidden_dim)
logging.info('gamma = %f' % args.gamma)
logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))
if args.negative_adversarial_sampling:
logging.info('adversarial_temperature = %f' % args.adversarial_temperature)
# Set valid dataloader as it would be evaluated during training
if args.do_train:
logging.info('learning_rate = %d' % current_learning_rate)
training_logs = []
#Training Loop
for step in range(init_step, args.max_steps):
log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
training_logs.append(log)
if step >= warm_up_steps:
current_learning_rate = current_learning_rate / 10
logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate
)
warm_up_steps = warm_up_steps * 3
if step % args.save_checkpoint_steps == 0 and step > 0: # ~ 41 seconds/saving
save_variable_list = {
'step': step,
'current_learning_rate': current_learning_rate,
'warm_up_steps': warm_up_steps,
'entity_dict': entity_dict
}
save_model(kge_model, optimizer, save_variable_list, args)
if step % args.log_steps == 0:
metrics = {}
for metric in training_logs[0].keys():
metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)
log_metrics('Train', step, metrics, writer)
training_logs = []
if args.do_valid and step % args.valid_steps == 0 and step > 0:
logging.info('Evaluating on Valid Dataset...')
metrics = kge_model.test_step(kge_model, valid_triples, args, entity_dict)
log_metrics('Valid', step, metrics, writer)
save_variable_list = {
'step': step,
'current_learning_rate': current_learning_rate,
'warm_up_steps': warm_up_steps
}
save_model(kge_model, optimizer, save_variable_list, args)
if args.do_valid:
logging.info('Evaluating on Valid Dataset...')
metrics = kge_model.test_step(kge_model, valid_triples, args, entity_dict)
log_metrics('Valid', step, metrics, writer)
if args.do_test:
logging.info('Evaluating on Test Dataset...')
metrics = kge_model.test_step(kge_model, test_triples, args, entity_dict)
log_metrics('Test', step, metrics, writer)
if args.evaluate_train:
logging.info('Evaluating on Training Dataset...')
small_train_triples = {}
indices = np.random.choice(len(train_triples['head']), args.ntriples_eval_train, replace=False)
for i in train_triples:
if 'type' in i:
small_train_triples[i] = [train_triples[i][x] for x in indices]
else:
small_train_triples[i] = train_triples[i][indices]
metrics = kge_model.test_step(kge_model, small_train_triples, args, entity_dict, random_sampling=True)
log_metrics('Train', step, metrics, writer)
if __name__ == '__main__':
main(parse_args())
| mit | -996,972,109,651,052,700 | 40.769022 | 156 | 0.623382 | false |
satishgoda/rbhus | rbhusUI/lib/selectRadioBoxMod.py | 1 | 6508 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selectRadioBoxMod.ui'
#
# Created: Mon Mar 24 15:23:01 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_selectRadioBox(object):
def setupUi(self, selectRadioBox):
selectRadioBox.setObjectName(_fromUtf8("selectRadioBox"))
selectRadioBox.resize(267, 487)
self.centralwidget = QtGui.QWidget(selectRadioBox)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.gridLayout = QtGui.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.checkSaveDefault = QtGui.QCheckBox(self.centralwidget)
self.checkSaveDefault.setObjectName(_fromUtf8("checkSaveDefault"))
self.horizontalLayout_2.addWidget(self.checkSaveDefault)
self.pushApply = QtGui.QPushButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushApply.sizePolicy().hasHeightForWidth())
self.pushApply.setSizePolicy(sizePolicy)
self.pushApply.setObjectName(_fromUtf8("pushApply"))
self.horizontalLayout_2.addWidget(self.pushApply)
self.verticalLayout_3.addLayout(self.horizontalLayout_2)
self.plainTextEditSelected = QtGui.QPlainTextEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plainTextEditSelected.sizePolicy().hasHeightForWidth())
self.plainTextEditSelected.setSizePolicy(sizePolicy)
self.plainTextEditSelected.setMaximumSize(QtCore.QSize(16777215, 50))
self.plainTextEditSelected.setReadOnly(True)
self.plainTextEditSelected.setObjectName(_fromUtf8("plainTextEditSelected"))
self.verticalLayout_3.addWidget(self.plainTextEditSelected)
self.gridLayout.addLayout(self.verticalLayout_3, 5, 0, 1, 4)
self.lineEditSearch = QtGui.QLineEdit(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditSearch.sizePolicy().hasHeightForWidth())
self.lineEditSearch.setSizePolicy(sizePolicy)
self.lineEditSearch.setObjectName(_fromUtf8("lineEditSearch"))
self.gridLayout.addWidget(self.lineEditSearch, 0, 1, 1, 2)
self.frame = QtGui.QFrame(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.frame)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.scrollArea = QtGui.QScrollArea(self.frame)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollArea.sizePolicy().hasHeightForWidth())
self.scrollArea.setSizePolicy(sizePolicy)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 239, 315))
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
self.verticalLayout = QtGui.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.verticalLayout_2.addWidget(self.scrollArea)
self.gridLayout.addWidget(self.frame, 1, 0, 3, 4)
self.label = QtGui.QLabel(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.pushClearSearch = QtGui.QPushButton(self.centralwidget)
self.pushClearSearch.setObjectName(_fromUtf8("pushClearSearch"))
self.gridLayout.addWidget(self.pushClearSearch, 0, 3, 1, 1)
selectRadioBox.setCentralWidget(self.centralwidget)
self.retranslateUi(selectRadioBox)
QtCore.QMetaObject.connectSlotsByName(selectRadioBox)
def retranslateUi(self, selectRadioBox):
selectRadioBox.setWindowTitle(_translate("selectRadioBox", "MainWindow", None))
self.checkSaveDefault.setText(_translate("selectRadioBox", "save as default", None))
self.pushApply.setText(_translate("selectRadioBox", "apply", None))
self.plainTextEditSelected.setPlainText(_translate("selectRadioBox", "rwst", None))
self.label.setText(_translate("selectRadioBox", "search", None))
self.pushClearSearch.setText(_translate("selectRadioBox", "clear", None))
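# Illustrative use of the generated class (standard PyQt4 pattern; the
# application scaffolding below is an assumption, not part of this file):
#
#   import sys
#   app = QtGui.QApplication(sys.argv)
#   window = QtGui.QMainWindow()
#   ui = Ui_selectRadioBox()
#   ui.setupUi(window)
#   window.show()
#   sys.exit(app.exec_())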
| gpl-3.0 | -2,923,608,831,434,736,000 | 51.910569 | 106 | 0.782883 | false |
TwoUnderscorez/DuckOS | filesystem/src/manualelf/createx.py | 1 | 2290 | # This script will create an ELF file
### SECTION .TEXT
# mov ebx, 1 ; prints hello
# mov eax, 4
# mov ecx, HWADDR
# mov edx, HWLEN
# int 0x80
# mov eax, 1 ; exits
# mov ebx, 0x2A ; exit code 42
# int 0x80
### SECTION .DATA
# HWADDR db "Hello World!", 0x0A
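# Usage note (assumed workflow, not part of the original script): running this
# script writes the raw bytes of a 32-bit x86 Linux executable to 'elffile'.
# On a machine that can run 32-bit ELF binaries, something like the following
# should print the message and exit with status 42 (0x2A):
#
#   $ python createx.py
#   $ chmod +x elffile && ./elffile
#   Hello World!
#   $ echo $?
#   42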
out = ''
### ELF HEADER
# e_ident(16):
out += '\x7FELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x10'
# e_type(2) - set it to 0x02 0x00 - ELF file:
out += '\x02\x00'
# e_machine(2) - set it to 0x03 0x00 - i386:
out += '\x03\x00'
# e_version(4):
out += '\x01\x00\x00\x00'
# e_entry(4) entry point:
out += '\x80\x80\x04\x08'
# e_phoff(4) - offset from file to program header table.
out += '\x34\x00\x00\x00'
# e_shoff(4) - offset from file to section header table.
out += '\x00\x00\x00\x00'
# e_flags(4) - we don't need flags:
out += '\x00\x00\x00\x00'
# e_ehsize(2) size of the ELF header:
out += '\x34\x00'
# e_phentsize(2) - size of a program header.
out += '\x20\x00'
# e_phnum(2) - number of program headers:
out += '\x02\x00'
# e_shentsize(2), e_shnum(2), e_shstrndx(2): irrelevant:
out += '\x00\x00\x00\x00\x00\x00'
### PROGRAM HEADER
# .text segment header
# p_type(4) type of segment:
out += '\x01\x00\x00\x00'
# p_offset(4) offset from the beginning of the file:
out += '\x80\x00\x00\x00'
# p_vaddr(4) - what virtual address to assign to segment:
out += '\x80\x80\x04\x08'
# p_paddr(4) - physical addressing is irrelevant:
out += '\x00\x00\x00\x00'
# p_filesz(4) - number of bytes in file image of segment
out += '\x24\x00\x00\x00'
# p_memsz(4) - number of bytes in memory image of segment:
out += '\x24\x00\x00\x00'
# p_flags(4):
out += '\x05\x00\x00\x00'
# p_align(4) - handles alignment to memory pages:
out += '\x00\x10\x00\x00'
# .data segment header
out += '\x01\x00\x00\x00\xA4\x00\x00\x00\xA4\x80\x04\x08\x00\x00\x00\x00'
out += '\x20\x00\x00\x00\x20\x00\x00\x00\x07\x00\x00\x00\x00\x10\x00\x00'
# padding
out += '\x00' * 12
# .text segment
out += '\xBB\x01\x00\x00\x00\xB8\x04\x00\x00\x00\xB9\xA4\x80\x04\x08\xBA'
out += '\x0D\x00\x00\x00\xCD\x80\xB8\x01\x00\x00\x00\xBB\x2A\x00\x00\x00'
out += '\xCD\x80'
# padding
out += '\x00\x00'
# .data segment
out += 'Hello World!\x0A'
f = open('elffile', 'w+b')
f.seek(0)
f.truncate()
f.writelines([out])
f.close() | bsd-3-clause | -8,952,335,182,506,612,000 | 26.939024 | 73 | 0.626638 | false |
spikeekips/txmongo2 | src/txmongo2/connection.py | 1 | 17204 | # coding: utf-8
"""
Connection
ConnectionStandalon
(class) ConnectionStandalonFactory
(class) ConnectionStandalonProtocol
ConnectionReplset
(class) ConnectionReplsetFactory
(class) ConnectionReplsetProtocol
(property) primary
(property) secondaries
connect (uri='mongodb://host[:port, 27017]?options', )
. connect to 'host:port'
. check is replset or single
. replset
- reconnect to server using `ConnectionReplset`
. single
- reconnect to server using `ConnectionStandalon`
. monitor all connections
senario
. if one node disconnected in any reason
. remove
"""
import logging
import copy
import random
from pymongo.uri_parser import parse_uri
from pymongo import errors
from pymongo.read_preferences import ReadPreference
from twisted.internet import reactor, defer
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.python import log, failure
from .factory import (
AutoDetectConnectionFactory,
SingleConnectionFactory,
ReplicaSetConnectionFactory,
)
from .protocol import Query
from .database import Database
STATE_PRIMARY = 1
STATE_SECONDARY = 2
class BaseConnection (object, ) :
factory = None
uri = None
uri_nodelist = None
class NoMoreNodeToConnect (Exception, ) : pass
@classmethod
def send_is_master (cls, proto, ) :
_query = Query(collection='admin.$cmd', query={'ismaster': 1, }, )
_d = proto.send_QUERY(_query, )
return _d
@classmethod
def send_replset_get_status (cls, proto, ) :
_query = Query(collection='admin.$cmd', query={'replSetGetStatus': 1, }, )
return proto.send_QUERY(_query, )
def __init__ (self, uri, ) :
if type(uri) in (str, unicode, ) :
uri = parse_uri('mongodb://%s' % uri, )
self.uri = uri
self.uri_nodelist = self.uri.get('nodelist')[:]
def get_factory_instance (self, uri, ) :
return self.factory(uri, )
def connect (self, ) :
log.msg('trying to connect to `%s`.' % self.uri.get('nodelist'), )
return self._connect().addErrback(self._eb, )
def _connect (self, ) :
return self.do_connect().addCallbacks(self._cb_connected, self._eb_connected, )
def do_connect (self, nodelist=None, ) :
if nodelist is None :
nodelist = self.uri_nodelist
try :
_host, _port = nodelist.pop(0, )
except IndexError :
raise self.NoMoreNodeToConnect
_uri = copy.copy(self.uri, )
_uri['nodelist'] = [(_host, _port, ), ]
_factory = self.get_factory_instance(_uri, )
return TCP4ClientEndpoint(
reactor, _host, int(_port),
).connect(_factory, ).addCallback(
lambda proto : proto.connectionReady(),
)
def _eb_connected (self, f, ) :
try :
return self._connect()
except self.NoMoreNodeToConnect :
raise errors.ConnectionFailure
def _eb (self, f, proto=None, ) :
log.msg(f.printDetailedTraceback(), logLevel=logging.ERROR, )
if proto and proto.transport :
proto.transport.loseConnection()
f.raiseException()
def _cb_connected (self, proto, ) :
raise NotImplemented
class AutoDetectConnection (BaseConnection, ) :
factory = AutoDetectConnectionFactory
def _cb_connected (self, proto, ) :
_d = BaseConnection.send_is_master(proto, )
_d.addCallback(self._cb_verify_ismaster, proto, )
_d.addErrback(self._eb, proto, )
return _d
def _cb_verify_ismaster (self, r, proto, ) :
if len(r.documents) != 1 :
raise errors.OperationFailure('Invalid document length.')
_config = r.documents[0].decode()
log.msg('[debug,%s] read data, \'%s\'' % (proto.addr, _config, ), )
if 'hosts' in _config and 'setName' in _config : # replicaset
log.msg('[debug,%s] found replicaset for `%s`' % (
proto.addr, _config.get('setName'), ), )
_connection = ReplicaSetConnection(
parse_uri('mongodb://%s' % _config.get('me'), ),
)
else :
log.msg('[debug,%s] found Single mode.' % (proto.addr, ), )
_connection = SingleConnection(self.uri.copy(), )
log.msg('[debug,%s] disconnecting for further process.' % proto.addr, )
proto.transport.loseConnection()
# reconnect
return _connection.connect()
class RealConnection (BaseConnection, ) :
connections = dict()
def __init__ (self, uri, ) :
BaseConnection.__init__(self, uri, )
self.connections = dict()
def disconnect (self, ) :
for i in self.connections.keys() :
self.remove_connection(i, )
return
def _cb_connected (self, proto, ) :
return self
def __getitem__ (self, name, ) :
return Database(self, name, )
def getprotocol (self, _type='read', ) :
raise NotImplemented
def add_connection (self, proto, config=None, ) :
if config is not None :
proto.config = config
self.connections[proto.addr] = proto
return
def remove_connection (self, name, ) :
if name not in self.connections :
return False
_proto = self.connections.get(name, )
if _proto.transport :
_proto.transport.loseConnection()
del self.connections[name]
return True
class SingleConnection (RealConnection, ) :
factory = SingleConnectionFactory
def _cb_connected (self, proto, ) :
self.add_connection(proto, config=dict(), )
return RealConnection._cb_connected(self, proto, )
def getprotocol (self, _type='read', ) :
if not self.connections:
raise errors.OperationFailure('failed to get protocol.', )
return self.connections.values()[0]
class ReplicaSetConnection (RealConnection, ) :
READ_PREFERENCES_FOR_READ = (
ReadPreference.SECONDARY,
ReadPreference.SECONDARY_ONLY,
ReadPreference.SECONDARY_PREFERRED,
)
factory = ReplicaSetConnectionFactory
hosts = list()
connections = dict()
def connect (self, ) :
return RealConnection.connect(self, ).addBoth(self._connection_done, )
def get_factory_instance (self, uri, ) :
return self.factory(self, uri, )
def connect_new (self, config, ) :
_uri = parse_uri('mongodb://%s' % config.get('name'), )
return self.do_connect([_uri.get('nodelist')[0], ], ).addCallback(
lambda proto : self.add_connection(proto, config=config, ),
)
def _cb_connected (self, proto, ) :
_d = BaseConnection.send_is_master(proto, )
_d.addCallback(self._cb_connected_member_status, proto, )
_d.addErrback(self._eb, proto, )
return _d
def _cb_connected_member_status (self, r, proto, ) :
if len(r.documents) != 1 :
raise errors.OperationFailure('Invalid document length.')
_config = r.documents[0].decode()
self.hosts = _config.get('hosts')
if not self.hosts :
raise errors.ConnectionFailure
_d = BaseConnection.send_replset_get_status(proto, )
_d.addCallback(self._cb_get_config, proto, )
_d.addErrback(self._eb, proto, )
return _d
def _cb_get_config (self, r, proto, ) :
if len(r.documents) != 1 :
raise errors.OperationFailure('Invalid document length.')
if proto.addr not in self.hosts :
proto.transport.loseConnection()
_config = r.documents[0].decode()
log.msg('[debug,%s] read data, \'%s\'' % (proto.addr, _config, ), )
if _config.get('ok') != 1.0 :
log.msg('invalid result, \'%s\'' % _config, logLevel=logging.ERROR, )
raise errors.OperationFailure('invalid result, \'%s\'' % _config, )
_dl = list()
for i in _config.get('members') :
if i.get('name') not in self.hosts :
continue
if i.get('state') not in (1, 2, ) :
continue
if i.get('self') :
self.add_connection(proto, config=i, )
continue
_dl.append(self.connect_new(i, ), )
if _dl :
return defer.DeferredList(_dl, ).addCallback(self._connect_nodes, )
return None
def _connect_nodes (self, r, ) :
return
def _connection_done (self, r, ) :
# start monitor
reactor.callLater(0.01, ReplicaSetConnectionMonitor(self, ).start, )
return self
def _filter_protocol (self, state=None, ) :
_r = filter(lambda proto : proto.config.get('state') == state, self.connections.values(), )
if len(_r) < 1 :
raise errors.OperationFailure('connections not found for %s' % state, )
return _r
def _get_protocol (self, state=None, ) :
if state is None :
state = STATE_PRIMARY
_r = self._filter_protocol(state, )
if state in (STATE_PRIMARY, ) :
return _r[0]
if len(_r) < 2 :
return _r[0]
return _r[random.choice(range(len(_r)))]
def getprotocol (self, _type='read', ) :
if not self.connections :
raise errors.OperationFailure('connections not found.', )
if _type != 'read' :
_proto = self._get_protocol(STATE_PRIMARY, )
log.msg('[debug] get primary for not read, %s.' % _proto, )
return _proto
_rf = self.uri.get('options', dict(), ).get('read_preferences', ReadPreference.SECONDARY_PREFERRED, )
if _rf not in self.READ_PREFERENCES_FOR_READ :
_proto = self._get_protocol(STATE_PRIMARY, )
log.msg('[debug] get primary, %s (%s).' % (_proto, _rf, ), )
return _proto
if _rf in (ReadPreference.SECONDARY, ReadPreference.SECONDARY_ONLY, ) :
_proto = self._get_protocol(STATE_SECONDARY, )
log.msg('[debug] get secondary, %s (%s).' % (_proto, _rf, ), )
return _proto
if _rf in (ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST, ) :
try :
_proto = self._get_protocol(STATE_SECONDARY, )
log.msg('[debug] get secondary, %s (%s).' % (_proto, _rf, ), )
return _proto
except errors.OperationFailure :
_proto = self._get_protocol(STATE_PRIMARY, )
log.msg('[debug] get secondary, but no secondary, %s (%s).' % (_proto, _rf, ), )
return _proto
_proto = self._get_protocol(STATE_SECONDARY, )
log.msg('[debug] get secondary, %s (%s).' % (_proto, _rf, ), )
return _proto
class ReplicaSetConnectionMonitor (object, ) :
interval = 1
#interval = 5
hosts = list()
def __init__ (self, connection, ) :
self._connection = connection
def start (self, ) :
log.msg('[debug] start monitor.',)
return self._monitor()
def _monitor (self, ) :
return defer.maybeDeferred(self._select_connection, ).addBoth(self._cb_start, )
def _cb_start (self, r, ) :
if isinstance(r, failure.Failure, ) :
log.msg(r.printDetailedTraceback(), logLevel=logging.ERROR, )
reactor.callLater(self.interval, self._monitor, )
return
def _select_connection (self, ) :
if not self._connection.connections :
raise errors.OperationFailure('no connection found.', )
try :
_primary = self._connection._get_protocol(STATE_PRIMARY, )
except errors.OperationFailure :
_primary = None
if _primary :
return self.configure(_primary, )
for i in self._connection.connections.values() :
return self.configure(i, )
def configure (self, proto, ) :
_d = BaseConnection.send_is_master(proto, )
_d.addCallback(self._cb_get_member_status, proto, )
return _d
def _cb_get_member_status (self, r, proto, ) :
if len(r.documents) != 1 :
raise errors.OperationFailure('Invalid document length.')
_config = r.documents[0].decode()
self.hosts = _config.get('hosts')
if not self.hosts :
raise errors.ConnectionFailure
_d = BaseConnection.send_replset_get_status(proto, )
_d.addCallback(self._filter_member, proto, )
return _d
def _filter_member (self, r, proto, ) :
if len(r.documents) != 1 :
raise errors.OperationFailure('Invalid document length.')
_config = r.documents[0].decode()
if _config.get('ok') != 1.0 :
log.msg('invalid result, \'%s\'' % _config, logLevel=logging.ERROR, )
raise errors.OperationFailure('invalid result, \'%s\'' % _config, )
# filter new node
_dl = list()
for i in _config.get('members') :
_new_node = self._check_node(i, )
if _new_node :
log.msg('[debug] found new node, `%s`.' % i.get('name', ), )
_dl.append(self._connection.connect_new(_new_node, ), )
def _eb (r, ) :
for _b, _r in r :
if isinstance(_r, failure.Failure, ) :
log.msg(_r.printDetailedTraceback(), logLevel=logging.ERROR, )
return
if _dl :
return defer.DeferredList(_dl, ).addBoth(_eb, )
return
def _check_node (self, config, ) :
if config.get('name') not in self.hosts :
if config.get('name') in self._connection.connections : # disconnect it
self._connection.remove_connection(config.get('name'))
return None
if config.get('name') not in self._connection.connections :
if config.get('state') not in (1, 2, ) :
return None
elif config.get('name') in self._connection.connections :
if config.get('state') in (1, 2, ) :
self._connection.connections[config.get('name')].config = config
else :
log.msg('[debug] > but node, `%s` is not proper state, `%s`, so disconnecting it.' % (
config.get('name'), config.get('state'),
), )
self._connection.remove_connection(config.get('name'))
return None
# if new node
return config
class _ConnectionPool (object, ) :
_index = 0
_pool = None
_pool_size = None
_cls = AutoDetectConnection
uri = None
def __init__ (self, uri=None, pool_size=1, cls=None, ) :
assert isinstance(pool_size, int)
assert pool_size >= 1
uri = uri if uri else 'mongodb://127.0.0.1:27017'
if type(uri) in (str, unicode, ) :
if not uri.startswith('mongodb://') :
uri = 'mongodb://%s' % uri
uri = parse_uri(uri, )
self.uri = uri
self._cls = cls if cls else AutoDetectConnection
self._pool_size = pool_size
self._pool = list()
def connect (self, ) :
def _cb_connection_done (connection, ) :
self._pool.append(connection, )
return
def _cb_connections_done (r, ) :
log.msg('filled the %d-sized pool.' % self._pool_size, )
return self
_dl = list()
for i in range(self._pool_size) :
_dl.append(self._cls(self.uri, ).connect().addCallback(
_cb_connection_done,
), )
return defer.DeferredList(_dl, ).addCallback(_cb_connections_done, )
def __getitem__ (self, name, ) :
return Database(self, name)
def __getattr__ (self, name, ) :
return self[name]
def disconnect (self, ) :
for connection in self._pool:
connection.disconnect()
del self._pool[self._pool.index(connection, )]
return
def getprotocol (self, _type='read', ) :
_retry = 0
_c = self._get_connection()
while not _c.connections :
if _retry > self._pool_size :
break
_c = self._get_connection()
_retry += 1
_p = _c.getprotocol(_type)
return _p
def _get_connection (self, ) :
if self._index > self._pool_size :
self._index = 0
_r = self._pool
_c = _r[self._index % len(_r)]
log.msg('choose the connection from pool, `%s`.' % _c, )
self._index += 1
return _c
def MongoConnection (host, port, pool_size=1, cls=None, ) :
return _ConnectionPool(
'mongodb://%s:%d' % (host, port, ),
pool_size=pool_size,
cls=cls,
).connect()
def MongoConnectionPool (host, port, pool_size=5, cls=None, ) :
return MongoConnection(host, port, pool_size=pool_size, cls=cls, )
Connection = MongoConnection
ConnectionPool = MongoConnectionPool
| apache-2.0 | 4,026,253,927,332,895,000 | 29.182456 | 109 | 0.560684 | false |
rekka/intro-fortran-2016 | web/python/instability_euler.py | 1 | 1073 | import math
import numpy as np
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x1 = 2.
x = np.linspace(0, x1, 100)
ax.plot(x, np.exp(-5. * x), linewidth=2, label = '$x(t)$')
N = 4
h = x1 / N
sx = np.linspace(0, x1, N + 1)
sy = [(1 - 5. * h)**n for n in range(N + 1)]
print sy
ax.plot(sx, sy, marker='.', markersize=10, label='$x_i$')
for i in range(1, N):
ax.plot(x, np.exp(-5. * x) * sy[i] / math.exp(-5. * sx[i]), '--')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_bounds(0, x1)
plt.tick_params(
axis='y',
which='both',
left='on',
right='off',
labelleft='off')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(sx)
ax.set_xticklabels(["$t_{}$".format(i) for i in range(N+1)])
ax.set_xlim((0 - 0.05, x1 + 0.05))
ax.set_ylim((-1.1 * max(np.abs(sy)), 1.1 * max(np.abs(sy))))
ax.set_ylabel('$x$', rotation=0)
ax.yaxis.set_label_coords(-0.025, 1.0)
ax.legend(frameon=False, loc='upper left')
plt.savefig('../img/instability_euler.svg')
| mit | -9,033,655,638,325,829,000 | 21.829787 | 69 | 0.60671 | false |
google/material-design-icons | update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_P_K_G_.py | 5 | 3677 | from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, readHex
from . import DefaultTable
import sys
import array
GPKGFormat = """
> # big endian
version: H
flags: H
numGMAPs: H
numGlyplets: H
"""
# psFontName is a byte string which follows the record above. This is zero padded
# to the beginning of the records array. The recordsOffsst is 32 bit aligned.
class table_G_P_K_G_(DefaultTable.DefaultTable):
def decompile(self, data, ttFont):
dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
GMAPoffsets = array.array("I")
endPos = (self.numGMAPs+1) * 4
GMAPoffsets.frombytes(newData[:endPos])
if sys.byteorder != "big": GMAPoffsets.byteswap()
self.GMAPs = []
for i in range(self.numGMAPs):
start = GMAPoffsets[i]
end = GMAPoffsets[i+1]
self.GMAPs.append(data[start:end])
pos = endPos
endPos = pos + (self.numGlyplets + 1)*4
glyphletOffsets = array.array("I")
glyphletOffsets.frombytes(newData[pos:endPos])
if sys.byteorder != "big": glyphletOffsets.byteswap()
self.glyphlets = []
for i in range(self.numGlyplets):
start = glyphletOffsets[i]
end = glyphletOffsets[i+1]
self.glyphlets.append(data[start:end])
def compile(self, ttFont):
self.numGMAPs = len(self.GMAPs)
self.numGlyplets = len(self.glyphlets)
GMAPoffsets = [0]*(self.numGMAPs + 1)
glyphletOffsets = [0]*(self.numGlyplets + 1)
dataList =[ sstruct.pack(GPKGFormat, self)]
pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4
GMAPoffsets[0] = pos
for i in range(1, self.numGMAPs +1):
pos += len(self.GMAPs[i-1])
GMAPoffsets[i] = pos
gmapArray = array.array("I", GMAPoffsets)
if sys.byteorder != "big": gmapArray.byteswap()
dataList.append(gmapArray.tobytes())
glyphletOffsets[0] = pos
for i in range(1, self.numGlyplets +1):
pos += len(self.glyphlets[i-1])
glyphletOffsets[i] = pos
glyphletArray = array.array("I", glyphletOffsets)
if sys.byteorder != "big": glyphletArray.byteswap()
dataList.append(glyphletArray.tobytes())
dataList += self.GMAPs
dataList += self.glyphlets
data = bytesjoin(dataList)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
formatstring, names, fixes = sstruct.getformat(GPKGFormat)
for name in names:
value = getattr(self, name)
writer.simpletag(name, value=value)
writer.newline()
writer.begintag("GMAPs")
writer.newline()
for gmapData in self.GMAPs:
writer.begintag("hexdata")
writer.newline()
writer.dumphex(gmapData)
writer.endtag("hexdata")
writer.newline()
writer.endtag("GMAPs")
writer.newline()
writer.begintag("glyphlets")
writer.newline()
for glyphletData in self.glyphlets:
writer.begintag("hexdata")
writer.newline()
writer.dumphex(glyphletData)
writer.endtag("hexdata")
writer.newline()
writer.endtag("glyphlets")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "GMAPs":
if not hasattr(self, "GMAPs"):
self.GMAPs = []
for element in content:
if isinstance(element, str):
continue
itemName, itemAttrs, itemContent = element
if itemName == "hexdata":
self.GMAPs.append(readHex(itemContent))
elif name == "glyphlets":
if not hasattr(self, "glyphlets"):
self.glyphlets = []
for element in content:
if isinstance(element, str):
continue
itemName, itemAttrs, itemContent = element
if itemName == "hexdata":
self.glyphlets.append(readHex(itemContent))
else:
setattr(self, name, safeEval(attrs["value"]))
| apache-2.0 | 5,304,961,493,776,114,000 | 28.653226 | 81 | 0.697852 | false |
m85091081/hakureshop | muMDAU_app/index.py | 1 | 3441 | # -*- coding: utf-8 -*-
# muMDAU_app main / first page
from muMDAU_app import app, socketio
from flask import make_response, request, render_template, Blueprint, url_for, redirect, session
import dbmongo, hashlib
from dbmongo import User, Data, Bid , Item
import subprocess, os
from subprocess import PIPE
from time import sleep
main = Blueprint('main', __name__)
# index page main route page
@main.route('/')
def index():
item = Item.find()
return render_template('shop.html',item = item)
@main.route('/buy/<itmid>', methods=['POST'])
def buyit(itmid):
item = Item.finddata(itmid)
many = request.form['many']
if many == "" :
many = int(1)
if int(many) <= item.get('count'):
return render_template('buy.html',**locals())
else:
return 'Fuck U NO GG'
@main.route('/delbidc')
def delbidc():
response = make_response(redirect(url_for('main.index')))
response.set_cookie('bid','',expires=0)
return response
@main.route('/keepbuy',methods=['POST'])
def keepbuy():
item = request.form['item']
many = request.form['many']
combine = request.form['combine']
print(combine)
if int(many) <= Item.finddata(item).get('count') :
if combine == "" :
itm = {item:many}
bid = Data.sent(itm)
fitem = Data.find_item(bid)
response = make_response(render_template('result.html',**locals()))
response.set_cookie('bid',bid)
return response
else:
if Data.find_item(combine) == None:
itm = {item:many}
bid = Data.sent(itm)
fitem = Data.find_item(bid)
response = make_response(render_template('result.html',**locals()))
response.set_cookie('bid',bid)
return response
else:
itm = Data.find_item(combine)
if not itm.get(item) == None :
itmm = itm.get(item)
itm2 = {item:int(many)+int(itmm)}
itm.update(itm2)
bid = Data.update(combine,itm)
fitem = Data.find_item(bid)
response = make_response(render_template('result.html',**locals()))
response.set_cookie('bid',bid)
return response
else:
itm2 = {item:many}
itm.update(itm2)
bid = Data.update(combine,itm)
fitem = Data.find_item(bid)
response = make_response(render_template('result.html',**locals()))
response.set_cookie('bid',bid)
return response
@main.route('/find', methods=['GET', 'POST'])
def find():
if request.method == 'GET':
return render_template('find.html')
else:
bid = request.form['bid']
stat = Data.find_bill(bid)
if stat == False:
fitem = Data.find_item(bid)
return render_template('result.html',**locals())
elif stat =='pre':
return render_template('result/pre.html',bid = bid)
elif stat =='nbid':
dic = Bid.finddict(bid)
u = Bid.findmoney(bid)
return render_template('result/nbid.html',**locals())
else:
return render_template('warning.html',message = '單號並不存在!')
| gpl-3.0 | -3,453,045,532,743,228,000 | 34.71875 | 96 | 0.5366 | false |
zengchunyun/s12 | day9/temp/rabb.py | 1 | 34557 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: zengchunyun
"""
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is RabbitMQ Management Plugin.
#
# The Initial Developer of the Original Code is GoPivotal, Inc.
# Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved.
import sys
if sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 6):
print("Sorry, rabbitmqadmin requires at least Python 2.6.")
sys.exit(1)
from optparse import OptionParser, TitledHelpFormatter
import urllib
import base64
import json
import os
import socket
if sys.version_info[0] == 2:
from ConfigParser import ConfigParser, NoSectionError
import httplib
import urlparse
from urllib import quote_plus
def b64(s):
return base64.b64encode(s)
else:
from configparser import ConfigParser, NoSectionError
import http.client as httplib
import urllib.parse as urlparse
from urllib.parse import quote_plus
def b64(s):
return base64.b64encode(s.encode('utf-8')).decode('utf-8')
VERSION = '3.6.1'
LISTABLE = {'connections': {'vhost': False, 'cols': ['name','user','channels']},
'channels': {'vhost': False, 'cols': ['name', 'user']},
'consumers': {'vhost': True},
'exchanges': {'vhost': True, 'cols': ['name', 'type']},
'queues': {'vhost': True, 'cols': ['name', 'messages']},
'bindings': {'vhost': True, 'cols': ['source', 'destination',
'routing_key']},
'users': {'vhost': False},
'vhosts': {'vhost': False, 'cols': ['name', 'messages']},
'permissions': {'vhost': False},
'nodes': {'vhost': False, 'cols': ['name','type','mem_used']},
'parameters': {'vhost': False, 'json': ['value']},
'policies': {'vhost': False, 'json': ['definition']}}
SHOWABLE = {'overview': {'vhost': False, 'cols': ['rabbitmq_version',
'cluster_name',
'queue_totals.messages',
'object_totals.queues']}}
PROMOTE_COLUMNS = ['vhost', 'name', 'type',
'source', 'destination', 'destination_type', 'routing_key']
URIS = {
'exchange': '/exchanges/{vhost}/{name}',
'queue': '/queues/{vhost}/{name}',
'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}',
'binding_del':'/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}',
'vhost': '/vhosts/{name}',
'user': '/users/{name}',
'permission': '/permissions/{vhost}/{user}',
'parameter': '/parameters/{component}/{vhost}/{name}',
'policy': '/policies/{vhost}/{name}'
}
DECLARABLE = {
'exchange': {'mandatory': ['name', 'type'],
'json': ['arguments'],
'optional': {'auto_delete': 'false', 'durable': 'true',
'internal': 'false', 'arguments': {}}},
'queue': {'mandatory': ['name'],
'json': ['arguments'],
'optional': {'auto_delete': 'false', 'durable': 'true',
'arguments': {}, 'node': None}},
'binding': {'mandatory': ['source', 'destination'],
'json': ['arguments'],
'optional': {'destination_type': 'queue',
'routing_key': '', 'arguments': {}}},
'vhost': {'mandatory': ['name'],
'optional': {'tracing': None}},
'user': {'mandatory': ['name', 'password', 'tags'],
'optional': {}},
'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'],
'optional': {}},
'parameter': {'mandatory': ['component', 'name', 'value'],
'json': ['value'],
'optional': {}},
# Priority is 'json' to convert to int
'policy': {'mandatory': ['name', 'pattern', 'definition'],
'json': ['definition', 'priority'],
'optional': {'priority' : 0, 'apply-to': None}}
}
DELETABLE = {
'exchange': {'mandatory': ['name']},
'queue': {'mandatory': ['name']},
'binding': {'mandatory': ['source', 'destination_type', 'destination',
'properties_key']},
'vhost': {'mandatory': ['name']},
'user': {'mandatory': ['name']},
'permission': {'mandatory': ['vhost', 'user']},
'parameter': {'mandatory': ['component', 'name']},
'policy': {'mandatory': ['name']}
}
CLOSABLE = {
'connection': {'mandatory': ['name'],
'optional': {},
'uri': '/connections/{name}'}
}
PURGABLE = {
'queue': {'mandatory': ['name'],
'optional': {},
'uri': '/queues/{vhost}/{name}/contents'}
}
EXTRA_VERBS = {
'publish': {'mandatory': ['routing_key'],
'optional': {'payload': None,
'properties': {},
'exchange': 'amq.default',
'payload_encoding': 'string'},
'json': ['properties'],
'uri': '/exchanges/{vhost}/{exchange}/publish'},
'get': {'mandatory': ['queue'],
'optional': {'count': '1', 'requeue': 'true',
'payload_file': None, 'encoding': 'auto'},
'uri': '/queues/{vhost}/{queue}/get'}
}
for k in DECLARABLE:
DECLARABLE[k]['uri'] = URIS[k]
for k in DELETABLE:
DELETABLE[k]['uri'] = URIS[k]
DELETABLE[k]['optional'] = {}
DELETABLE['binding']['uri'] = URIS['binding_del']
def short_usage():
return "rabbitmqadmin [options] subcommand"
def title(name):
return "\n%s\n%s\n\n" % (name, '=' * len(name))
def subcommands_usage():
usage = """Usage
=====
""" + short_usage() + """
where subcommand is one of:
""" + title("Display")
for l in LISTABLE:
usage += " list {0} [<column>...]\n".format(l)
for s in SHOWABLE:
usage += " show {0} [<column>...]\n".format(s)
usage += title("Object Manipulation")
usage += fmt_usage_stanza(DECLARABLE, 'declare')
usage += fmt_usage_stanza(DELETABLE, 'delete')
usage += fmt_usage_stanza(CLOSABLE, 'close')
usage += fmt_usage_stanza(PURGABLE, 'purge')
usage += title("Broker Definitions")
usage += """ export <file>
import <file>
"""
usage += title("Publishing and Consuming")
usage += fmt_usage_stanza(EXTRA_VERBS, '')
usage += """
* If payload is not specified on publish, standard input is used
* If payload_file is not specified on get, the payload will be shown on
standard output along with the message metadata
* If payload_file is specified on get, count must not be set
"""
return usage
def config_usage():
usage = "Usage\n=====\n" + short_usage()
usage += "\n" + title("Configuration File")
usage += """ It is possible to specify a configuration file from the command line.
Hosts can be configured easily in a configuration file and called
from the command line.
"""
usage += title("Example")
usage += """ # rabbitmqadmin.conf.example START
[host_normal]
hostname = localhost
port = 15672
username = guest
password = guest
declare_vhost = / # Used as default for declare / delete only
vhost = / # Used as default for declare / delete / list
[host_ssl]
hostname = otherhost
port = 15672
username = guest
password = guest
ssl = True
ssl_key_file = /path/to/key.pem
ssl_cert_file = /path/to/cert.pem
# rabbitmqadmin.conf.example END
"""
usage += title("Use")
usage += """ rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ..."""
return usage
def more_help():
return """
More Help
=========
For more help use the help subcommand:
rabbitmqadmin help subcommands # For a list of available subcommands
rabbitmqadmin help config # For help with the configuration file
"""
def fmt_usage_stanza(root, verb):
def fmt_args(args):
res = " ".join(["{0}=...".format(a) for a in args['mandatory']])
opts = " ".join("{0}=...".format(o) for o in args['optional'].keys())
if opts != "":
res += " [{0}]".format(opts)
return res
text = ""
if verb != "":
verb = " " + verb
for k in root.keys():
text += " {0} {1} {2}\n".format(verb, k, fmt_args(root[k]))
return text
default_options = { "hostname" : "localhost",
"port" : "15672",
"path_prefix" : "",
"declare_vhost" : "/",
"username" : "guest",
"password" : "guest",
"ssl" : False,
"verbose" : True,
"format" : "table",
"depth" : 1,
"bash_completion" : False }
class MyFormatter(TitledHelpFormatter):
def format_epilog(self, epilog):
return epilog
parser = OptionParser(usage=short_usage(),
formatter=MyFormatter(),
epilog=more_help())
def make_parser():
def add(*args, **kwargs):
key = kwargs['dest']
if key in default_options:
default = " [default: %s]" % default_options[key]
kwargs['help'] = kwargs['help'] + default
parser.add_option(*args, **kwargs)
add("-c", "--config", dest="config",
help="configuration file [default: ~/.rabbitmqadmin.conf]",
metavar="CONFIG")
add("-N", "--node", dest="node",
help="node described in the configuration file [default: 'default'" + \
" only if configuration file is specified]",
metavar="NODE")
add("-H", "--host", dest="hostname",
help="connect to host HOST" ,
metavar="HOST")
add("-P", "--port", dest="port",
help="connect to port PORT",
metavar="PORT")
add("--path-prefix", dest="path_prefix",
help="use specific URI path prefix for the RabbitMQ HTTP API (default: blank string)")
add("-V", "--vhost", dest="vhost",
help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]",
metavar="VHOST")
add("-u", "--username", dest="username",
help="connect using username USERNAME",
metavar="USERNAME")
add("-p", "--password", dest="password",
help="connect using password PASSWORD",
metavar="PASSWORD")
add("-q", "--quiet", action="store_false", dest="verbose",
help="suppress status messages")
add("-s", "--ssl", action="store_true", dest="ssl",
help="connect with ssl")
add("--ssl-key-file", dest="ssl_key_file",
help="PEM format key file for SSL")
add("--ssl-cert-file", dest="ssl_cert_file",
help="PEM format certificate file for SSL")
add("-f", "--format", dest="format",
help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]")
add("-S", "--sort", dest="sort", help="sort key for listing queries")
add("-R", "--sort-reverse", action="store_true", dest="sort_reverse",
help="reverse the sort order")
add("-d", "--depth", dest="depth",
help="maximum depth to recurse for listing tables")
add("--bash-completion", action="store_true",
dest="bash_completion",
help="Print bash completion script")
add("--version", action="store_true",
dest="version",
help="Display version and exit")
def default_config():
home = os.getenv('USERPROFILE') or os.getenv('HOME')
if home is not None:
config_file = home + os.sep + ".rabbitmqadmin.conf"
if os.path.isfile(config_file):
return config_file
return None
def make_configuration():
make_parser()
(options, args) = parser.parse_args()
setattr(options, "declare_vhost", None)
if options.version:
print_version()
if options.config is None:
config_file = default_config()
if config_file is not None:
setattr(options, "config", config_file)
else:
if not os.path.isfile(options.config):
assert_usage(False,
"Could not read config file '%s'" % options.config)
if options.node is None and options.config:
options.node = "default"
else:
options.node = options.node
for (key, val) in default_options.items():
if getattr(options, key) is None:
setattr(options, key, val)
if options.config is not None:
config = ConfigParser()
try:
config.read(options.config)
new_conf = dict(config.items(options.node))
except NoSectionError as error:
if options.node == "default":
pass
else:
assert_usage(False, ("Could not read section '%s' in config file" +
" '%s':\n %s") %
(options.node, options.config, error))
else:
for key, val in new_conf.items():
if key == 'ssl':
setattr(options, key, val == "True")
else:
setattr(options, key, val)
return (options, args)
def assert_usage(expr, error):
if not expr:
output("\nERROR: {0}\n".format(error))
output("{0} --help for help\n".format(os.path.basename(sys.argv[0])))
sys.exit(1)
def print_version():
output("rabbitmqadmin {0}".format(VERSION))
sys.exit(0)
def column_sort_key(col):
if col in PROMOTE_COLUMNS:
return (1, PROMOTE_COLUMNS.index(col))
else:
return (2, col)
def main():
(options, args) = make_configuration()
if options.bash_completion:
print_bash_completion()
exit(0)
assert_usage(len(args) > 0, 'Action not specified')
mgmt = Management(options, args[1:])
mode = "invoke_" + args[0]
assert_usage(hasattr(mgmt, mode),
'Action {0} not understood'.format(args[0]))
method = getattr(mgmt, "invoke_%s" % args[0])
method()
def output(s):
print(maybe_utf8(s, sys.stdout))
def die(s):
sys.stderr.write(maybe_utf8("*** {0}\n".format(s), sys.stderr))
exit(1)
def maybe_utf8(s, stream):
if sys.version_info[0] == 3 or stream.isatty():
# It will have an encoding, which Python will respect
return s
else:
# It won't have an encoding, and Python will pick ASCII by default
return s.encode('utf-8')
class Management:
def __init__(self, options, args):
self.options = options
self.args = args
def get(self, path):
return self.http("GET", "%s/api%s" % (self.options.path_prefix, path), "")
def put(self, path, body):
return self.http("PUT", "%s/api%s" % (self.options.path_prefix, path), body)
def post(self, path, body):
return self.http("POST", "%s/api%s" % (self.options.path_prefix, path), body)
def delete(self, path):
return self.http("DELETE", "%s/api%s" % (self.options.path_prefix, path), "")
def http(self, method, path, body):
if self.options.ssl:
conn = httplib.HTTPSConnection(self.options.hostname,
self.options.port,
self.options.ssl_key_file,
self.options.ssl_cert_file)
else:
conn = httplib.HTTPConnection(self.options.hostname,
self.options.port)
auth = (self.options.username + ":" + self.options.password)
headers = {"Authorization": "Basic " + b64(auth)}
if body != "":
headers["Content-Type"] = "application/json"
try:
conn.request(method, path, body, headers)
except socket.error as e:
die("Could not connect: {0}".format(e))
resp = conn.getresponse()
if resp.status == 400:
die(json.loads(resp.read())['reason'])
if resp.status == 401:
die("Access refused: {0}".format(path))
if resp.status == 404:
die("Not found: {0}".format(path))
if resp.status == 301:
url = urlparse.urlparse(resp.getheader('location'))
[host, port] = url.netloc.split(':')
self.options.hostname = host
self.options.port = int(port)
return self.http(method, url.path + '?' + url.query, body)
if resp.status < 200 or resp.status > 400:
raise Exception("Received %d %s for path %s\n%s"
% (resp.status, resp.reason, path, resp.read()))
return resp.read().decode('utf-8')
def verbose(self, string):
if self.options.verbose:
output(string)
def get_arg(self):
assert_usage(len(self.args) == 1, 'Exactly one argument required')
return self.args[0]
def use_cols(self):
# Deliberately do not cast to int here; we only care about the
# default, not explicit setting.
return self.options.depth == 1 and not 'json' in self.options.format
def invoke_help(self):
if len(self.args) == 0:
parser.print_help()
else:
help_cmd = self.get_arg()
if help_cmd == 'subcommands':
usage = subcommands_usage()
elif help_cmd == 'config':
usage = config_usage()
else:
assert_usage(False, """help topic must be one of:
subcommands
config""")
print(usage)
exit(0)
def invoke_publish(self):
(uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish'])
if not 'payload' in upload:
data = sys.stdin.read()
upload['payload'] = b64(data)
upload['payload_encoding'] = 'base64'
resp = json.loads(self.post(uri, json.dumps(upload)))
if resp['routed']:
self.verbose("Message published")
else:
self.verbose("Message published but NOT routed")
def invoke_get(self):
(uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get'])
payload_file = 'payload_file' in upload and upload['payload_file'] or None
assert_usage(not payload_file or upload['count'] == '1',
'Cannot get multiple messages using payload_file')
result = self.post(uri, json.dumps(upload))
if payload_file:
write_payload_file(payload_file, result)
columns = ['routing_key', 'exchange', 'message_count',
'payload_bytes', 'redelivered']
format_list(result, columns, {}, self.options)
else:
format_list(result, [], {}, self.options)
def invoke_export(self):
path = self.get_arg()
uri = "/definitions"
if self.options.vhost:
uri += "/%s" % quote_plus(self.options.vhost)
definitions = self.get(uri)
f = open(path, 'w')
f.write(definitions)
f.close()
self.verbose("Exported definitions for %s to \"%s\""
% (self.options.hostname, path))
def invoke_import(self):
path = self.get_arg()
f = open(path, 'r')
definitions = f.read()
f.close()
uri = "/definitions"
if self.options.vhost:
uri += "/%s" % quote_plus(self.options.vhost)
self.post(uri, definitions)
self.verbose("Imported definitions for %s from \"%s\""
% (self.options.hostname, path))
def invoke_list(self):
(uri, obj_info, cols) = self.list_show_uri(LISTABLE, 'list')
format_list(self.get(uri), cols, obj_info, self.options)
def invoke_show(self):
(uri, obj_info, cols) = self.list_show_uri(SHOWABLE, 'show')
format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options)
def list_show_uri(self, obj_types, verb):
obj_type = self.args[0]
assert_usage(obj_type in obj_types,
"Don't know how to {0} {1}".format(verb, obj_type))
obj_info = obj_types[obj_type]
uri = "/%s" % obj_type
query = []
if obj_info['vhost'] and self.options.vhost:
uri += "/%s" % quote_plus(self.options.vhost)
cols = self.args[1:]
if cols == [] and 'cols' in obj_info and self.use_cols():
cols = obj_info['cols']
if cols != []:
query.append("columns=" + ",".join(cols))
sort = self.options.sort
if sort:
query.append("sort=" + sort)
if self.options.sort_reverse:
query.append("sort_reverse=true")
query = "&".join(query)
if query != "":
uri += "?" + query
return (uri, obj_info, cols)
def invoke_declare(self):
(obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE)
if obj_type == 'binding':
self.post(uri, json.dumps(upload))
else:
self.put(uri, json.dumps(upload))
self.verbose("{0} declared".format(obj_type))
def invoke_delete(self):
(obj_type, uri, upload) = self.declare_delete_parse(DELETABLE)
self.delete(uri)
self.verbose("{0} deleted".format(obj_type))
def invoke_close(self):
(obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE)
self.delete(uri)
self.verbose("{0} closed".format(obj_type))
def invoke_purge(self):
(obj_type, uri, upload) = self.declare_delete_parse(PURGABLE)
self.delete(uri)
self.verbose("{0} purged".format(obj_type))
def declare_delete_parse(self, root):
assert_usage(len(self.args) > 0, 'Type not specified')
obj_type = self.args[0]
assert_usage(obj_type in root,
'Type {0} not recognised'.format(obj_type))
obj = root[obj_type]
(uri, upload) = self.parse_args(self.args[1:], obj)
return (obj_type, uri, upload)
def parse_args(self, args, obj):
mandatory = obj['mandatory']
optional = obj['optional']
uri_template = obj['uri']
upload = {}
for k in optional.keys():
if optional[k] is not None:
upload[k] = optional[k]
for arg in args:
assert_usage("=" in arg,
'Argument "{0}" not in format name=value'.format(arg))
(name, value) = arg.split("=", 1)
assert_usage(name in mandatory or name in optional.keys(),
'Argument "{0}" not recognised'.format(name))
if 'json' in obj and name in obj['json']:
upload[name] = self.parse_json(value)
else:
upload[name] = value
for m in mandatory:
assert_usage(m in upload.keys(),
'mandatory argument "{0}" required'.format(m))
if 'vhost' not in mandatory:
upload['vhost'] = self.options.vhost or self.options.declare_vhost
uri_args = {}
for k in upload:
v = upload[k]
if v and isinstance(v, (str, bytes)):
uri_args[k] = quote_plus(v)
if k == 'destination_type':
uri_args['destination_char'] = v[0]
uri = uri_template.format(**uri_args)
return (uri, upload)
def parse_json(self, text):
try:
return json.loads(text)
except ValueError:
print("Could not parse JSON:\n {0}".format(text))
sys.exit(1)
def format_list(json_list, columns, args, options):
format = options.format
formatter = None
if format == "raw_json":
output(json_list)
return
elif format == "pretty_json":
enc = json.JSONEncoder(False, False, True, True, True, 2)
output(enc.encode(json.loads(json_list)))
return
else:
formatter = FORMATS[format]
assert_usage(formatter != None,
"Format {0} not recognised".format(format))
formatter_instance = formatter(columns, args, options)
formatter_instance.display(json_list)
class Lister:
def verbose(self, string):
if self.options.verbose:
output(string)
def display(self, json_list):
depth = sys.maxsize
if len(self.columns) == 0:
depth = int(self.options.depth)
(columns, table) = self.list_to_table(json.loads(json_list), depth)
if len(table) > 0:
self.display_list(columns, table)
else:
self.verbose("No items")
def list_to_table(self, items, max_depth):
columns = {}
column_ix = {}
row = None
table = []
def add(prefix, depth, item, fun):
for key in item:
column = prefix == '' and key or (prefix + '.' + key)
subitem = item[key]
if type(subitem) == dict:
if 'json' in self.obj_info and key in self.obj_info['json']:
fun(column, json.dumps(subitem))
else:
if depth < max_depth:
add(column, depth + 1, subitem, fun)
elif type(subitem) == list:
# The first branch has slave nodes in queues in
# mind (which come out looking decent); the second
# one has applications in nodes (which look less
# so, but what would look good?).
if [x for x in subitem if type(x) != str] == []:
serialised = " ".join(subitem)
else:
serialised = json.dumps(subitem)
fun(column, serialised)
else:
fun(column, subitem)
def add_to_columns(col, val):
columns[col] = True
def add_to_row(col, val):
if col in column_ix:
row[column_ix[col]] = str(val)
if len(self.columns) == 0:
for item in items:
add('', 1, item, add_to_columns)
columns = list(columns.keys())
columns.sort(key=column_sort_key)
else:
columns = self.columns
for i in range(0, len(columns)):
column_ix[columns[i]] = i
for item in items:
row = len(columns) * ['']
add('', 1, item, add_to_row)
table.append(row)
return (columns, table)
class TSVList(Lister):
def __init__(self, columns, obj_info, options):
self.columns = columns
self.obj_info = obj_info
self.options = options
def display_list(self, columns, table):
head = "\t".join(columns)
self.verbose(head)
for row in table:
line = "\t".join(row)
output(line)
class LongList(Lister):
def __init__(self, columns, obj_info, options):
self.columns = columns
self.obj_info = obj_info
self.options = options
def display_list(self, columns, table):
sep = "\n" + "-" * 80 + "\n"
max_width = 0
for col in columns:
max_width = max(max_width, len(col))
fmt = "{0:>" + str(max_width) + "}: {1}"
output(sep)
for i in range(0, len(table)):
for j in range(0, len(columns)):
output(fmt.format(columns[j], table[i][j]))
output(sep)
class TableList(Lister):
def __init__(self, columns, obj_info, options):
self.columns = columns
self.obj_info = obj_info
self.options = options
def display_list(self, columns, table):
total = [columns]
total.extend(table)
self.ascii_table(total)
def ascii_table(self, rows):
table = ""
col_widths = [0] * len(rows[0])
for i in range(0, len(rows[0])):
for j in range(0, len(rows)):
col_widths[i] = max(col_widths[i], len(rows[j][i]))
self.ascii_bar(col_widths)
self.ascii_row(col_widths, rows[0], "^")
self.ascii_bar(col_widths)
for row in rows[1:]:
self.ascii_row(col_widths, row, "<")
self.ascii_bar(col_widths)
def ascii_row(self, col_widths, row, align):
txt = "|"
for i in range(0, len(col_widths)):
fmt = " {0:" + align + str(col_widths[i]) + "} "
txt += fmt.format(row[i]) + "|"
output(txt)
def ascii_bar(self, col_widths):
txt = "+"
for w in col_widths:
txt += ("-" * (w + 2)) + "+"
output(txt)
class KeyValueList(Lister):
def __init__(self, columns, obj_info, options):
self.columns = columns
self.obj_info = obj_info
self.options = options
def display_list(self, columns, table):
for i in range(0, len(table)):
row = []
for j in range(0, len(columns)):
row.append("{0}=\"{1}\"".format(columns[j], table[i][j]))
output(" ".join(row))
# TODO handle spaces etc in completable names
class BashList(Lister):
def __init__(self, columns, obj_info, options):
self.columns = columns
self.obj_info = obj_info
self.options = options
def display_list(self, columns, table):
ix = None
for i in range(0, len(columns)):
if columns[i] == 'name':
ix = i
if ix is not None:
res = []
for row in table:
res.append(row[ix])
output(" ".join(res))
FORMATS = {
'raw_json' : None, # Special cased
'pretty_json' : None, # Ditto
'tsv' : TSVList,
'long' : LongList,
'table' : TableList,
'kvp' : KeyValueList,
'bash' : BashList
}
def write_payload_file(payload_file, json_list):
result = json.loads(json_list)[0]
payload = result['payload']
payload_encoding = result['payload_encoding']
f = open(payload_file, 'w')
if payload_encoding == 'base64':
data = base64.b64decode(payload)
else:
data = payload
f.write(data)
f.close()
def print_bash_completion():
script = """# This is a bash completion script for rabbitmqadmin.
# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
# to get tab completion. rabbitmqadmin must be on your PATH for this to work.
_rabbitmqadmin()
{
local cur prev opts base
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts="list show declare delete close purge import export get publish help"
fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse"
case "${prev}" in
list)
COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) )
return 0
;;
show)
COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) )
return 0
;;
declare)
COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) )
return 0
;;
delete)
COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) )
return 0
;;
close)
COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) )
return 0
;;
purge)
COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) )
return 0
;;
export)
COMPREPLY=( $(compgen -f ${cur}) )
return 0
;;
import)
COMPREPLY=( $(compgen -f ${cur}) )
return 0
;;
help)
opts="subcommands config"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
-H)
COMPREPLY=( $(compgen -A hostname ${cur}) )
return 0
;;
--host)
COMPREPLY=( $(compgen -A hostname ${cur}) )
return 0
;;
-V)
opts="$(rabbitmqadmin -q -f bash list vhosts)"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
--vhost)
opts="$(rabbitmqadmin -q -f bash list vhosts)"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
-u)
opts="$(rabbitmqadmin -q -f bash list users)"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
--username)
opts="$(rabbitmqadmin -q -f bash list users)"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
-f)
COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
return 0
;;
--format)
COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) )
return 0
;;
"""
for l in LISTABLE:
key = l[0:len(l) - 1]
script += " " + key + """)
opts="$(rabbitmqadmin -q -f bash list """ + l + """)"
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
;;
"""
script += """ *)
;;
esac
COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
return 0
}
complete -F _rabbitmqadmin rabbitmqadmin
"""
output(script)
if __name__ == "__main__":
main() | gpl-2.0 | -244,143,571,938,590,200 | 34.227319 | 101 | 0.519547 | false |
nathanhilbert/flaskboiler | flaskboiler/lib/util.py | 1 | 4805 | import types
from hashlib import sha1
def flatten(data, sep='.'):
out = {}
for k, v in data.items():
ksep = k + sep
if isinstance(v, dict):
for ik, iv in flatten(v, sep).items():
out[ksep + ik] = iv
else:
out[k] = v
return out
def nestify(data):
""" Cubes returns entries and aggregation results in a non-nested way
with a dotted notation for key names. This function turns that format
into a set of nested dictionaries. """
nested = {}
for key, value in data.items():
path = key.split('.')
out = nested
for i, level in enumerate(path):
if i == len(path) - 1:
out[level] = value
elif level not in out:
out[level] = {}
out = out[level]
return nested
def hash_values(iterable):
"""Return a cryptographic hash of an iterable."""
return sha1(''.join(sha1(unicode(val).encode('utf-8')).hexdigest()
for val in iterable)).hexdigest()
def cache_hash(*a, **kw):
""" Try to hash an arbitrary object for caching. """
def cache_str(o):
if isinstance(o, (types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType,
types.UnboundMethodType)):
return getattr(o, 'func_name', 'func')
if isinstance(o, dict):
o = [k + ':' + cache_str(v) for k, v in o.items()]
if isinstance(o, (list, tuple, set)):
o = sorted(map(cache_str, o))
o = '|'.join(o)
if isinstance(o, basestring):
return o
if hasattr(o, 'updated_at'):
return cache_str((repr(o), o.updated_at))
return repr(o)
hash = cache_str((a, kw)).encode('utf-8')
return sha1(hash).hexdigest()
def sort_by_reference(ref, sort, sort_fn=None):
"""
Sort the iterable ``sort`` by ``sort_fn`` (if omitted, the whole object
will be used to sort) according the order defined by the list given in
``ref``.
Will raise nasty errors if ``ref`` and ``sort`` aren't 1-to-1, and doesn't
currently perform any error-checking to ensure that they are.
Example:
ids = [4, 7, 1, 3]
objs = [{'id': 1}, {'id': 7}, {'id': 4}, {'id': 3}]
sorted = sort_list_pair(ids, objs, lambda x: x['id'])
# => [{'id': 4}, {'id': 7}, {'id': 1}, {'id': 3}]
"""
if sort_fn is None:
sort_fn = lambda x: x
ref_map = dict((r, idx) for idx, r in enumerate(ref))
ordered = [None] * len(ref)
for x in sort:
key = sort_fn(x)
if key in ref_map:
ordered[ref_map[key]] = x
return filter(lambda x: x is not None, ordered)
def expand_facets(facets, dataset):
"""
For the given dataset we return the facets as a dict with facet
names for keys and the value is the list of its members along with
the total count (facet_values).
"""
# We'll fill in and return this dict
expanded_facets = {}
# Find dimension names in the dataset
dimension_names = [d.name for d in dataset.model.dimensions]
# Loop over all facets (their names)
for (facet_name, facet_members) in facets.iteritems():
# We only act on facets which are compound dimensions
if facet_name in dimension_names and dataset.model[facet_name].is_compound:
# Get the dimension from the dataset
dimension = dataset.model[facet_name]
# We get the member names and their facet values into
# their own variables because we need to work more with
# the member names
member_names = []
facet_values = []
for member in facet_members:
# We've processed the members so that they're tuples
# that look like: (name,count)
member_names.append(member[0])
facet_values.append(member[1])
# Get all the members for this dimension
members = dimension.members(dimension.alias.c.name.
in_(member_names))
# We need to sort them by the member names so that they retain
# the same order as the facet_alues
members = sort_by_reference(member_names, members,
lambda x: x['name'])
# Now we zip them all up into tuples and add into the output dict
expanded_facets[facet_name] = zip(members, facet_values)
else:
# If the facet isn't a compound dimension we still want to keep
# it around
expanded_facets[facet_name] = facet_members
# ... and return it
return expanded_facets
| agpl-3.0 | 3,051,748,131,206,881,300 | 32.838028 | 83 | 0.559417 | false |
google/autocjk | src/utils/decomposer.py | 1 | 13949 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for storing decompositions and retrieving them."""
from absl import logging
from src.utils import ideographic_description_sequence
from src.utils import region
from src.utils import shape as shape_lib
from third_party import babelstone
from typing import Any, Dict, Iterable, List, Optional, Text, Tuple, Callable, TypeVar
import icu
import networkx as nx
import re
import sys
# The ids.txt database uses a string like "^⿲冫虫𮫙$(K)" to list an IDS
# (ideographic description sequence) and its list of valid regions.
_IDS_AND_REGIONS_REGEX = re.compile(
r"\^(?P<ids>\S+)\$\s*(\((?P<regions>[A-Z]+)\))?\s*")
VisualMetadataMap = Dict[Text, List[shape_lib.VisualMetadata]]
# Type variables for |_build_paths|.
K = TypeVar("K")
V = TypeVar("V")
def _build_paths(g: nx.DiGraph, node: Any, data_lookup: Callable[[Dict[K, V]],
V],
accumulator: Callable[[V, V], V]):
r"""Utility for accumulating the set of metadata along every path from a node.
Example:
-{f:'a'}-> 2 -{f:'b'}-> 3
/
1
\
-{f:'d'}-> 4 -{f:'e'}-> 5
$ _build_paths(g, node=1, data_lookup=lambda d: d['f'], accumulator a, b: a+b)
> { 2: 'a', 3: 'ba', 4: 'd', 5: 'de' }
Args:
g: The DiGraph in question.
node: The source node
data_lookup: The lambda which can accept a networkx-style metadata
dictionary and extract a particular key (and, optionally, transform it).
accumulator: The lambda which can accept two bits of metadata and join them
in some arbitrary way.
Returns:
A map where the keys are nodes which appear below |node| in the graph, and
where the values are accumulated results of the list of metadata items
extracted by |data_lookup| from each edge along the path from |node| down to
that node.
"""
result = {}
for succ in g.successors(node):
if succ == node:
continue
result[succ] = data_lookup(g.get_edge_data(node, succ))
for subnode, d in _build_paths(g, succ, data_lookup,
accumulator).items():
result[subnode] = accumulator(d, result[succ])
return result
class NoDecompositionError(Exception):
"""Used when the Decomposer class has no decomposition for a character."""
def __init__(self, character: Text):
super().__init__(self)
self.character = character
def __str__(self):
return f"[{self.character}] has no decomposition."
def _get_decomposition_and_regions(
text_in: Text) -> Optional[Tuple[Text, Optional[Text]]]:
"""Given a string matching _IDS_AND_REGIONS_REGEX returns the components.
Args:
text_in: A string matching _IDS_AND_REGIONS_REGEX.
Returns:
A tuple of (a) the IDS string, and (b) a string-list of regions like
"(ABCD)", or None on failure.
"""
match = _IDS_AND_REGIONS_REGEX.match(text_in)
if match is None:
logging.debug("Didn't match compiled regex.")
return None
d = match.groupdict()
return d["ids"], d["regions"]
def parse(
text_in: Text
) -> Optional[ideographic_description_sequence.IdeographicSequenceGroup]:
"""Parses a line of text from //third_party/babelstone:babelstone_ids.
Given a line of text from //third_party/babelstone:babelstone_ids, parses
it and returns the resultant IdeographicSequenceGroup, if valid.
Expects a tab-separated string where the second column is a single CJK
character, and the third and all following columns match
_IDS_AND_REGIONS_REGEX. For example:
"U+4E05 丅 ^⿱一丨$(GJT) ^下-㇔$(X)"
Args:
text_in: a line of text
Returns:
A resultant IdeographicSequenceGroup, or None if the input was invalid.
"""
split_input = text_in.rstrip().split("\t")
if len(split_input) < 3:
logging.debug("Input must be at least three columns.")
return None
character = split_input[1]
maybe_decomposition_and_regions = _get_decomposition_and_regions(
split_input[2])
if maybe_decomposition_and_regions is None:
logging.debug("Invalid input: %s", text_in)
return None
default_decomposition, _ = maybe_decomposition_and_regions
s_set = ideographic_description_sequence.IdeographicSequenceGroup(
ideographic_description_sequence.IdeographicSequence(
character, default_decomposition))
for i in range(3, len(split_input)):
maybe_decomposition_and_regions = _get_decomposition_and_regions(
split_input[i])
if maybe_decomposition_and_regions is None:
return None
alt_decomposition, alt_regions = maybe_decomposition_and_regions
if not alt_regions:
return None
s_set.insert([region.Region[r] for r in alt_regions],
ideographic_description_sequence.IdeographicSequence(
character, alt_decomposition))
return s_set
class Decomposer():
"""A class for holding sequences and retrieving them."""
def __init__(self):
# Graph where the nodes are unicode characters and the edges are "contains"
# such that successors(尔) = [...你...]., and predecessors(你) = [亻,尔].
# So, insert with self._graph.add_edge( "亻", "你" )
# self._graph.add_edge( "尔", "你" )
self._graph = nx.DiGraph()
with open(babelstone.PATH_TO_IDS_TXT, encoding="UTF-8") as fp:
for line in fp:
# Ignore comments
if line.startswith("#"):
continue
# TODO(ambuc): ids.txt uses:
# {1}, {2}, etc. to represent unencoded components.
# ↔ as a mirror operator, i.e. to represent a component without
# a Unicode encoding, but whose mirror does have a Unicode
# encoding.
# ↷ as a rotation operator, i.e. to represent a component
# without a Unicode encoding, but whose 180deg rotation does
# have a Unicode encoding.
# 〾 as a variation indicator. We should try to handle these.
# ?, ? ids.txt uses these to represent an unencodable component.
# We should probably try to handle these edge cases.
elif re.search("[{}↔↷〾??]", line):
continue
maybe_parsed_set = parse(str(icu.UnicodeString(line)))
if maybe_parsed_set is not None:
self.insert(maybe_parsed_set)
def characters(self) -> Iterable[Text]:
"""Returns an iterable of characters which have decompositions."""
return [
node for node in self._graph.nodes()
if list(self._graph.predecessors(node))
]
def contains(self, character: Text) -> bool:
"""Returns True if a decomposer is aware of a character, False otherwise.
Args:
character: The query character.
Returns:
A boolean, true if the decomposer can handle the character.
"""
if character not in self._graph.nodes():
return False
return bool(list(self._graph.predecessors(character)))
def decompose(
self, input_region: region.Region, character: Text
) -> ideographic_description_sequence.IdeographicSequence:
"""Attempts to decompose a character with respect to input region.
Args:
input_region: The input region. See region.py
character: Some input character.
Returns:
An IdeographicSequence, if the character has a known decomposition
Raises:
NoDecompositionError: if the character has no known decomposition.
"""
if character not in self._graph.nodes:
raise NoDecompositionError(character)
if "idg" not in self._graph.nodes[character]:
raise NoDecompositionError(character)
if not list(self._graph.predecessors(character)):
raise NoDecompositionError(character)
return self._graph.nodes[character]["idg"].get_sequence_at_region(
input_region)
def insert(
self,
sequence_set: ideographic_description_sequence.IdeographicSequenceGroup
) -> bool:
"""Inserts a character into the decomposer's internal map.
TODO(ambuc): Support region-aware insertion.
Args:
sequence_set: the IdeographicSequenceGroup being inserted.
Returns:
True on success, False otherwise.
"""
char = sequence_set.character()
decomp = sequence_set.default_sequence.decomposition
i = self._traverse_sequence(
0, char, decomp,
shape_lib.VisualMetadata(shape=shape_lib.UnitSquare, parent=char))
if i < len(decomp):
logging.debug("Something went wrong trying to parse decomp: %s",
",".join(["U+%04x" % ord(o) for o in decomp]))
return False
self._graph.add_node(char, idg=sequence_set)
return True
def _get_with_component(
self, component: Text) -> Iterable[Tuple[Text, shape_lib.Shape]]:
return _build_paths(g=self._graph,
node=component,
data_lookup=lambda m: m["metadata"].shape,
accumulator=lambda a, b: a.portion(b)).items()
def get_component(self, component: Text) -> List[Text]:
return [c for c, _ in self._get_with_component(component)]
def get_component_with_shape(self,
component: Text,
shape: shape_lib.Shape,
w_t: float = 0.0,
h_t: float = 0.0,
x_t: float = 0.0,
y_t: float = 0.0) -> List[Text]:
"""Returns characters containing |component| at |shape| in pure-space."""
# pylint: disable=g-complex-comprehension
return [
c for c, summed_shape in self._get_with_component(component)
if shape_lib.is_in_window(summed_shape.width, shape.width, w_t)
and shape_lib.is_in_window(summed_shape.height, shape.height, h_t)
and shape_lib.is_in_window(summed_shape.x_offset, shape.x_offset,
x_t) and
shape_lib.is_in_window(summed_shape.y_offset, shape.y_offset, y_t)
]
# pylint: enable=g-complex-comprehension
def get_component_with_aspectratio(self,
component: Text,
w: float = 0.0,
h: float = 0.0,
w_t: float = 0.0,
h_t: float = 0.0) -> List[Text]:
return [
c for c, summed_shape in self._get_with_component(component)
if shape_lib.is_in_window(summed_shape.width, w, w_t)
and shape_lib.is_in_window(summed_shape.height, h, h_t)
]
def get_shape_within(self, subcomponent: Text,
component: Text) -> shape_lib.Shape:
"""Returns the pure-space shape of |subcomponent| within |component|.
Example usage:
get_shape_within('尔', '你') => shape_lib.RightHalf
Args:
subcomponent: the subcomponent inside |component| we're seeking.
component: a character which contains |subcomponent|.
Returns:
The pure-space shape of one within the other.
"""
path = nx.shortest_path(self._graph, subcomponent, component)
shape = shape_lib.UnitSquare
while len(path) >= 2:
shape = shape.portion(
self._graph.get_edge_data(path[0], path[1])["metadata"].shape)
path.pop(0)
return shape
def _traverse_sequence(self, i: int, character: Text, decomposition: Text,
metadata: shape_lib.VisualMetadata) -> int:
"""Given some partially-traversed decomposition AST, finishes traversing it.
Args:
i: The index in the decomposition our cursor is currently at.
character: The character this tree corresponds to.
decomposition: The decomposition AST, possibly partially traversed as
indicated by |i|.
metadata: A VisualMetadata struct representing the intermediate state of
this node relative to the whole unit square.
Returns:
A new cursor position, or possibly the same cursor position if we are at
the end.
"""
if i >= len(decomposition):
return i
head = decomposition[i]
i += 1
# If there is no decomposition, we've reached a fundamental particle and
# can't go any further.
if not shape_lib.is_verb(head):
self._graph.add_edge(head, character, metadata=metadata)
return i
for arg in shape_lib.get_subshapes_of_verb(head):
i = self._traverse_sequence(
i, character, decomposition,
shape_lib.VisualMetadata(shape=metadata.shape.portion(arg),
parent=character))
return i
| apache-2.0 | 3,975,600,111,754,819,600 | 37.264463 | 87 | 0.6 | false |
rohe/pysaml2-3 | tools/verify_metadata.py | 1 | 1803 | #!/usr/bin/env python
from saml2.sigver import _get_xmlsec_cryptobackend, SecurityContext
from saml2.httpbase import HTTPBase
from saml2 import saml
from saml2 import md
from saml2.attribute_converter import ac_factory
from saml2.extension import dri
from saml2.extension import idpdisc
from saml2.extension import mdattr
from saml2.extension import mdrpi
from saml2.extension import mdui
from saml2.extension import shibmd
from saml2.extension import ui
import xmldsig
import xmlenc
import argparse
from saml2.mdstore import MetaDataFile, MetaDataExtern
__author__ = 'rolandh'
"""
A script that imports and verifies metadata.
"""
ONTS = {
saml.NAMESPACE: saml,
mdui.NAMESPACE: mdui,
mdattr.NAMESPACE: mdattr,
mdrpi.NAMESPACE: mdrpi,
dri.NAMESPACE: dri,
ui.NAMESPACE: ui,
idpdisc.NAMESPACE: idpdisc,
md.NAMESPACE: md,
xmldsig.NAMESPACE: xmldsig,
xmlenc.NAMESPACE: xmlenc,
shibmd.NAMESPACE: shibmd
}
parser = argparse.ArgumentParser()
parser.add_argument('-t', dest='type')
parser.add_argument('-u', dest='url')
parser.add_argument('-c', dest='cert')
parser.add_argument('-a', dest='attrsmap')
parser.add_argument('-o', dest='output')
parser.add_argument('-x', dest='xmlsec')
parser.add_argument(dest="item")
args = parser.parse_args()
metad = None
if args.type == "local":
metad = MetaDataFile(list(ONTS.values()), args.item, args.item)
elif args.type == "external":
ATTRCONV = ac_factory(args.attrsmap)
httpc = HTTPBase()
crypto = _get_xmlsec_cryptobackend(args.xmlsec)
sc = SecurityContext(crypto)
metad = MetaDataExtern(list(ONTS.values()), ATTRCONV, args.url,
sc, cert=args.cert, http=httpc)
if metad:
try:
metad.load()
except:
raise
else:
print("OK")
| bsd-2-clause | 855,684,009,197,696,600 | 22.723684 | 67 | 0.703272 | false |
doganulus/montre | montre/algebra.py | 1 | 4581 | import os
import sys
from ctypes import *
from distutils.sysconfig import get_python_lib
# if os.name == 'nt':
# libmontre = windll.LoadLibrary(os.path.join(get_python_lib(), "libmontre"))
# else:
# libmontre = cdll.LoadLibrary(os.path.join(get_python_lib(), "libmontre.so"))
if os.name == 'nt':
libmontre = windll.LoadLibrary(os.path.join("..", "libmontre"))
else:
libmontre = cdll.LoadLibrary(os.path.join("..", "libmontre.so"))
zone_type = POINTER(c_int64)
libmontre.zs_create.restype = c_void_p
libmontre.zs_create.argtypes = []
libmontre.zs_destroy.restype = None
libmontre.zs_destroy.argtypes = []
libmontre.zs_size.restype = c_int64
libmontre.zs_includes.restype = c_int64
libmontre.zs_append.argtypes = [c_void_p, c_int64, c_int64, c_int64, c_int64,c_int64,c_int64]
libmontre.zs_append.restype = None
libmontre.zs_append_not_anchored.argtypes = [c_void_p, c_int64, c_int64]
libmontre.zs_append_not_anchored.restype = None
libmontre.zs_get_zone.restype = zone_type
libmontre.zs_get_zone.argtypes = [c_void_p, c_int64]
class TimedRelation:
def __init__(self, obj=None):
if obj == None:
self.obj = libmontre.zs_create()
else:
self.obj = obj
def __del__(self):
libmontre.zs_destroy(self.obj)
def __len__(self):
return libmontre.zs_size(self.obj)
def __iter__(self):
def get_zone(self, i):
z = libmontre.zs_get_zone(self.obj, i)
return (z[1], z[3], z[2], z[6], z[5], z[7])
return (get_zone(self, i) for i in range(len(self)))
@property
def zones(self):
return [((-(z[0]//2), z[1]//2, -(z[2]//2), z[3]//2, -(z[4]//2), z[5]//2), (z[0]%2, z[1]%2, z[2]%2, z[3]%2, z[4]%2, z[5]%2)) for z in self]
def __str__(self):
return "\n".join([
"({bminval}{bminbnd}x{bmaxbnd}{bmaxval}, {eminval}{eminbnd}y{emaxbnd}{emaxval}, {dminval}{dminbnd}y-x{dmaxbnd}{dmaxval})".format(
bminval=v[0], bminbnd="<" if b[0] == 0 else "<=",
bmaxval=v[1], bmaxbnd="<" if b[1] == 0 else "<=",
eminval=v[2], eminbnd="<" if b[2] == 0 else "<=",
emaxval=v[3], emaxbnd="<" if b[3] == 0 else "<=",
dminval=v[4], dminbnd="<" if b[4] == 0 else "<=",
dmaxval=v[5], dmaxbnd="<" if b[5] == 0 else "<=",
) for v, b in self.zones]
)
# return str([values for values, bounds in self.zones])
# def __getitem__(self, i): # access elements in vector at index
# if 0 <= i < len(self):
# return libmontre.zs_get_zone(self.obj, i)
# raise IndexError('Vector index out of range')
def append(self, bmin=0, bmax=sys.maxsize, emin=0, emax=sys.maxsize, dmin=0, dmax=sys.maxsize):
libmontre.zs_append(self.obj, bmin, bmax, emin, emax, dmin, dmax)
return self
def append_not_anchored(self, begin, end):
libmontre.zs_append_not_anchored(self.obj, begin, end)
return self
def absorb(self):
retobj = libmontre.zs_create()
libmontre.zs_filter(retobj, self.obj)
self.obj = retobj
@staticmethod
def absorbed(self):
retobj = libmontre.zs_create()
libmontre.zs_filter(retobj, self.obj)
return TimedRelation(retobj)
def includes(self, other):
return libmontre.zs_includes(self.obj, other.obj) != 0
def restrict(self, other, a, b):
retobj = libmontre.zs_create()
libmontre.zs_restrict(retobj, self.obj, c_int64(a), c_int64(b))
return TimedRelation(retobj)
def intersect(self, other):
retobj = libmontre.zs_create()
libmontre.zs_intersect(retobj, self.obj, other.obj)
return TimedRelation(retobj)
def concatenate(self, other):
retobj = libmontre.zs_create()
libmontre.zs_concatenate(retobj, self.obj, other.obj)
return TimedRelation(retobj)
def union(self, other):
retobj = libmontre.zs_create()
libmontre.zs_union(retobj, self.obj, other.obj)
return TimedRelation(retobj)
def plus(self, other):
retobj = libmontre.zs_create()
libmontre.zs_plus(retobj, self.obj)
return TimedRelation(retobj)
class Bound:
@staticmethod
def lt(c):
return 2*c
@staticmethod
def leq(c):
return 2*c + 1
@staticmethod
def gt(c):
return -2*c
@staticmethod
def geq(c):
return -2*c + 1
| gpl-3.0 | -8,366,900,255,173,163,000 | 30.721429 | 146 | 0.577385 | false |
petchat/senz.dev.dashboard | templates/other/models.py | 1 | 33627 | # -*- coding: utf-8 -*-
import leancloud
from leancloud import Object
from leancloud import LeanCloudError
from leancloud import Query
from leancloud import User
from wsgi import signer
not_binary_label_dict = {'field':['field__manufacture', 'field__financial', 'field__infotech', 'field__law', 'field__agriculture', 'field__human_resource', 'field__commerce', 'field__natural', 'field__service', 'field__humanities', 'field__medical', 'field__architecture', 'field__athlete'], 'age':['age__16to35', 'age__35to55', 'age__55up', 'age__16down'], 'sport':['sport__basketball', 'sport__bicycling', 'sport__tabel_tennis', 'sport__football', 'sport__jogging', 'sport__badminton', 'sport__fitness'],'consumption': ['consumption__10000to20000', 'consumption__20000up', 'consumption__5000to10000', 'consumption__5000down'], 'occupation':['occupation__freelancer', 'occupation__supervisor', 'occupation__student', 'occupation__others', 'occupation__official', 'occupation__salesman', 'occupation__teacher', 'occupation__soldier', 'occupation__engineer']}
binary_label_list = [u'ACG', u'indoorsman', u'game_show', u'has_car', u'game_news', u'entertainment_news', u'health', u'online_shopping', u'variety_show', u'business_news', u'tvseries_show', u'current_news', u'sports_news', u'tech_news', u'offline_shopping', u'pregnant', u'gender', u'study', u'married', u'sports_show', u'gamer', u'social', u'has_pet']
query_limit = 1000
class Dashboard:
def __init__(self):
self.app_id = None
pass
def get_all_tracker(self):
try:
            # Note: we cannot assume there are fewer than 1000 end users here
            # (see the pagination sketch after this method)
Application = Object.extend('Application')
query = Query(Application)
query.equal_to('app_id',self.app_id)
query.ascending('createdAt')
query.limit(1000)
result_list = query.find()
all_tracker_dict = {}
if result_list:
for result in result_list:
all_tracker_dict[result.get('username')] = result.id
self.all_tracker_dict = all_tracker_dict
return 1
except LeanCloudError,e:
print e
return 0
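    # Illustrative sketch (not part of the original code): the note above points out
    # that more than 1000 trackers may exist, while a single LeanCloud query returns
    # at most 1000 rows. A paginated fetch along these lines (the helper name is
    # hypothetical) would lift that cap:
    #
    #     def _fetch_all(self, query, page_size=1000):
    #         results, skip = [], 0
    #         while True:
    #             query.limit(page_size)
    #             query.skip(skip)
    #             batch = query.find()
    #             results.extend(batch)
    #             if len(batch) < page_size:
    #                 return results
    #             skip += page_size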
def get_age_and_gender_data_dict(self,table_name='AppStaticInfo',filed_name = 'app'):
try:
Application = Object.extend('Application')
query = Query(Application)
query.equal_to('app_id',self.app_id)
result_list = query.find()
length = len(result_list)
if length==0:
print 'error: application not exists in table Applicaiton'
return 0
elif length != 1:
print 'error: multi application exists in table Applicaiton'
return 0
else:
app = result_list[0]
DbTable = Object.extend(table_name)
query = Query(DbTable)
query.equal_to(filed_name,app)
result_list = query.find()
length = len(result_list)
if length==0:
print 'error: application not exists in table %s' %(str(table_name))
return 0
elif length != 1:
print 'error: multi application exists in table %s' %(str(table_name))
return 0
else:
app_static_info = result_list[0]
age_and_gender_dict = app_static_info.get('age_and_gender')
return age_and_gender_dict
# WeightedStaticInfo = Object.extend('WeightedStaticInfo')
# query = Query(WeightedStaticInfo)
# query.exists('objectId')
# query.select('age','gender')
# staticInfoList = query.find()
# gender_type_list =['man','woman']
# age_type_list = ['16down','16to35','35to55','55up']
# dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list}
#
# for staticInfo in staticInfoList:
# gender = 'man' if staticInfo.get('gender') >0 else 'woman'
# age_info_dict= staticInfo.get('age')
# dataDict[gender][age_info_dict.keys()[0]] += 1
# # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList}
# new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()}
# for index ,age_type in enumerate(age_type_list):
# for gender_type in dataDict.keys():
# new_data_dict[gender_type][index] = dataDict[gender_type][age_type]
except LeanCloudError, e:
raise e
return age_and_gender_dict
def get_occupation_data_dict(self):
try:
WeightedStaticInfo = Object.extend('WeightedStaticInfo')
query = Query(WeightedStaticInfo)
query.exists('objectId')
staticInfoList = query.find()
            gender_type_list = ['man', 'woman']
            age_type_list = ['16down', '16to35', '35to55', '55up']
            dataDict = {gender_type: {age_type: 0 for age_type in age_type_list} for gender_type in gender_type_list}
for staticInfo in staticInfoList:
gender = 'man' if staticInfo.get('gender') >0 else 'woman'
age_info_dict= staticInfo.get('age')
dataDict[gender][age_info_dict.keys()[0]] += 1
# dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList}
new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()}
for index ,age_type in enumerate(age_type_list):
for gender_type in dataDict.keys():
new_data_dict[gender_type][index] = dataDict[gender_type][age_type]
except LeanCloudError, e:
raise e
return new_data_dict
    # The three functions below share most of their code and could be merged/refactored
def get_location_distribution_data_dict(self):
field = 'location'
k = 5
unknown = 'unknown'
try:
WeightedStaticInfo = Object.extend('WeightedUserContext')
query = Query(WeightedStaticInfo)
query.exists('objectId')
query.select(field)
            # TODO: this should later be filtered by application id
            # and the query also needs to be grouped
resultList = query.find()
seen_location_dict = {}
user_count = len(resultList)
for result in resultList:
location_dict = result.get(field)
for key, value in location_dict.items():
if key in seen_location_dict.keys():
seen_location_dict[key] += location_dict[key]
else:
seen_location_dict[key] = location_dict[key]
total_unknown_location_value = seen_location_dict.get(unknown)
            # If seen_location_dict contains an 'unknown' key, remove it
if total_unknown_location_value:
del seen_location_dict[unknown]
sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True)
sorted_frequent_location = sorted_seen_location[0:k]
total_known_time = user_count - total_unknown_location_value
sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location]
sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage])))
except LeanCloudError, e:
raise e
return sorted_frequent_location_percentage
def get_motion_distribution_data_dict(self):
field = 'motion'
k = 5
unknown = 'unknown'
try:
WeightedStaticInfo = Object.extend('WeightedUserContext')
query = Query(WeightedStaticInfo)
query.exists('objectId')
query.select(field)
            # TODO: this should later be filtered by application id
            # and the query also needs to be grouped
resultList = query.find()
seen_location_dict = {}
user_count = len(resultList)
for result in resultList:
location_dict = result.get(field)
                for key, value in location_dict.items():
if key in seen_location_dict.keys():
seen_location_dict[key] += location_dict[key]
else:
seen_location_dict[key] = location_dict[key]
total_unknown_location_value = seen_location_dict.get(unknown)
            # If seen_location_dict contains an 'unknown' key, remove it
if total_unknown_location_value:
del seen_location_dict[unknown]
sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True)
sorted_frequent_location = sorted_seen_location[0:k]
total_known_time = user_count - total_unknown_location_value
sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location]
sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage])))
except LeanCloudError, e:
raise e
return sorted_frequent_location_percentage
def get_sound_distribution_data_dict(self):
field = 'sound'
k = 5
unknown = 'unknown'
try:
WeightedStaticInfo = Object.extend('WeightedUserContext')
query = Query(WeightedStaticInfo)
query.exists('objectId')
query.select(field)
            # TODO: this should later be filtered by application id
            # and the query also needs to be grouped
resultList = query.find()
seen_location_dict = {}
user_count = len(resultList)
for result in resultList:
location_dict = result.get(field)
                for key, value in location_dict.items():
if key in seen_location_dict.keys():
seen_location_dict[key] += location_dict[key]
else:
seen_location_dict[key] = location_dict[key]
total_unknown_location_value = seen_location_dict.get(unknown)
            # If seen_location_dict contains an 'unknown' key, remove it
if total_unknown_location_value:
del seen_location_dict[unknown]
sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True)
sorted_frequent_location = sorted_seen_location[0:k]
total_known_time = user_count - total_unknown_location_value
sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location]
sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage])))
except LeanCloudError, e:
raise e
return sorted_frequent_location_percentage
def get_event_to_activity_data(self,application_id,event_name,db_name='EventActivity'):
try:
DbTable = Object.extend(db_name)
query = Query(DbTable)
            # For testing only: we know there are fewer than 1K rows here
query.equal_to('event_name',event_name)
# query.equal_to('application_id',application_id)
query.descending('createdAt')
query.limit(1)
result = query.find()
activity_statistics_dict = result[0].get('activity_dict')
except LeanCloudError, e:
raise e
return activity_statistics_dict
# query.select('user','timestamp')
# resultList = query.find()
# DBTable = Object.extend('MergedUserContext')
# activity_dict = {}
# total_count = len(resultList)
# print 'the length of resultList is : %s' %(str(total_count))
# for index1,result in enumerate(resultList):
# query = Query(DBTable)
# query.equal_to('user',result.get('user'))
# query.less_than_or_equal_to('startTime',result.get('timestamp'))
# query.greater_than_or_equal_to('endTime',result.get('timestamp'))
# resultList1 = query.find()
# if len(resultList1) == 1 or len(resultList1) == 2 :
# activity = resultList1[0].get('event')[0]
# if activity in activity_dict.keys():
# activity_dict[activity]+=1
# else:
# activity_dict[activity] =1
# else:
# print 'length of resultList1: %s' %(str(len(resultList1)))
# print 'Seems to be an error,index: %s,user: %s; timestamp: %s \n' %(str(index1),str(result.get('user').id),str(result.get('timestamp')))
#
# activity_dict['others'] = total_count-sum(activity_dict.values())
# Note: this Developer class does not inherit from (LeanCloud) User
class Developer:
def __init__(self,user_id=None):
self.user = User()
self.user_id = user_id
    @classmethod
    def is_valid_email(cls, email):
        query = Query(User)
        # look up whether this email address is already registered
        query.equal_to('email', email)
        return 0 if query.find() else 1
def signup(self,email,username,password):
self.user.set('email',email)
self.user.set('username',username)
self.user.set('password',password)
try:
result = self.user.sign_up()
print result
return 1
except LeanCloudError,e:
print e
return 0
def login_with_email(self,email,password):
# self.user.login(email,password)
pass
def login_with_username(self,username,password):
try:
self.user.login(username,password)
self.user_id = self.user.id
self.session_token = self.user.get_session_token()
print 'user.id: %s' %(str(self.user_id))
print 'session_token: %s' %(str(self.session_token))
return 1
except LeanCloudError,e:
print e
return 0
def init_developer_with_user_id(self,user_id):
query = Query(User)
query.equal_to('objectId',user_id)
result = query.find()
if len(result)==1:
return result[0]
else:
print len(result)
print user_id
def get_all_application(self):
try:
            # Assume the number of applications is fewer than 1000
Application = Object.extend('Application')
query = Query(Application)
query.equal_to('user',self.user.become(self.session_token))
query.ascending('createdAt')
query.limit(1000)
result_list = query.find()
all_application_dict = {}
if result_list:
for result in result_list:
all_application_dict[result.get('app_name')] = result.get('app_id')
self.all_application_dict = all_application_dict
return 1
except LeanCloudError,e:
print e
return 0
def get_all_tracker(self):
try:
            # Assume there are fewer than 1000 records here
Tracker = Object.extend('Tracker')
query = Query(Tracker)
query.exists('objectId')
query.ascending('createdAt')
query.limit(1000)
result_list = query.find()
all_tracker_dict = {}
if result_list:
for result in result_list:
all_tracker_dict[result.get('username')] = result.id
self.all_tracker_dict = all_tracker_dict
return 1
except LeanCloudError,e:
print e
return 0
def create_new_app(self,app_name):
try:
user = self.user.become(self.session_token)
Application = Object.extend('Application')
application = Application()
query = Query(Application)
query.equal_to('user',user)
query.equal_to('app_name',app_name)
if query.find():
print 'Application name exists!'
return 0
else:
application.set('app_name',app_name)
application.set('user',user)
application.save()
app_id = application.id
app_key = (signer.sign(app_id).split(app_id+'.'))[1]
# app_key = app_id+"this is app_key"
application.set('app_id',app_id)
application.set('app_key',app_key)
application.save()
return 1
except LeanCloudError,e:
print e
return 0
# query = Query(Application)
# app_id = signer.sign(app_name).split(app_name+'.')[1]
# query.equal_to('user',user)
# query.equal_to('app_id',app_id)
# if query.find():
# print 'Application name exists'
# return 0
# else:
# application.set('app_name',app_name)
# application.set('app_id',app_id)
# application.set('user',user)
# application.save()
def connect_new_tracker(self,tracker_id='',app_id=''):
try:
user = self.user.become(self.session_token)
Application = Object.extend('Application')
query = Query(Application)
query.equal_to('user',user)
query.equal_to('app_id',app_id)
app_list = query.find()
if len(app_list)!=1:
print 'error with the length of app_list: %s' %(str(len(app_list)))
return 0
else:
the_app = app_list[0]
print 'successfully get the app with app_id: %s' %(str(the_app.id))
Tracker = Object.extend('Tracker')
query = Query(Tracker)
query.equal_to('objectId',tracker_id)
tracker_list = query.find()
if len(tracker_list) != 1:
print "error with the length of tracker_list: %s" %(str(len(tracker_list)))
return 0
else:
the_tracker = tracker_list[0]
print 'successfully get the tracker with object_id: %s' %(str(the_tracker.id))
app_relation_to_tracker = the_app.relation('tracker')
# tracker_relation_to_app = the_tracker.relation('application')
app_relation_to_tracker.add(the_tracker)
# tracker_relation_to_app.add(the_app)
print 'ready to save'
# the_tracker.save()
# print 'successful save the_tracker'
the_app.save()
print 'successful save the_app'
return 1
except LeanCloudError,e:
print e
return 0
# # -*- coding: utf-8 -*-
# import leancloud
# from leancloud import Object
# from leancloud import LeanCloudError
# from leancloud import Query
# from leancloud import User
# import time
# import datetime
# import operator
# import numpy as np
# from logentries import LogentriesHandler
# import logging
# # from flask import current_app
#
# from wsgi import signer
#
#
# not_binary_label_dict = {'field':['field__manufacture', 'field__financial', 'field__infotech', 'field__law', 'field__agriculture', 'field__human_resource', 'field__commerce', 'field__natural', 'field__service', 'field__humanities', 'field__medical', 'field__architecture', 'field__athlete'], 'age':['age__16to35', 'age__35to55', 'age__55up', 'age__16down'], 'sport':['sport__basketball', 'sport__bicycling', 'sport__tabel_tennis', 'sport__football', 'sport__jogging', 'sport__badminton', 'sport__fitness'],'consumption': ['consumption__10000to20000', 'consumption__20000up', 'consumption__5000to10000', 'consumption__5000down'], 'occupation':['occupation__freelancer', 'occupation__supervisor', 'occupation__student', 'occupation__others', 'occupation__official', 'occupation__salesman', 'occupation__teacher', 'occupation__soldier', 'occupation__engineer']}
# binary_label_list = [u'ACG', u'indoorsman', u'game_show', u'has_car', u'game_news', u'entertainment_news', u'health', u'online_shopping', u'variety_show', u'business_news', u'tvseries_show', u'current_news', u'sports_news', u'tech_news', u'offline_shopping', u'pregnant', u'gender', u'study', u'married', u'sports_show', u'gamer', u'social', u'has_pet']
#
# class Dashboard:
# def get_age_and_gender_data_dict(self):
# try:
# WeightedStaticInfo = Object.extend('WeightedStaticInfo')
# query = Query(WeightedStaticInfo)
# query.exists('objectId')
# query.select('age','gender')
# staticInfoList = query.find()
# gender_type_list =['man','woman']
# age_type_list = ['16down','16to35','35to55','55up']
# dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list}
#
# for staticInfo in staticInfoList:
# gender = 'man' if staticInfo.get('gender') >0 else 'woman'
# age_info_dict= staticInfo.get('age')
# dataDict[gender][age_info_dict.keys()[0]] += 1
# # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList}
# new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()}
# for index ,age_type in enumerate(age_type_list):
# for gender_type in dataDict.keys():
# new_data_dict[gender_type][index] = dataDict[gender_type][age_type]
#
# except LeanCloudError, e:
#
# raise e
# return new_data_dict
# def get_occupation_data_dict(self):
# try:
# WeightedStaticInfo = Object.extend('WeightedStaticInfo')
# query = Query(WeightedStaticInfo)
# query.exists('objectId')
# staticInfoList = query.find()
# dataDict ={gender_type:{age_type:0 for age_type in age_type_list} for gender_type in gender_type_list}
#
# for staticInfo in staticInfoList:
# gender = 'man' if staticInfo.get('gender') >0 else 'woman'
# age_info_dict= staticInfo.get('age')
# dataDict[gender][age_info_dict.keys()[0]] += 1
# # dataDict ={'man' if staticInfo.get('gender') >0 else 'woman':dataDict['man' if staticInfo.get('gender') >0 else 'woman'][staticInfo.get('age').keys()[0]] +=1 for staticInfo in staticInfoList}
# new_data_dict = {key:[0 for i in range(4)] for key in dataDict.keys()}
# for index ,age_type in enumerate(age_type_list):
# for gender_type in dataDict.keys():
# new_data_dict[gender_type][index] = dataDict[gender_type][age_type]
#
# except LeanCloudError, e:
#
# raise e
# return new_data_dict
#
# #下面三个函数的代码可以优化合并
# def get_location_distribution_data_dict(self):
# field = 'location'
# k = 5
# unknown = 'unknown'
# try:
# WeightedStaticInfo = Object.extend('WeightedUserContext')
# query = Query(WeightedStaticInfo)
# query.exists('objectId')
# query.select(field)
# # 这个地方后面需要做根据applicationid查询
# #另外也需要分组查询
# resultList = query.find()
# seen_location_dict = {}
# user_count = len(resultList)
#
# for result in resultList:
# location_dict = result.get(field)
# for key, valu in location_dict.items():
# if key in seen_location_dict.keys():
# seen_location_dict[key] += location_dict[key]
# else:
# seen_location_dict[key] = location_dict[key]
# total_unknown_location_value = seen_location_dict.get(unknown)
# #如果seen_location_dict中含有unknown字段的话,就删掉
# if total_unknown_location_value:
# del seen_location_dict[unknown]
#
# sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True)
# sorted_frequent_location = sorted_seen_location[0:k]
# total_known_time = user_count - total_unknown_location_value
# sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location]
# sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage])))
#
#
#
# except LeanCloudError, e:
#
# raise e
# return sorted_frequent_location_percentage
# def get_motion_distribution_data_dict(self):
# field = 'motion'
# k = 5
# unknown = 'unknown'
# try:
# WeightedStaticInfo = Object.extend('WeightedUserContext')
# query = Query(WeightedStaticInfo)
# query.exists('objectId')
# query.select(field)
# # 这个地方后面需要做根据applicationid查询
# #另外也需要分组查询
# resultList = query.find()
# seen_location_dict = {}
# user_count = len(resultList)
#
# for result in resultList:
# location_dict = result.get(field)
# for key, valu in location_dict.items():
# if key in seen_location_dict.keys():
# seen_location_dict[key] += location_dict[key]
# else:
# seen_location_dict[key] = location_dict[key]
# total_unknown_location_value = seen_location_dict.get(unknown)
# #如果seen_location_dict中含有unknown字段的话,就删掉
# if total_unknown_location_value:
# del seen_location_dict[unknown]
#
# sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True)
# sorted_frequent_location = sorted_seen_location[0:k]
# total_known_time = user_count - total_unknown_location_value
# sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location]
# sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage])))
#
#
#
# except LeanCloudError, e:
#
# raise e
# return sorted_frequent_location_percentage
# def get_sound_distribution_data_dict(self):
# field = 'sound'
# k = 5
# unknown = 'unknown'
# try:
# WeightedStaticInfo = Object.extend('WeightedUserContext')
# query = Query(WeightedStaticInfo)
# query.exists('objectId')
# query.select(field)
# # 这个地方后面需要做根据applicationid查询
# #另外也需要分组查询
# resultList = query.find()
# seen_location_dict = {}
# user_count = len(resultList)
#
# for result in resultList:
# location_dict = result.get(field)
# for key, valu in location_dict.items():
# if key in seen_location_dict.keys():
# seen_location_dict[key] += location_dict[key]
# else:
# seen_location_dict[key] = location_dict[key]
# total_unknown_location_value = seen_location_dict.get(unknown)
# #如果seen_location_dict中含有unknown字段的话,就删掉
# if total_unknown_location_value:
# del seen_location_dict[unknown]
#
# sorted_seen_location = sorted(seen_location_dict.items(), key=lambda l: l[1], reverse=True)
# sorted_frequent_location = sorted_seen_location[0:k]
# total_known_time = user_count - total_unknown_location_value
# sorted_frequent_location_percentage = [(str(kv[0]),(kv[1]/total_known_time)) for kv in sorted_frequent_location]
# sorted_frequent_location_percentage.append(('others',1-sum([kv[1] for kv in sorted_frequent_location_percentage])))
#
# except LeanCloudError, e:
#
# raise e
# return sorted_frequent_location_percentage
#
#
#
#
#
# def get_event_to_activity_data(self,application_id,event_name,db_name='EventActivity'):
# try:
# DbTable = Object.extend(db_name)
# query = Query(DbTable)
# #这里只是测试知道是少于1K条的
# query.equal_to('event_name',event_name)
# # query.equal_to('application_id',application_id)
# query.descending('createdAt')
# query.limit(1)
# result = query.find()
# activity_statistics_dict = result[0].get('activity_dict')
#
#
# except LeanCloudError, e:
#
# raise e
# return activity_statistics_dict
#
# # query.select('user','timestamp')
# # resultList = query.find()
# # DBTable = Object.extend('MergedUserContext')
# # activity_dict = {}
# # total_count = len(resultList)
# # print 'the length of resultList is : %s' %(str(total_count))
# # for index1,result in enumerate(resultList):
# # query = Query(DBTable)
# # query.equal_to('user',result.get('user'))
# # query.less_than_or_equal_to('startTime',result.get('timestamp'))
# # query.greater_than_or_equal_to('endTime',result.get('timestamp'))
# # resultList1 = query.find()
# # if len(resultList1) == 1 or len(resultList1) == 2 :
# # activity = resultList1[0].get('event')[0]
# # if activity in activity_dict.keys():
# # activity_dict[activity]+=1
# # else:
# # activity_dict[activity] =1
# # else:
# # print 'length of resultList1: %s' %(str(len(resultList1)))
# # print 'Seems to be an error,index: %s,user: %s; timestamp: %s \n' %(str(index1),str(result.get('user').id),str(result.get('timestamp')))
# #
# # activity_dict['others'] = total_count-sum(activity_dict.values())
#
#
#
#
#
#
#
# class Developer:
# def __init__(self,user_id=None):
# self.user = User()
# self.user_id = user_id
#
# @classmethod
# def is_valid_email(self,email):
# query = Query(User)
# query.exists('email',email)
# return 0 if query.find() else 1;
#
# def signup(self,email,username,password):
# self.user.set('email',email)
# self.user.set('username',username)
# self.user.set('password',password)
# try:
# result = self.user.sign_up()
# print result
# return 1
# except LeanCloudError,e:
# print e
# return 0
#
# def login_with_email(self,email,password):
# # self.user.login(email,password)
# pass
#
# def login_with_username(self,username,password):
# try:
# self.user.login(username,password)
# self.user_id = self.user.id
# self.session_token = self.user.get_session_token()
# print 'user.id: %s' %(str(self.user_id))
# print 'session_token: %s' %(str(self.session_token))
# return 1
# except LeanCloudError,e:
# print e
# return 0
#
# def init_developer_with_user_id(self,user_id):
# query = Query(User)
# query.equal_to('objectId',user_id)
# result = query.find()
# if len(result)==1:
# return result[0]
# else:
# print len(result)
# print user_id
#
# def create_new_app(self,app_name):
# try:
# developer = self.init_developer_with_user_id(self.user_id)
# signed_key = signer.sign(app_name)
# Application = Object.extend('Application')
# application = Application()
# application.set('application_name',app_name)
# application.set('user',developer)
# application.save()
# app_id = application.id
# app_key = signer.sign(app_id).split(app_id+'.')[1]
# application.set('app_id',app_id)
# application.set('app_key',app_key)
# application.save()
# return 1
# except LeanCloudError,e:
# print e
# return 0
# pass
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
| mit | -1,799,050,062,595,747,000 | 38.51976 | 860 | 0.555532 | false |
Lartza/mumble | scripts/transtate.py | 1 | 1997 | #!/usr/bin/env python
#
# Copyright 2005-2019 The Mumble Developers. All rights reserved.
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file at the root of the
# Mumble source tree or at <https://www.mumble.info/LICENSE>.
# Extracts the progress of translations from the compilation
# log in easily readable form. Make sure to delete all .qm
# files beforehand.
#
# Usage: cat log | .\transtate.py
import sys
import re
def n(val):
return (int(val) if val else 0)
if __name__ == "__main__":
#--Regex matches strings like
#
#Updating 'mumble_zh_CN.qm'...
# Generated 1421 translation(s) (1145 finished and 276 unfinished)
# Ignored 89 untranslated source text(s)
# s:\dev\QtMumble\bin\lrelease.exe mumble_zh_TW.ts
#Updating 'mumble_zh_TW.qm'...
# Generated 664 translation(s) (594 finished and 70 unfinished)
# Ignored 846 untranslated source text(s)
update = re.compile(r"Updating '([\w\.]+)'\.\.\.\s+Generated (\d+) translation\(s\) \((\d+) finished and (\d+) unfinished\)(?:\s+ Ignored (\d+) untranslated source text\(s\))?")
langs = 0
s = 's'
sortedbyuntranslated = sorted(update.findall(sys.stdin.read()), key=lambda s: (float(s[2]) / (n(s[1]) + n(s[4]))) if n(s[1]) else 10, reverse=True)
for lang, total, finished, unfinished, ignored in sortedbyuntranslated:
print "%s:" % lang
if int(total) == 0:
print " Source language"
else:
realtotal = n(total) + n(ignored)
percent = float(finished) / realtotal * 100
print " %d marked unfinished" % (n(unfinished))
if n(ignored):
print " %d untranslated." % (n(ignored))
print " => %d%% done (total %d + %d)." % (percent, n(total), n(ignored))
print
langs += 1
print "Number of languages: %d" % langs
| bsd-3-clause | -5,393,092,225,324,176,000 | 31.754098 | 181 | 0.58688 | false |
dwavesystems/dimod | dimod/decorators.py | 1 | 17667 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import itertools
import collections.abc as abc
from functools import wraps
from numbers import Integral
from dimod.compatibility23 import getargspec
from dimod.core.structured import Structured
from dimod.exceptions import BinaryQuadraticModelStructureError, WriteableError
from dimod.vartypes import as_vartype
__all__ = ['nonblocking_sample_method',
'bqm_index_labels',
           'bqm_index_labelled_input',
'bqm_structured',
'vartype_argument',
'graph_argument',
'lockable_method',
'forwarding_method',
]
def nonblocking_sample_method(f):
"""Decorator to create non-blocking sample methods.
Some samplers work asynchronously, and it is useful for composites to
handle that case. This decorator can be used to easily construct a
non-blocking :class:`.Sampler` or :class:`.Composite`.
The function being decorated must return an iterator when called. This
iterator must yield exactly two values.
The first value can be any object, but if the object has a `done()`
method, that method will determine the value of :meth:`.SampleSet.done()`.
The second value must be a :class:`.SampleSet`, which will provide the
samples to the user.
The generator is executed until the first yield. The generator is then
resumed when the returned sample set is resolved.
>>> from dimod.decorators import nonblocking_sample_method
...
>>> class Sampler:
... @nonblocking_sample_method
... def sample(self, bqm):
... print("First part!")
... yield
... print("Second part!")
... sample = {v: 1 for v in bqm.variables}
... yield dimod.SampleSet.from_samples_bqm(sample, bqm)
...
>>> bqm = dimod.BinaryQuadraticModel.from_ising({'a': -1}, {('a', 'b'): 1})
>>> ss = Sampler().sample(bqm)
First part!
>>> ss.resolve()
Second part!
>>> print(ss)
a b energy num_oc.
0 +1 +1 0.0 1
['SPIN', 1 rows, 1 samples, 2 variables]
"""
from dimod.sampleset import SampleSet # avoid circular import
@wraps(f)
def _sample(*args, **kwargs):
iterator = f(*args, **kwargs)
# resolve blocking part now, and make hook for the non-blocking part
return SampleSet.from_future(next(iterator), lambda _: next(iterator))
return _sample
def bqm_index_labels(f):
"""Decorator to convert a BQM to index-labels and relabel the sample set
output.
Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped
function or method to accept a :obj:`.BinaryQuadraticModel` as the second
input and to return a :obj:`.SampleSet`.
"""
@wraps(f)
def _index_label(sampler, bqm, **kwargs):
if not hasattr(bqm, 'linear'):
raise TypeError('expected input to be a BinaryQuadraticModel')
linear = bqm.linear
# if already index-labelled, just continue
if all(v in linear for v in range(len(bqm))):
return f(sampler, bqm, **kwargs)
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in inverse_mapping.items()}
response = f(sampler, bqm.relabel_variables(mapping, inplace=False), **kwargs)
# unapply the relabeling
return response.relabel_variables(inverse_mapping, inplace=True)
return _index_label
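# Example usage (illustrative sketch; `MySampler` is a hypothetical sampler class):
#
#     class MySampler(dimod.Sampler):
#         @bqm_index_labels
#         def sample(self, bqm, **kwargs):
#             # here `bqm` is always labelled 0 .. len(bqm)-1
#             ...
#
# The wrapped method works on an index-labelled copy of the BQM, and the returned
# SampleSet is relabelled back to the caller's original variable labels.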
def bqm_index_labelled_input(var_labels_arg_name, samples_arg_names):
"""Returns a decorator that ensures BQM variable labeling and
specified sample_like inputs are index labeled and consistent.
Args:
var_labels_arg_name (str):
Expected name of the argument used to pass in an
index labeling for the binary quadratic model (BQM).
samples_arg_names (list[str]):
Expected names of sample_like inputs that should be
indexed by the labels passed to the `var_labels_arg_name`
argument. 'samples_like' is an extension of NumPy's
array_like_. See :func:`.as_samples`.
Returns:
Function decorator.
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html
"""
def index_label_decorator(f):
@wraps(f)
def _index_label(sampler, bqm, **kwargs):
if not hasattr(bqm, 'linear'):
raise TypeError('expected input to be a BinaryQuadraticModel')
linear = bqm.linear
var_labels = kwargs.get(var_labels_arg_name, None)
has_samples_input = any(kwargs.get(arg_name, None) is not None
for arg_name in samples_arg_names)
if var_labels is None:
# if already index-labelled, just continue
if all(v in linear for v in range(len(bqm))):
return f(sampler, bqm, **kwargs)
if has_samples_input:
err_str = ("Argument `{}` must be provided if any of the"
" samples arguments {} are provided and the "
"bqm is not already index-labelled".format(
var_labels_arg_name,
samples_arg_names))
raise ValueError(err_str)
try:
inverse_mapping = dict(enumerate(sorted(linear)))
except TypeError:
# in python3 unlike types cannot be sorted
inverse_mapping = dict(enumerate(linear))
var_labels = {v: i for i, v in inverse_mapping.items()}
else:
inverse_mapping = {i: v for v, i in var_labels.items()}
response = f(sampler,
bqm.relabel_variables(var_labels, inplace=False),
**kwargs)
# unapply the relabeling
return response.relabel_variables(inverse_mapping, inplace=True)
return _index_label
return index_label_decorator
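# Example usage (illustrative sketch; the argument names are hypothetical):
#
#     class MySampler(dimod.Sampler):
#         @bqm_index_labelled_input('variable_order', ['initial_states'])
#         def sample(self, bqm, variable_order=None, initial_states=None, **kwargs):
#             ...
#
# Here `variable_order` is a mapping from the BQM's variable labels to indices, and
# any samples passed via `initial_states` are assumed to be indexed by it; both the
# BQM and the returned SampleSet are converted to and from index labels around the
# wrapped call.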
def bqm_structured(f):
"""Decorator to raise an error if the given BQM does not match the sampler's
structure.
Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped
function or method to accept a :obj:`.BinaryQuadraticModel` as the second
input and for the :class:`.Sampler` to also be :class:`.Structured`.
"""
@wraps(f)
def new_f(sampler, bqm, **kwargs):
try:
structure = sampler.structure
adjacency = structure.adjacency
except AttributeError:
if isinstance(sampler, Structured):
raise RuntimeError("something is wrong with the structured sampler")
else:
raise TypeError("sampler does not have a structure property")
if not all(v in adjacency for v in bqm.linear):
# todo: better error message
raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")
if not all(u in adjacency[v] for u, v in bqm.quadratic):
# todo: better error message
raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")
return f(sampler, bqm, **kwargs)
return new_f
def vartype_argument(*arg_names):
"""Ensures the wrapped function receives valid vartype argument(s).
One or more argument names can be specified as a list of string arguments.
Args:
*arg_names (list[str], argument names, optional, default='vartype'):
Names of the constrained arguments in decorated function.
Returns:
Function decorator.
Examples:
>>> from dimod.decorators import vartype_argument
>>> @vartype_argument()
... def f(x, vartype):
... print(vartype)
...
>>> f(1, 'SPIN')
Vartype.SPIN
>>> f(1, vartype='SPIN')
Vartype.SPIN
>>> @vartype_argument('y')
... def f(x, y):
... print(y)
...
>>> f(1, 'SPIN')
Vartype.SPIN
>>> f(1, y='SPIN')
Vartype.SPIN
>>> @vartype_argument('z')
... def f(x, **kwargs):
... print(kwargs['z'])
...
>>> f(1, z='SPIN')
Vartype.SPIN
Note:
The decorated function can explicitly list (name) vartype arguments
constrained by :func:`vartype_argument` or it can use a keyword
arguments `dict`.
See also:
:func:`~dimod.as_vartype`
"""
# by default, constrain only one argument, the 'vartype`
if not arg_names:
arg_names = ['vartype']
def _vartype_arg(f):
argspec = getargspec(f)
def _enforce_single_arg(name, args, kwargs):
try:
vartype = kwargs[name]
except KeyError:
raise TypeError('vartype argument missing')
kwargs[name] = as_vartype(vartype)
@wraps(f)
def new_f(*args, **kwargs):
# bound actual f arguments (including defaults) to f argument names
# (note: if call arguments don't match actual function signature,
# we'll fail here with the standard `TypeError`)
bound_args = inspect.getcallargs(f, *args, **kwargs)
# `getcallargs` doesn't merge additional positional/keyword arguments,
# so do it manually
final_args = list(bound_args.pop(argspec.varargs, ()))
final_kwargs = bound_args.pop(argspec.keywords, {})
final_kwargs.update(bound_args)
for name in arg_names:
_enforce_single_arg(name, final_args, final_kwargs)
return f(*final_args, **final_kwargs)
return new_f
return _vartype_arg
def _is_integer(a):
if isinstance(a, int):
return True
if hasattr(a, "is_integer") and a.is_integer():
return True
return False
# we would like to do graph_argument(*arg_names, allow_None=False), but python2...
def graph_argument(*arg_names, **options):
"""Decorator to coerce given graph arguments into a consistent form.
The wrapped function accepts either an integer n, interpreted as a
complete graph of size n, a nodes/edges pair, a sequence of edges, or a
NetworkX graph. The argument is converted into a nodes/edges 2-tuple.
Args:
*arg_names (optional, default='G'):
Names of the arguments for input graphs.
allow_None (bool, optional, default=False):
If True, None can be passed through as an input graph.
"""
# by default, constrain only one argument, the 'G`
if not arg_names:
arg_names = ['G']
# we only allow one option allow_None
allow_None = options.pop("allow_None", False)
if options:
# to keep it consistent with python3
# behaviour like graph_argument(*arg_names, allow_None=False)
key, _ = options.popitem()
msg = "graph_argument() for an unexpected keyword argument '{}'".format(key)
raise TypeError(msg)
def _graph_arg(f):
argspec = getargspec(f)
def _enforce_single_arg(name, args, kwargs):
try:
G = kwargs[name]
except KeyError:
raise TypeError('Graph argument missing')
if hasattr(G, 'edges') and hasattr(G, 'nodes'):
# networkx or perhaps a named tuple
kwargs[name] = (list(G.nodes), list(G.edges))
elif _is_integer(G):
# an integer, cast to a complete graph
kwargs[name] = (list(range(G)), list(itertools.combinations(range(G), 2)))
elif isinstance(G, abc.Sequence):
if len(G) != 2:
# edgelist
kwargs[name] = (list(set().union(*G)), G)
else: # len(G) == 2
# need to determine if this is a nodes/edges pair or an
# edgelist
if isinstance(G[0], int):
# nodes are an int so definitely nodelist
kwargs[name] = (list(range(G[0])), G[1])
elif all(isinstance(e, abc.Sequence) and len(e) == 2
for e in G):
# ok, everything is a sequence and everything has length
# 2, so probably an edgelist. But we're dealing with
# only four objects so might as well check to be sure
nodes, edges = G
if all(isinstance(e, abc.Sequence) and len(e) == 2 and
(v in nodes for v in e) for e in edges):
pass # nodes, edges
else:
# edgelist
kwargs[name] = (list(set().union(*G)), G)
else:
# nodes, edges
pass
elif allow_None and G is None:
# allow None to be passed through
kwargs[name] = G
else:
raise ValueError('Unexpected graph input form')
return
@wraps(f)
def new_f(*args, **kwargs):
# bound actual f arguments (including defaults) to f argument names
# (note: if call arguments don't match actual function signature,
# we'll fail here with the standard `TypeError`)
bound_args = inspect.getcallargs(f, *args, **kwargs)
# `getcallargs` doesn't merge additional positional/keyword arguments,
# so do it manually
final_args = list(bound_args.pop(argspec.varargs, ()))
final_kwargs = bound_args.pop(argspec.keywords, {})
final_kwargs.update(bound_args)
for name in arg_names:
_enforce_single_arg(name, final_args, final_kwargs)
return f(*final_args, **final_kwargs)
return new_f
return _graph_arg
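# Example usage (illustrative sketch):
#
#     @graph_argument('G')
#     def num_possible_edges(G):
#         nodes, edges = G  # the decorator always delivers a (nodes, edges) tuple
#         n = len(nodes)
#         return n * (n - 1) // 2
#
#     num_possible_edges(5)                              # complete graph K5
#     num_possible_edges(([0, 1, 2], [(0, 1)]))          # nodes/edges pair
#     num_possible_edges([(0, 1), (1, 2), (0, 2)])       # edge list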
def lockable_method(f):
"""Method decorator for objects with an is_writeable flag.
If wrapped method is called, and the associated object's `is_writeable`
attribute is set to True, a :exc:`.exceptions.WriteableError` is raised.
"""
@wraps(f)
def _check_writeable(obj, *args, **kwds):
if not obj.is_writeable:
raise WriteableError
return f(obj, *args, **kwds)
return _check_writeable
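# Example usage (illustrative sketch; `Model` is a hypothetical class exposing an
# `is_writeable` flag):
#
#     class Model:
#         def __init__(self):
#             self.is_writeable = True
#
#         @lockable_method
#         def add_variable(self, v):
#             ...
#
# Once `is_writeable` is set to False, calling `add_variable` raises WriteableError.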
_NOT_FOUND = object()
def forwarding_method(func):
"""Improve the performance of a forwarding method by avoiding an attribute
lookup.
The decorated method should return the function that it is forwarding to.
Subsequent calls will be made directly to that function.
Example:
>>> import typing
>>> import timeit
>>> from dimod.decorators import forwarding_method
...
>>> class Inner:
... def func(self, a: int, b: int = 0) -> int:
... "Inner.func docsting."
... return a + b
...
>>> class Outer:
... def __init__(self):
... self.inner = Inner()
...
... def func(self, a: int, b: int = 0) -> int:
... "Outer.func docsting."
... return self.inner.func(a, b=b)
...
... @forwarding_method
... def fwd_func(self, a: int, b: int = 0) -> int:
... "Outer.fwd_func docsting."
... return self.inner.func
...
>>> obj = Outer()
>>> obj.func(2, 3)
5
>>> obj.fwd_func(1, 3)
4
>>> timeit.timeit(lambda: obj.func(10, 5)) # doctest:+SKIP
0.275462614998105
>>> timeit.timeit(lambda: obj.fwd_func(10, 5)) # doctest:+SKIP
0.16692455199881806
>>> Outer.fwd_func.__doc__
'Outer.fwd_func docsting.'
>>> obj.fwd_func.__doc__
'Inner.func docsting.'
"""
@wraps(func)
def wrapper(obj, *args, **kwargs):
name = func.__name__
try:
cache = obj.__dict__
except AttributeError:
raise TypeError(
f"No '__dict__' attribute on {type(obj).__name__!r}") from None
method = cache.get(name, _NOT_FOUND)
if method is _NOT_FOUND:
# the args and kwargs are ignored but they are required to not
# raise an error
method = func(obj, *args, **kwargs)
try:
cache[name] = method
except TypeError:
raise TypeError(
f"the '__dict__' attribute of {type(obj).__name__!r} "
"instance does not support item assignment.") from None
return method(*args, **kwargs)
return wrapper
| apache-2.0 | -5,030,165,640,508,756,000 | 33.304854 | 104 | 0.568744 | false |
ZellMechanik-Dresden/dclab | dclab/rtdc_dataset/fmt_tdms/event_contour.py | 1 | 8965 | """Class for efficiently handling contour data"""
import sys
import warnings
import numpy as np
from ...features import inert_ratio
from .exc import ContourIndexingError
class ContourVerificationWarning(UserWarning):
pass
class ContourColumn(object):
def __init__(self, rtdc_dataset):
"""A wrapper for ContourData that takes into account event offsets
Event offsets appear when the first event that is recorded in the
tdms files does not have a corresponding contour in the contour
text file.
"""
fname = self.find_contour_file(rtdc_dataset)
if fname is None:
self.identifier = None
else:
if sys.version_info[0] == 2:
self.identifier = str(fname).decode("utf-8")
else:
self.identifier = str(fname)
if fname is not None:
self._contour_data = ContourData(fname)
self._initialized = False
else:
self._contour_data = []
# prevent `determine_offset` to be called
self._initialized = True
self.frame = rtdc_dataset["frame"]
# if they are set, these features are used for verifying the contour
self.pxfeat = {}
if "area_msd" in rtdc_dataset:
self.pxfeat["area_msd"] = rtdc_dataset["area_msd"]
if "pixel size" in rtdc_dataset.config["imaging"]:
px_size = rtdc_dataset.config["imaging"]["pixel size"]
for key in ["pos_x", "pos_y", "size_x", "size_y"]:
if key not in rtdc_dataset.features_innate:
# abort
self.pxfeat.clear()
break
self.pxfeat[key] = rtdc_dataset[key] / px_size
if "image" in rtdc_dataset:
self.shape = rtdc_dataset["image"].shape
else:
self.shape = None
self.event_offset = 0
def __getitem__(self, idx):
if not self._initialized:
self.determine_offset()
idnew = idx-self.event_offset
cdata = None
if idnew < 0:
# No contour data
cdata = np.zeros((2, 2), dtype=int)
else:
# Assign contour based on stored frame index
frame_ist = self.frame[idx]
# Do not only check the exact frame, but +/- 2 events around it
for idn in [idnew, idnew-1, idnew+1, idnew-2, idnew+2]:
# check frame
try:
frame_soll = self._contour_data.get_frame(idn)
except IndexError:
# reached end of file
continue
if np.allclose(frame_soll, frame_ist, rtol=0):
cdata = self._contour_data[idn]
break
            if cdata is None and self.shape and self.pxfeat:
# The frame is wrong, but the contour might be correct.
# We check that by verifying several features.
cdata2 = self._contour_data[idnew]
cont = np.zeros((self.shape[1], self.shape[0]))
cont[cdata2[:, 0], cdata2[:, 1]] = True
mm = inert_ratio.cont_moments_cv(cdata2)
if (np.allclose(self.pxfeat["size_x"][idx],
np.ptp(cdata2[:, 0]) + 1,
rtol=0, atol=1e-5)
and np.allclose(self.pxfeat["size_y"][idx],
np.ptp(cdata2[:, 1]) + 1,
rtol=0, atol=1e-5)
and np.allclose(mm["m00"],
self.pxfeat["area_msd"][idx],
rtol=0, atol=1e-5)
# atol=6 for positions, because the original positions
# are computed from the convex contour, which would be
# computed using cv2.convexHull(cdata2).
and np.allclose(self.pxfeat["pos_x"][idx],
mm["m10"]/mm["m00"],
rtol=0, atol=6)
and np.allclose(self.pxfeat["pos_y"][idx],
mm["m01"]/mm["m00"],
rtol=0, atol=6)):
cdata = cdata2
if cdata is None:
# No idea what went wrong, but we make the beste guess and
# issue a warning.
cdata = self._contour_data[idnew]
frame_c = self._contour_data.get_frame(idnew)
warnings.warn(
"Couldn't verify contour {} in {}".format(idx, self.identifier)
+ " (frame index {})!".format(frame_c),
ContourVerificationWarning
)
return cdata
def __len__(self):
length = len(self._contour_data)
if length:
if not self._initialized:
self.determine_offset()
length += self.event_offset
return length
def determine_offset(self):
"""Determines the offset of the contours w.r.t. other data columns
Notes
-----
- the "frame" column of `rtdc_dataset` is compared to
the first contour in the contour text file to determine an
offset by one event
- modifies the property `event_offset` and sets `_initialized`
to `True`
"""
# In case of regular RTDC, the first contour is
# missing. In case of fRTDC, it is there, so we
# might have an offset. We find out if the first
# contour frame is missing by comparing it to
# the "frame" column of the rtdc dataset.
fref = self._contour_data.get_frame(0)
f0 = self.frame[0]
f1 = self.frame[1]
# Use allclose to avoid float/integer comparison problems
if np.allclose(fref, f0, rtol=0):
self.event_offset = 0
elif np.allclose(fref, f1, rtol=0):
self.event_offset = 1
else:
msg = "Contour data has unknown offset (frame {})!".format(fref)
raise ContourIndexingError(msg)
self._initialized = True
@staticmethod
def find_contour_file(rtdc_dataset):
"""Tries to find a contour file that belongs to an RTDC dataset
Returns None if no contour file is found.
"""
cont_id = rtdc_dataset.path.stem
cands = [c.name for c in rtdc_dataset._fdir.glob("*_contours.txt")]
cands = sorted(cands)
# Search for perfect matches, e.g.
# - M1_0.240000ul_s.tdms
# - M1_0.240000ul_s_contours.txt
for c1 in cands:
if c1.startswith(cont_id):
cfile = rtdc_dataset._fdir / c1
break
else:
# Search for M* matches with most overlap, e.g.
# - M1_0.240000ul_s.tdms
# - M1_contours.txt
for c2 in cands:
if (c2.split("_")[0] == rtdc_dataset._mid):
# Do not confuse with M10_contours.txt
cfile = rtdc_dataset._fdir / c2
break
else:
cfile = None
return cfile
class ContourData(object):
def __init__(self, fname):
"""Access an MX_contour.txt as a dictionary
Initialize this class with a *_contour.txt file.
The individual contours can be accessed like a
list (enumerated from 0 on).
"""
self._initialized = False
self.filename = fname
def __getitem__(self, idx):
cont = self.data[idx]
cont = cont.strip()
cont = cont.replace(")", "")
cont = cont.replace("(", "")
cont = cont.replace("(", "")
cont = cont.replace("\n", ",")
cont = cont.replace(" ", " ")
cont = cont.replace(" ", " ")
if len(cont) > 1:
_frame, cont = cont.split(" ", 1)
cont = cont.strip(" ,")
data = np.fromstring(cont, sep=",", dtype=np.uint16).reshape(-1, 2)
return data
def __len__(self):
return len(self.data)
def _index_file(self):
"""Open and index the contour file
This function populates the internal list of contours
as strings which will be available as `self.data`.
"""
with self.filename.open() as fd:
data = fd.read()
ident = "Contour in frame"
self._data = data.split(ident)[1:]
self._initialized = True
@property
def data(self):
"""Access self.data
If `self._index_file` has not been computed before, this
property will cause it to do so.
"""
if not self._initialized:
self._index_file()
return self._data
def get_frame(self, idx):
"""Return the frame number of a contour"""
cont = self.data[idx]
# previously was split using " ", but "(" is more general
frame = int(cont.strip().split("(", 1)[0])
return frame
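# Illustrative note (input format inferred from the parsing above, not quoted from
# a file format specification): each record in an MX_contour.txt file looks
# roughly like
#
#     Contour in frame 1234 ((10, 22),(11, 22),(12, 23), ...)
#
# get_frame() reads the leading frame number, while __getitem__ strips the
# parentheses and parses the remaining x/y pairs into an (N, 2) uint16 array.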
| gpl-2.0 | -762,820,162,700,823,400 | 35.443089 | 79 | 0.521695 | false |
RayRuizhiLiao/ITK_4D | Utilities/Maintenance/ArchiveTestingDataOnGirder.py | 1 | 8696 | #!/usr/bin/env python
import argparse
import girder_client
from girder_client import GirderClient
import os
import fnmatch
import json
import mimetypes
from distutils.version import StrictVersion
if StrictVersion(girder_client.__version__) < StrictVersion("2.0.0"):
raise Exception("Girder 2.0.0 or newer is required")
class GirderExternalDataCli(GirderClient):
"""
A command line Python client for interacting with a Girder instance's
RESTful api, specifically for performing uploads into a Girder instance.
"""
def __init__(self, apiKey, objectStore):
"""initialization function to create a GirderCli instance, will attempt
to authenticate with the designated Girder instance.
"""
GirderClient.__init__(self,
apiUrl='https://data.kitware.com/api/v1')
self.objectStore = objectStore
self.authenticate(apiKey=apiKey)
def content_link_upload(self, localFolder, parentId, ext='.sha512',
parentType='folder', blacklist=['.git', '.ExternalData'],
reuseExisting=True, dryRun=False):
"""Upload objects corresponding to CMake ExternalData content links.
This will recursively walk down the tree and find content links ending
with the specified extension and create a hierarchy on the server under
the parentId.
:param ext: Content link file extension.
:param parentId: id of the parent in Girder or resource path.
:param parentType: one of (collection,folder,user), default of folder.
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
:param dryRun: Do not actually upload any content.
"""
parentId = self._checkResourcePath(parentId)
localFolder = os.path.normpath(localFolder)
for entry in os.listdir(localFolder):
if entry in blacklist:
print("Ignoring file %s as it is blacklisted" % entry)
continue
full_entry = os.path.join(localFolder, entry)
if os.path.islink(full_entry):
# os.walk skips symlinks by default
print("Skipping file %s as it is a symlink" % entry)
continue
if os.path.isdir(full_entry):
self._uploadFolderRecursive(
full_entry, parentId, parentType, ext,
reuseExisting=reuseExisting, blacklist=blacklist,
dryRun=dryRun)
def _uploadContentLinkItem(self, name, content_link, folder,
ext='.sha512', parentType='folder', dryRun=False,
reuseExisting=False):
"""Upload objects corresponding to CMake ExternalData content links.
This will upload the file with name, *name*, for the content link
located at *content_link* to the Girder folder, *folder*.
:param ext: Content link file extension.
:param parentType: one of (collection,folder,user), default of folder.
:param reuseExisting: bool whether to accept an existing item of
the same name in the same location, or create a new one instead.
:param dryRun: Do not actually upload any content.
"""
content_link = os.path.normpath(content_link)
if os.path.isfile(content_link) and \
fnmatch.fnmatch(content_link, '*' + ext):
if parentType != 'folder':
raise Exception(('Attempting to upload an item under a %s.'
% parentType) +
' Items can only be added to folders.')
else:
with open(content_link, 'r') as fp:
hash_value = fp.readline().strip()
self._uploadAsItem(
name,
folder['_id'],
os.path.join(self.objectStore, hash_value),
reuseExisting=reuseExisting,
dryRun=dryRun)
def _uploadFolderRecursive(self, localFolder, parentId, parentType,
ext='.sha512',
reuseExisting=False,
blacklist=[],
dryRun=False):
"""Function to recursively upload a folder and all of its descendants.
:param localFolder: full path to local folder to be uploaded
:param parentId: id of parent in Girder,
where new folder will be added
:param parentType: one of (collection, folder, user)
:param leaf_folders_as_items: whether leaf folders should have all
files uploaded as single items
:param reuseExisting: boolean indicating whether to accept an existing
item
of the same name in the same location, or create a new one instead
"""
localFolder = os.path.normpath(localFolder)
filename = os.path.basename(localFolder)
if filename in blacklist:
print("Ignoring file %s as it is blacklisted" % filename)
return
# Do not add the folder if it does not contain any content links
has_content_link = False
for root, dirnames, filenames in os.walk(localFolder):
for filename in fnmatch.filter(filenames, '*' + ext):
has_content_link = True
break
if not has_content_link:
return
print('Creating Folder from %s' % localFolder)
if dryRun:
# create a dryRun placeholder
folder = {'_id': 'dryRun'}
elif localFolder == '.':
folder = {'_id': parentId}
else:
folder = self.loadOrCreateFolder(
os.path.basename(localFolder), parentId, parentType)
for entry in sorted(os.listdir(localFolder)):
if entry in blacklist:
print("Ignoring file %s as it is blacklisted" % entry)
continue
full_entry = os.path.join(localFolder, entry)
if os.path.islink(full_entry):
# os.walk skips symlinks by default
print("Skipping file %s as it is a symlink" % entry)
continue
elif os.path.isdir(full_entry):
# At this point we should have an actual folder, so can
# pass that as the parentType
self._uploadFolderRecursive(
full_entry, folder['_id'], 'folder',
ext, reuseExisting=reuseExisting,
blacklist=blacklist, dryRun=dryRun)
else:
name = os.path.splitext(entry)[0]
self._uploadContentLinkItem(name, full_entry, folder,
ext=ext, parentType=parentType, dryRun=dryRun,
reuseExisting=reuseExisting)
if not dryRun:
for callback in self._folderUploadCallbacks:
callback(folder, localFolder)
def main():
parser = argparse.ArgumentParser(
description='Upload CMake ExternalData content links to Girder')
parser.add_argument(
'--dry-run', action='store_true',
help='will not write anything to Girder, only report on what would '
'happen')
parser.add_argument('--api-key', required=True, default=None)
parser.add_argument('--local-folder', required=False,
default=os.path.join(os.path.dirname(__file__), '..',
'..'),
help='path to local target folder')
# Default is ITK/ITKTestingData/Nightly
parser.add_argument('--parent-id', required=False,
default='57b673388d777f10f269651c',
help='id of Girder parent target')
parser.add_argument('--object-store', required=True,
help='Path to the CMake ExternalData object store')
parser.add_argument(
'--no-reuse', action='store_true',
help='Don\'t reuse existing items of same name at same location')
args = parser.parse_args()
reuseExisting = not args.no_reuse
gc = GirderExternalDataCli(args.api_key,
objectStore=os.path.join(args.object_store, 'SHA512'))
gc.content_link_upload(args.local_folder, args.parent_id,
reuseExisting=reuseExisting, dryRun=args.dry_run)
if __name__ == '__main__':
main()
| apache-2.0 | -2,546,963,650,112,851,000 | 43.528796 | 79 | 0.576587 | false |
d2emon/newspaperizer | src/people/migrations/0006_auto_20160913_0914.py | 1 | 3605 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-13 09:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('people', '0005_auto_20160913_0556'),
]
operations = [
migrations.CreateModel(
name='EyeColor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.CharField(max_length=255, verbose_name='color')),
],
),
migrations.CreateModel(
name='HairColor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.CharField(max_length=255, verbose_name='color')),
],
),
migrations.CreateModel(
name='Haircut',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('size', models.IntegerField(verbose_name='size')),
],
),
migrations.CreateModel(
name='HairParting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parting', models.CharField(max_length=255, verbose_name='parting')),
],
),
migrations.RemoveField(
model_name='hair',
name='hairdress',
),
migrations.RemoveField(
model_name='hair',
name='length',
),
migrations.AlterField(
model_name='cloth',
name='person',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='people.Person'),
),
migrations.AlterField(
model_name='face',
name='eye',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.EyeColor', verbose_name='eye'),
),
migrations.AlterField(
model_name='face',
name='person',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='people.Person'),
),
migrations.AlterField(
model_name='hair',
name='color',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.HairColor', verbose_name='color'),
),
migrations.AlterField(
model_name='hair',
name='curling',
field=models.BooleanField(verbose_name='curling'),
),
migrations.AlterField(
model_name='hair',
name='face',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='people.Face'),
),
migrations.AlterField(
model_name='hair',
name='parting',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.HairParting', verbose_name='parting'),
),
migrations.AddField(
model_name='hair',
name='haircut',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.Haircut', verbose_name='haircut'),
),
]
| gpl-3.0 | 7,657,601,287,248,645,000 | 37.763441 | 153 | 0.563107 | false |
voronovim/mikrotik-api-tools | api_tools/ini_parser.py | 1 | 1045 | import configparser
class Config(object):
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('../config.ini')
def get_general(self):
general = {'debug': self.config.get('general', 'debug')}
return general
def get_ftp(self):
ftp = {'host': self.config.get('ftp', 'host'),
'port': self.config.get('ftp', 'port'),
'username': self.config.get('ftp', 'username'),
'password': self.config.get('ftp', 'password')}
return ftp
def get_devices(self):
for section in self.config.sections():
if section != 'general' and section != 'ftp':
device = {'host': self.config.get(section, 'host'),
'username': self.config.get(section, 'username'),
'password': self.config.get(section, 'password'),
'dst-path': self.config.get(section, 'path')}
# Return generator
yield device
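# Example usage (hypothetical; assumes ../config.ini defines [general], [ftp]
# and at least one device section with host/username/password/path options):
#
#   cfg = Config()
#   debug = cfg.get_general()['debug']
#   ftp = cfg.get_ftp()
#   for device in cfg.get_devices():  # get_devices() is a generator
#       print(device['host'], device['dst-path'])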
| mit | -4,069,631,633,620,106,000 | 35.034483 | 75 | 0.522488 | false |
ez-p/madness | tournament/engine/algorithms/fiftyfifty.py | 1 | 1144 | """
Copyright 2016, Paul Powell, All rights reserved.
"""
import random
from matchup import Matchup
#
# Implement a matchup between two teams
# This is an example of implementing Matchup, which determines
# the winner when two teams play in the tournament.
#
class FiftyFifty(Matchup):
# teams: tuple of Team objects (Team1, Team2)
# madness: level of madness
def __init__(self, teams, madness):
super(FiftyFifty, self).__init__(teams)
self.teams = teams
self.madness = madness
def _play(self, madness):
# See if superclass wants to handle the matchup
status, winner, loser = Matchup.base_play(self.teams[0], self.teams[1])
if status:
# Superclass handled the matchup
self.winner = winner
self.loser = loser
return (self.winner, self.loser)
seq = [1,2]
val = random.choice(seq)
if val == 1:
self.winner = self.teams[0]
self.loser = self.teams[1]
else:
self.winner = self.teams[1]
self.loser = self.teams[0]
return (self.winner, self.loser)
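# Example usage (hypothetical; assumes two Team objects from the tournament
# engine and an integer madness level; _play() is the hook defined above):
#
#   matchup = FiftyFifty((team_a, team_b), madness=3)
#   winner, loser = matchup._play(3)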
| gpl-3.0 | 6,270,458,487,347,824,000 | 29.105263 | 79 | 0.604895 | false |
KarolBedkowski/photomagic | photomagick/lib/logging_setup.py | 1 | 1737 | #!/usr/bin/python2.4
# -*- coding: utf-8 -*-
"""
Logging setup.
2008-04-17 [k]: place the log file in the temp directory if the app is frozen
or the chosen directory is read-only.
"""
__author__ = "Karol Będkowski"
__copyright__ = "Copyright (c) Karol Będkowski, 2006-2010"
__version__ = "2011-04-24"
__all__ = ['logging_setup']
import os.path
import logging
import tempfile
import time
from . import appconfig
def logging_setup(filename, debug=False):
log_fullpath = os.path.abspath(filename)
log_dir = os.path.dirname(log_fullpath)
log_dir_access = os.access(log_dir, os.W_OK)
create_temp = False
if os.path.isabs(filename):
if not log_dir_access:
create_temp = True
else:
if appconfig.is_frozen() or not log_dir_access:
create_temp = True
if create_temp:
basename = os.path.basename(filename)
spfname = os.path.splitext(basename)
filename = spfname[0] + "_" + str(int(time.time())) + spfname[1]
log_fullpath = os.path.join(tempfile.gettempdir(), filename)
print 'Logging to %s' % log_fullpath
if debug:
level_console = logging.DEBUG
level_file = logging.DEBUG
else:
level_console = logging.INFO
level_file = logging.ERROR
logging.basicConfig(level=level_file,
format='%(asctime)s %(levelname)-8s %(name)s - %(message)s',
filename=log_fullpath, filemode='w')
console = logging.StreamHandler()
console.setLevel(level_console)
console.setFormatter(logging.Formatter(
'%(levelname)-8s %(name)s - %(message)s'))
logging.getLogger('').addHandler(console)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
logging.getLogger('sqlalchemy.orm.unitofwork').setLevel(logging.WARN)
log = logging.getLogger(__name__)
log.debug('logging_setup() finished')
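# Example usage (hypothetical log file name; debug=True lowers both the file
# and console thresholds to DEBUG):
#
#   logging_setup('photomagic.log', debug=True)
#   logging.getLogger(__name__).info('application started')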
# vim: ff=unix: encoding=utf8:
| gpl-2.0 | -6,666,117,779,018,797,000 | 23.771429 | 73 | 0.703576 | false |
repotvsupertuga/tvsupertuga.repository | script.module.openscrapers/lib/openscrapers/sources_openscrapers/en/freefmovies.py | 1 | 5118 | # -*- coding: UTF-8 -*-
# ..#######.########.#######.##....#..######..######.########....###...########.#######.########..######.
# .##.....#.##.....#.##......###...#.##....#.##....#.##.....#...##.##..##.....#.##......##.....#.##....##
# .##.....#.##.....#.##......####..#.##......##......##.....#..##...##.##.....#.##......##.....#.##......
# .##.....#.########.######..##.##.#..######.##......########.##.....#.########.######..########..######.
# .##.....#.##.......##......##..###.......#.##......##...##..########.##.......##......##...##........##
# .##.....#.##.......##......##...##.##....#.##....#.##....##.##.....#.##.......##......##....##.##....##
# ..#######.##.......#######.##....#..######..######.##.....#.##.....#.##.......#######.##.....#..######.
'''
freefmovies scraper for Exodus forks.
Nov 9 2018 - Checked
Updated and refactored by someone.
Originally created by others.
'''
import json
import re
import urllib
import urlparse
from openscrapers.modules import cfscrape
from openscrapers.modules import cleantitle
from openscrapers.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['freefmovies.net']
self.base_link = 'http://freefmovies.net'
self.search_link = '/watch/%s-%s-online-fmovies.html'
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, year)))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
aliases.append({'country': 'uk', 'title': tvshowtitle})
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
r = self.scraper.get(url).content
r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
for i in r[0]:
if i.content == 'Episode %s' % episode:
url = i.attrs['href']
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
r = self.scraper.get(url).content
quality = re.findall(">(\w+)<\/p", r)
if quality[0] == "HD":
quality = "720p"
else:
quality = "SD"
r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
for i in r[0]:
url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
'data-name': i.attrs['data-name']}
url = urllib.urlencode(url)
sources.append({'source': i.content, 'quality': quality, 'language': 'en', 'url': url, 'direct': False,
'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
try:
urldata = urlparse.parse_qs(url)
urldata = dict((i, urldata[i][0]) for i in urldata)
post = {'ipplugins': 1, 'ip_film': urldata['data-film'], 'ip_server': urldata['data-server'],
'ip_name': urldata['data-name'], 'fix': "0"}
p1 = self.scraper.request('http://freefmovies.net/ip.file/swf/plugins/ipplugins.php', post=post,
referer=urldata['url'], XHR=True)
p1 = json.loads(p1)
p2 = self.scraper.request('http://freefmovies.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=0' % (
p1['s'], urldata['data-server']))
p2 = json.loads(p2)
p3 = self.scraper.request('http://freefmovies.net/ip.file/swf/ipplayer/api.php?hash=%s' % (p2['hash']))
p3 = json.loads(p3)
n = p3['status']
if n == False:
p2 = self.scraper.request('http://freefmovies.net/ip.file/swf/ipplayer/ipplayer.php?u=%s&s=%s&n=1' % (
p1['s'], urldata['data-server']))
p2 = json.loads(p2)
url = "https:%s" % p2["data"].replace("\/", "/")
return url
except:
return
| gpl-2.0 | 7,994,238,289,006,390,000 | 42.74359 | 120 | 0.446854 | false |
fendouai/FaceRank | find_faces_in_picture.py | 1 | 1236 | from PIL import Image
import face_recognition
import os
print("h")
def find_and_save_face(web_file,face_file):
# Load the jpg file into a numpy array
image = face_recognition.load_image_file(web_file)
print(image.dtype)
# Find all the faces in the image
face_locations = face_recognition.face_locations(image)
print("I found {} face(s) in this photograph.".format(len(face_locations)))
for face_location in face_locations:
# Print the location of each face in this image
top, right, bottom, left = face_location
print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# You can access the actual face itself like this:
face_image = image[top:bottom, left:right]
pil_image = Image.fromarray(face_image)
pil_image.save(face_file)
print("h")
list = os.listdir("web_image/")
print(list)
for image in list:
id_tag = image.find(".")
name=image[0:id_tag]
print(name)
web_file = "./web_image/" +image
face_file="./face_image/"+name+".jpg"
im=Image.open("./web_image/"+image)
try:
find_and_save_face(web_file, face_file)
except:
print("fail")
| gpl-3.0 | -5,268,368,286,697,568,000 | 29.9 | 126 | 0.642395 | false |
jimsize/PySolFC | pysollib/games/numerica.py | 1 | 33879 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# imports
import time
# PySol imports
from pysollib.mygettext import _
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import DefaultHint, CautiousDefaultHint
from pysollib.mfxutil import kwdefault
from pysollib.util import ACE, ANY_RANK, ANY_SUIT, JACK, KING, NO_RANK, \
UNLIMITED_ACCEPTS, \
UNLIMITED_CARDS
from pysollib.stack import \
AC_RowStack, \
BasicRowStack, \
DealRowTalonStack, \
InitialDealTalonStack, \
OpenStack, \
OpenTalonStack, \
RK_FoundationStack, \
RK_RowStack, \
ReserveStack, \
SS_FoundationStack, \
Stack, \
StackWrapper, \
TalonStack, \
WasteStack, \
WasteTalonStack
# ************************************************************************
# *
# ************************************************************************
class Numerica_Hint(DefaultHint):
# FIXME: demo is clueless
# def _getDropCardScore(self, score, color, r, t, ncards):
    #     FIXME: implement this method
def _getMoveWasteScore(self, score, color, r, t, pile, rpile):
assert r in (self.game.s.waste, self.game.s.talon) and len(pile) == 1
score = self._computeScore(r, t)
return score, color
def _computeScore(self, r, t):
score = 30000
if len(t.cards) == 0:
score = score - (KING - r.cards[0].rank) * 1000
elif t.cards[-1].rank < r.cards[0].rank:
# FIXME: add intelligence here
score = 10000 + t.cards[-1].rank - len(t.cards)
elif t.cards[-1].rank == r.cards[0].rank:
score = 20000
else:
score = score - (t.cards[-1].rank - r.cards[0].rank) * 1000
return score
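    # Worked example (assuming the usual PySol ranks ACE=0 .. KING=12):
    # dropping a Queen (rank 11) on an empty pile scores
    # 30000 - (12 - 11) * 1000 = 29000; dropping it on a pile whose top card
    # is a Five (rank 4) scores 10000 + 4 - len(t.cards); dropping it on
    # another Queen scores a flat 20000.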
# ************************************************************************
# *
# ************************************************************************
class Numerica_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
# this stack accepts any one card from the Waste pile
return from_stack is self.game.s.waste and len(cards) == 1
getBottomImage = Stack._getReserveBottomImage
def getHelp(self):
# return _('Tableau. Accepts any one card from the Waste.')
return _('Tableau. Build regardless of rank and suit.')
# ************************************************************************
# * Numerica
# ************************************************************************
class Numerica(Game):
Hint_Class = Numerica_Hint
Foundation_Class = StackWrapper(RK_FoundationStack, suit=ANY_SUIT)
RowStack_Class = StackWrapper(Numerica_RowStack, max_accept=1)
#
# game layout
#
def createGame(self, rows=4, reserve=False, max_rounds=1,
waste_max_cards=1):
# create layout
l, s = Layout(self), self.s
decks = self.gameinfo.decks
foundations = 4*decks
# set window
# (piles up to 20 cards are playable in default window size)
h = max(2*l.YS, 20*l.YOFFSET)
max_rows = max(rows, foundations)
self.setSize(l.XM+(1.5+max_rows)*l.XS+l.XM, l.YM + l.YS + h)
# create stacks
x0 = l.XM + l.XS * 3 // 2
if decks == 1:
x = x0 + (rows-4)*l.XS//2
else:
x = x0
y = l.YM
for i in range(foundations):
s.foundations.append(self.Foundation_Class(x, y, self, suit=i))
x = x + l.XS
x, y = x0, l.YM + l.YS
for i in range(rows):
s.rows.append(self.RowStack_Class(x, y, self))
x = x + l.XS
self.setRegion(s.rows, (x0-l.XS//2, y-l.CH//2, 999999, 999999))
x, y = l.XM, l.YM+l.YS+l.YS//2*int(reserve)
s.talon = WasteTalonStack(x, y, self, max_rounds=max_rounds)
if reserve or waste_max_cards > 1:
l.createText(s.talon, 'ne')
else:
l.createText(s.talon, 'n')
y = y + l.YS
s.waste = WasteStack(x, y, self, max_cards=waste_max_cards)
if waste_max_cards > 1:
l.createText(s.waste, 'ne')
if reserve:
s.reserves.append(self.ReserveStack_Class(l.XM, l.YM, self))
# define stack-groups
l.defaultStackGroups()
return l
#
# game overrides
#
def startGame(self):
self.startDealSample()
self.s.talon.dealCards() # deal first card to WasteStack
shallHighlightMatch = Game._shallHighlightMatch_SS
def getHighlightPilesStacks(self):
return ()
class Numerica2Decks(Numerica):
def createGame(self):
Numerica.createGame(self, rows=6)
# ************************************************************************
# * Lady Betty
# * Last Chance
# ************************************************************************
class LadyBetty(Numerica):
Foundation_Class = SS_FoundationStack
def createGame(self):
Numerica.createGame(self, rows=6)
class LastChance_RowStack(Numerica_RowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
if not self.cards:
return True
return from_stack is self.game.s.waste and len(cards) == 1
class LastChance_Reserve(OpenStack):
def canFlipCard(self):
return (len(self.game.s.talon.cards) == 0 and
len(self.game.s.waste.cards) == 0 and
self.cards and not self.cards[0].face_up)
class LastChance(LadyBetty):
RowStack_Class = StackWrapper(LastChance_RowStack, max_accept=1)
ReserveStack_Class = LastChance_Reserve
def createGame(self):
Numerica.createGame(self, rows=7, reserve=True)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealRow(rows=self.s.reserves, flip=False)
self.s.talon.dealCards()
# ************************************************************************
# * Puss in the Corner
# ************************************************************************
class PussInTheCorner_Talon(OpenTalonStack):
rightclickHandler = OpenStack.rightclickHandler
doubleclickHandler = OpenStack.doubleclickHandler
def canDealCards(self):
if self.round != self.max_rounds:
return True
return False
def clickHandler(self, event):
if self.cards:
return OpenStack.clickHandler(self, event)
else:
return TalonStack.clickHandler(self, event)
def dealCards(self, sound=False):
ncards = 0
old_state = self.game.enterState(self.game.S_DEAL)
if not self.cards and self.round != self.max_rounds:
self.game.nextRoundMove(self)
self.game.startDealSample()
for r in self.game.s.rows:
while r.cards:
self.game.moveMove(1, r, self, frames=4)
self.game.flipMove(self)
ncards += 1
self.fillStack()
self.game.stopSamples()
self.game.leaveState(old_state)
return ncards
class PussInTheCorner_Foundation(SS_FoundationStack):
def __init__(self, x, y, game, **cap):
kwdefault(cap, base_suit=ANY_SUIT)
SS_FoundationStack.__init__(self, x, y, game, ANY_SUIT, **cap)
def acceptsCards(self, from_stack, cards):
if not SS_FoundationStack.acceptsCards(self, from_stack, cards):
return False
if self.cards:
# check the color
if cards[0].color != self.cards[-1].color:
return False
return True
def getHelp(self):
return _('Foundation. Build up by color.')
class PussInTheCorner_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
# this stack accepts any one card from the Talon
return from_stack is self.game.s.talon and len(cards) == 1
getBottomImage = Stack._getReserveBottomImage
def getHelp(self):
# return _('Tableau. Accepts any one card from the Waste.')
return _('Tableau. Build regardless of rank and suit.')
class PussInTheCorner(Numerica):
def createGame(self, rows=4):
l, s = Layout(self), self.s
self.setSize(l.XM+5*l.XS, l.YM+4*l.YS)
for x, y in ((l.XM, l.YM),
(l.XM+4*l.XS, l.YM),
(l.XM, l.YM+3*l.YS),
(l.XM+4*l.XS, l.YM+3*l.YS),
):
stack = PussInTheCorner_RowStack(x, y, self,
max_accept=1, max_move=1)
stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, 0
s.rows.append(stack)
for x, y in ((l.XM+1.5*l.XS, l.YM + l.YS),
(l.XM+1.5*l.XS, l.YM + 2*l.YS),
(l.XM+2.5*l.XS, l.YM + l.YS),
(l.XM+2.5*l.XS, l.YM + 2*l.YS),
):
s.foundations.append(PussInTheCorner_Foundation(x, y, self,
max_move=0))
x, y = l.XM + 2*l.XS, l.YM
s.waste = s.talon = PussInTheCorner_Talon(x, y, self, max_rounds=2)
l.createText(s.talon, 'se')
l.createRoundText(self.s.talon, 'ne')
# define stack-groups
l.defaultStackGroups()
def _shuffleHook(self, cards):
return self._shuffleHookMoveToTop(
cards, lambda c: (c.rank == ACE, c.suit))
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.foundations)
self.s.talon.fillStack()
def _autoDeal(self, sound=True):
return 0
# ************************************************************************
# * Frog
# * Fly
# * Fanny
# ************************************************************************
class Frog(Game):
Hint_Class = Numerica_Hint
# Foundation_Class = SS_FoundationStack
Foundation_Class = RK_FoundationStack
def createGame(self):
# create layout
l, s = Layout(self), self.s
# set window
self.setSize(l.XM + 8*l.XS, l.YM + 2*l.YS+16*l.YOFFSET)
# create stacks
x, y, = l.XM, l.YM
for i in range(8):
if self.Foundation_Class is RK_FoundationStack:
suit = ANY_SUIT
else:
suit = int(i//2)
s.foundations.append(self.Foundation_Class(x, y, self,
suit=suit, max_move=0))
x += l.XS
x, y = l.XM, l.YM+l.YS
stack = OpenStack(x, y, self, max_accept=0)
stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, l.YOFFSET
s.reserves.append(stack)
x += l.XS
s.talon = WasteTalonStack(x, y, self, max_rounds=1)
l.createText(s.talon, "s")
x += l.XS
s.waste = WasteStack(x, y, self, max_cards=1)
x += l.XS
for i in range(5):
stack = Numerica_RowStack(x, y, self, max_accept=UNLIMITED_ACCEPTS)
# stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, l.YOFFSET
s.rows.append(stack)
x = x + l.XS
# define stack-groups
l.defaultStackGroups()
def startGame(self):
self.startDealSample()
n = 0
f = 0
while True:
c = self.s.talon.cards[-1]
if c.rank == ACE:
r = self.s.foundations[f]
f += 1
# r = self.s.foundations[c.suit*2]
else:
r = self.s.reserves[0]
n += 1
self.s.talon.dealRow(rows=[r])
if n == 13:
break
self.s.talon.dealCards()
class Fly(Frog):
Foundation_Class = RK_FoundationStack
def _shuffleHook(self, cards):
return self._shuffleHookMoveToTop(
cards, lambda c: (c.rank == ACE, c.suit))
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.foundations)
for i in range(13):
self.s.talon.dealRow(self.s.reserves)
self.s.talon.dealCards()
class Fanny(Frog):
Foundation_Class = RK_FoundationStack
def startGame(self):
self.startDealSample()
for i in range(11):
self.s.talon.dealRow(self.s.reserves, flip=0)
self.s.talon.dealRow(self.s.reserves)
self.s.talon.dealCards()
# ************************************************************************
# * Gnat
# ************************************************************************
class Gnat(Game):
Hint_Class = Numerica_Hint
def createGame(self):
# create layout
l, s = Layout(self), self.s
# set window
self.setSize(l.XM + 8*l.XS, l.YM + 2*l.YS+16*l.YOFFSET)
# create stacks
x, y = l.XM, l.YM
s.talon = WasteTalonStack(x, y, self, max_rounds=1)
l.createText(s.talon, "s")
x += l.XS
s.waste = WasteStack(x, y, self, max_cards=1)
x += l.XS
for i in range(4):
s.foundations.append(SS_FoundationStack(x, y, self, suit=i))
x += l.XS
x, y = l.XM+2*l.XS, l.YM+l.YS
for i in range(4):
s.rows.append(
Numerica_RowStack(x, y, self, max_accept=UNLIMITED_ACCEPTS))
x += l.XS
x = l.XM+6*l.XS
for i in range(2):
y = l.YM + l.YS//2
for j in range(3):
s.reserves.append(OpenStack(x, y, self, max_accept=0))
y += l.YS
x += l.XS
# define stack-groups
l.defaultStackGroups()
def _shuffleHook(self, cards):
return self._shuffleHookMoveToTop(
cards, lambda c: (c.rank == ACE, c.suit))
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.foundations)
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealCards()
# ************************************************************************
# * Gloaming
# * Chamberlain
# ************************************************************************
class Gloaming_Hint(Numerica_Hint):
def computeHints(self):
self.step010(self.game.s.rows, self.game.s.rows)
self.step060(self.game.sg.reservestacks, self.game.s.rows)
# try if we should move a card from a ReserveStack to a RowStack
def step060(self, reservestacks, rows):
for r in reservestacks:
if not r.cards:
continue
for t in rows:
if t.cards:
score = self._computeScore(r, t)
self.addHint(score, 1, r, t)
else:
self.addHint(90000+r.cards[-1].rank, 1, r, t)
class Gloaming_RowStack(Numerica_RowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
# this stack accepts any one card from reserves
return from_stack in self.game.s.reserves
class Gloaming(Game):
Hint_Class = Gloaming_Hint
Foundation_Class = SS_FoundationStack
def createGame(self, reserves=3, rows=5):
# create layout
l, s = Layout(self), self.s
# set window
n = 52//reserves+1
w, h = l.XM + (reserves+rows+1)*l.XS, l.YM + 2*l.YS+n*l.YOFFSET
self.setSize(w, h)
# create stacks
x, y = l.XM+(reserves+rows+1-4)*l.XS//2, l.YM
for i in range(4):
if self.Foundation_Class is RK_FoundationStack:
suit = ANY_SUIT
else:
suit = i
s.foundations.append(self.Foundation_Class(x, y, self,
suit=suit, max_move=0))
x += l.XS
x, y = l.XM, l.YM+l.YS
for i in range(reserves):
stack = OpenStack(x, y, self, max_accept=0)
stack.CARD_XOFFSET, stack.CARD_YOFFSET = 0, l.YOFFSET
s.reserves.append(stack)
x += l.XS
x += l.XS
for i in range(rows):
s.rows.append(
Gloaming_RowStack(x, y, self, max_accept=UNLIMITED_ACCEPTS))
x += l.XS
s.talon = InitialDealTalonStack(w-l.XS, h-l.YS, self)
# default
l.defaultAll()
def startGame(self):
n = 52//len(self.s.reserves)+1
for i in range(n-3):
self.s.talon.dealRow(rows=self.s.reserves, frames=0)
self.startDealSample()
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealRow(rows=self.s.reserves)
self.s.talon.dealRowAvail(rows=self.s.reserves)
class Chamberlain(Gloaming):
Foundation_Class = RK_FoundationStack
def createGame(self, reserves=3, rows=5):
Gloaming.createGame(self, reserves=4, rows=3)
# ************************************************************************
# * Toad
# ************************************************************************
class Toad_TalonStack(DealRowTalonStack):
def canDealCards(self):
if not DealRowTalonStack.canDealCards(self):
return False
for r in self.game.s.reserves:
if r.cards:
return False
return True
def dealCards(self, sound=False):
self.dealRow(rows=self.game.s.reserves, sound=sound)
class Toad(Game):
Hint_Class = Gloaming_Hint
def createGame(self, reserves=3, rows=5):
# create layout
l, s = Layout(self), self.s
# set window
w, h = l.XM+11*l.XS, l.YM+6*l.YS
self.setSize(w, h)
# create stacks
x, y = w-l.XS, h-l.YS
s.talon = Toad_TalonStack(x, y, self)
l.createText(s.talon, "n")
x, y = l.XM, l.YM
for i in range(8):
s.foundations.append(SS_FoundationStack(x, y, self, suit=i//2))
x += l.XS
x, y = l.XM+3*l.XS//2, l.YM+l.YS
for i in range(5):
s.rows.append(
Gloaming_RowStack(x, y, self, max_accept=UNLIMITED_ACCEPTS))
x += l.XS
y = l.YM+l.YS//2
for i in (3, 3, 3, 3, 1):
x = l.XM+8*l.XS
for j in range(i):
s.reserves.append(OpenStack(x, y, self, max_accept=0))
x += l.XS
y += l.YS
# define stack-groups
l.defaultStackGroups()
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.reserves)
# ************************************************************************
# * Shifting
# ************************************************************************
class Shifting_Hint(Numerica_Hint):
shallMovePile = DefaultHint._cautiousShallMovePile
class Shifting_RowStack(Numerica_RowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
if from_stack is self.game.s.waste:
return True
if not self.cards:
return cards[0].rank == KING
if (from_stack in self.game.s.rows and
self.cards[-1].rank-cards[0].rank == 1):
return True
return False
class Shifting(Numerica):
Hint_Class = Shifting_Hint
RowStack_Class = StackWrapper(Shifting_RowStack, max_accept=1)
# ************************************************************************
# * Strategerie
# ************************************************************************
class Strategerie_Talon(OpenTalonStack):
rightclickHandler = OpenStack.rightclickHandler
doubleclickHandler = OpenStack.doubleclickHandler
class Strategerie_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
if from_stack is self.game.s.talon or \
from_stack in self.game.s.reserves:
return True
return False
getBottomImage = Stack._getReserveBottomImage
def getHelp(self):
return _('Tableau. Build regardless of rank and suit.')
class Strategerie_ReserveStack(ReserveStack):
def acceptsCards(self, from_stack, cards):
if not ReserveStack.acceptsCards(self, from_stack, cards):
return False
if from_stack is self.game.s.talon:
return True
return False
class Strategerie(Game):
Hint_Class = Numerica_Hint
def createGame(self, **layout):
# create layout
l, s = Layout(self), self.s
l.freeCellLayout(rows=4, reserves=4, texts=1)
self.setSize(l.size[0], l.size[1])
# create stacks
s.talon = Strategerie_Talon(l.s.talon.x, l.s.talon.y, self)
for r in l.s.foundations:
s.foundations.append(RK_FoundationStack(r.x, r.y, self))
for r in l.s.rows:
s.rows.append(Strategerie_RowStack(r.x, r.y, self,
max_accept=UNLIMITED_ACCEPTS))
for r in l.s.reserves:
s.reserves.append(Strategerie_ReserveStack(r.x, r.y, self))
# default
l.defaultAll()
self.sg.dropstacks.append(s.talon)
def startGame(self):
self.startDealSample()
self.s.talon.fillStack()
# ************************************************************************
# * Assembly
# * Anno Domini
# ************************************************************************
class Assembly_RowStack(RK_RowStack):
def acceptsCards(self, from_stack, cards):
if not RK_RowStack.acceptsCards(self, from_stack, cards):
return False
if not self.cards:
return from_stack is self.game.s.waste
return True
class Assembly(Numerica):
Hint_Class = CautiousDefaultHint
Foundation_Class = StackWrapper(RK_FoundationStack, suit=ANY_SUIT)
RowStack_Class = StackWrapper(Assembly_RowStack, max_move=1)
def createGame(self):
Numerica.createGame(self, waste_max_cards=UNLIMITED_CARDS)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealCards()
shallHighlightMatch = Game._shallHighlightMatch_RK
class AnnoDomini_Hint(DefaultHint):
def step030(self, foundations, rows, dropstacks):
pass
class AnnoDomini(Numerica):
Hint_Class = AnnoDomini_Hint
Foundation_Class = StackWrapper(SS_FoundationStack, suit=ANY_SUIT, mod=13)
RowStack_Class = StackWrapper(AC_RowStack, mod=13)
def createGame(self):
lay = Numerica.createGame(
self, max_rounds=3, waste_max_cards=UNLIMITED_CARDS)
year = str(time.localtime()[0])
i = 0
for s in self.s.foundations:
# setup base_rank & base_suit
s.cap.suit = i
s.cap.base_suit = i
d = int(year[i])
if d == 0:
d = JACK
s.cap.base_rank = d
i += 1
lay.createRoundText(self.s.talon, 'nn')
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.dealCards()
shallHighlightMatch = Game._shallHighlightMatch_ACW
# ************************************************************************
# * Circle Nine
# * Measure
# * Double Measure
# ************************************************************************
class CircleNine_RowStack(BasicRowStack):
def acceptsCards(self, from_stack, cards):
if not BasicRowStack.acceptsCards(self, from_stack, cards):
return False
return from_stack is self.game.s.talon
def getHelp(self):
return _('Tableau. Build regardless of rank and suit.')
class CircleNine(Game):
Hint_Class = Numerica_Hint
def createGame(self):
l, s = Layout(self), self.s
self.setSize(l.XM+7*l.XS, l.YM+3*l.YS)
for i, j in ((1, 0),
(2, 0),
(3, 0),
(4, 0),
(5, 1),
(3.5, 2),
(2.5, 2),
(1.5, 2),
(0, 1),
):
x, y = l.XM+(1+i)*l.XS, l.YM+j*l.YS
stack = CircleNine_RowStack(x, y, self, max_accept=1,
max_move=1, base_rank=NO_RANK)
s.rows.append(stack)
stack.CARD_YOFFSET = 0
x, y = l.XM+3.5*l.XS, l.YM+l.YS
stack = RK_FoundationStack(x, y, self, suit=ANY_SUIT, max_cards=52,
max_move=0, mod=13, base_rank=ANY_RANK)
s.foundations.append(stack)
l.createText(stack, 'ne')
x, y = l.XM, l.YM
s.talon = Strategerie_Talon(x, y, self)
l.createText(s.talon, 'ne')
l.defaultStackGroups()
self.sg.dropstacks.append(s.talon)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.foundations)
self.s.talon.dealRow()
self.s.talon.fillStack()
def fillStack(self, stack):
if stack in self.s.rows and not stack.cards:
if self.s.talon.cards:
old_state = self.enterState(self.S_FILL)
self.s.talon.moveMove(1, stack)
self.leaveState(old_state)
class Measure(CircleNine):
Foundation_Class = StackWrapper(RK_FoundationStack, max_cards=52)
def createGame(self, rows=8):
l, s = Layout(self), self.s
self.setSize(l.XM+rows*l.XS, l.YM+2*l.YS+10*l.YOFFSET)
x, y = l.XM, l.YM
s.talon = Strategerie_Talon(x, y, self)
l.createText(s.talon, 'ne')
x = self.width-l.XS
stack = self.Foundation_Class(x, y, self, suit=ANY_SUIT, max_cards=52,
max_move=0, mod=13, base_rank=ANY_RANK)
s.foundations.append(stack)
l.createText(stack, 'nw')
x, y = l.XM, l.YM+l.YS
for i in range(rows):
s.rows.append(CircleNine_RowStack(x, y, self, max_accept=1,
max_move=1, base_rank=NO_RANK))
x += l.XS
l.defaultStackGroups()
self.sg.dropstacks.append(s.talon)
def startGame(self):
self.startDealSample()
self.s.talon.dealRow()
self.s.talon.fillStack()
class DoubleMeasure(Measure):
Foundation_Class = StackWrapper(RK_FoundationStack, max_cards=104)
def createGame(self, rows=8):
Measure.createGame(self, rows=10)
# ************************************************************************
# * Amphibian
# ************************************************************************
class Amphibian(Game):
Hint_Class = Gloaming_Hint
def createGame(self, rows=5, reserves=4, playcards=15):
# create layout
l, s = Layout(self), self.s
# set window
self.setSize(l.XM + 8 * l.XS, l.YM + 3*l.YS + playcards*l.YOFFSET)
# create stacks
x, y = l.XM, l.YM
for i in range(4):
for j in range(2):
s.foundations.append(RK_FoundationStack(x, y, self,
suit=ANY_SUIT))
x += l.XS
x, y = l.XM+(8-rows)*l.XS//2, l.YM + l.YS
for i in range(rows):
s.rows.append(Gloaming_RowStack(x, y, self, max_accept=1))
x += l.XS
x, y = l.XM+(8-reserves-1)*l.XS//2, self.height-l.YS
for i in range(reserves):
s.reserves.append(OpenStack(x, y, self, max_accept=0))
x += l.XS
s.talon = TalonStack(x, y, self)
l.createText(s.talon, 'n')
# define stack-groups
l.defaultStackGroups()
def startGame(self):
self.startDealSample()
self.s.talon.dealRow(rows=self.s.reserves)
def fillStack(self, stack):
if stack in self.s.reserves:
for stack in self.s.reserves:
if stack.cards:
return
old_state = self.enterState(self.S_FILL)
self.s.talon.dealRow(rows=self.s.reserves, sound=1)
self.leaveState(old_state)
# ************************************************************************
# * Aglet
# ************************************************************************
class Aglet(Game):
def createGame(self, playcards=20, rows=8, reserves=1):
decks = self.gameinfo.decks
l, s = Layout(self), self.s
self.setSize(l.XM+(reserves+0.5+rows)*l.XS,
l.YM+max(2*l.YS+7*l.YOFFSET, l.YS+playcards*l.YOFFSET))
x, y = self.width-l.XS, self.height-l.YS
s.talon = InitialDealTalonStack(x, y, self)
x, y = l.XM, l.YM
for i in range(reserves):
stack = ReserveStack(x, y, self, max_cards=UNLIMITED_CARDS)
stack.CARD_YOFFSET = l.YOFFSET
s.reserves.append(stack)
x += l.XS
x, y = l.XM + (reserves+0.5+(rows-decks*4)/2.0)*l.XS, l.YM
for i in range(4):
s.foundations.append(RK_FoundationStack(x, y, self, suit=ANY_SUIT))
x += l.XS
x, y = l.XM+(reserves+0.5)*l.XS, l.YM+l.YS
for i in range(rows):
s.rows.append(BasicRowStack(x, y, self, base_rank=NO_RANK))
x += l.XS
l.defaultStackGroups()
def _shuffleHook(self, cards):
# move Aces to top of the Talon (i.e. first cards to be dealt)
return self._shuffleHookMoveToTop(
cards, lambda c: (c.rank == ACE, c.suit))
def startGame(self):
self.s.talon.dealRow(rows=self.s.foundations, frames=0)
self._startDealNumRows(4)
self.s.talon.dealRowAvail()
self.s.talon.dealRowAvail()
# register the game
registerGame(GameInfo(257, Numerica, "Numerica",
GI.GT_NUMERICA | GI.GT_CONTRIB, 1, 0, GI.SL_BALANCED,
altnames=("Sir Tommy",)))
registerGame(GameInfo(171, LadyBetty, "Lady Betty",
GI.GT_NUMERICA, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(355, Frog, "Frog",
GI.GT_NUMERICA, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(356, Fly, "Fly",
GI.GT_NUMERICA, 2, 0, GI.SL_BALANCED,
rules_filename='frog.html'))
registerGame(GameInfo(357, Gnat, "Gnat",
GI.GT_NUMERICA, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(378, Gloaming, "Gloaming",
GI.GT_NUMERICA | GI.GT_OPEN | GI.GT_ORIGINAL, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(379, Chamberlain, "Chamberlain",
GI.GT_NUMERICA | GI.GT_OPEN | GI.GT_ORIGINAL, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(402, Toad, "Toad",
GI.GT_NUMERICA, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(430, PussInTheCorner, "Puss in the Corner",
GI.GT_NUMERICA, 1, 1, GI.SL_BALANCED))
registerGame(GameInfo(435, Shifting, "Shifting",
GI.GT_NUMERICA, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(472, Strategerie, "Strategerie",
GI.GT_NUMERICA, 1, 0, GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(558, Numerica2Decks, "Numerica (2 decks)",
GI.GT_NUMERICA, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(589, LastChance, "Last Chance",
GI.GT_NUMERICA, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(599, Assembly, "Assembly",
GI.GT_NUMERICA, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(600, AnnoDomini, "Anno Domini",
GI.GT_NUMERICA, 1, 2, GI.SL_BALANCED))
registerGame(GameInfo(613, Fanny, "Fanny",
GI.GT_NUMERICA, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(641, CircleNine, "Circle Nine",
GI.GT_NUMERICA, 1, 0, GI.SL_BALANCED))
registerGame(GameInfo(643, Measure, "Measure",
GI.GT_NUMERICA | GI.GT_ORIGINAL, 1, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(644, DoubleMeasure, "Double Measure",
GI.GT_NUMERICA | GI.GT_ORIGINAL, 2, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(754, Amphibian, "Amphibian",
GI.GT_NUMERICA | GI.GT_ORIGINAL, 2, 0,
GI.SL_MOSTLY_SKILL))
registerGame(GameInfo(760, Aglet, "Aglet",
GI.GT_1DECK_TYPE | GI.GT_ORIGINAL, 1, 0,
GI.SL_MOSTLY_SKILL))
| gpl-3.0 | -6,632,628,231,658,330,000 | 31.670203 | 79 | 0.526108 | false |
ccrisan/motioneye | motioneye/utils.py | 1 | 28442 |
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import datetime
import functools
import hashlib
import logging
import os
import re
import socket
import sys
import time
import urllib
import urllib2
import urlparse
from PIL import Image, ImageDraw
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.iostream import IOStream
from tornado.ioloop import IOLoop
import settings
_SIGNATURE_REGEX = re.compile('[^a-zA-Z0-9/?_.=&{}\[\]":, -]')
_SPECIAL_COOKIE_NAMES = {'expires', 'domain', 'path', 'secure', 'httponly'}
MASK_WIDTH = 32
DEV_NULL = open('/dev/null', 'w')
COMMON_RESOLUTIONS = [
(320, 200),
(320, 240),
(640, 480),
(800, 480),
(800, 600),
(1024, 576),
(1024, 600),
(1024, 768),
(1280, 720),
(1280, 768),
(1280, 800),
(1280, 960),
(1280, 1024),
(1440, 900),
(1440, 960),
(1440, 1024),
(1600, 1200),
(1920, 1080)
]
def _(x):
return x # this could later be replaced by a proper translate function
def pretty_date_time(date_time, tzinfo=None, short=False):
if date_time is None:
return '(' + _('never') + ')'
if isinstance(date_time, int):
return pretty_date_time(datetime.datetime.fromtimestamp(date_time))
if short:
text = u'{day} {month}, {hm}'.format(
day=date_time.day,
month=date_time.strftime('%b'),
hm=date_time.strftime('%H:%M')
)
else:
text = u'{day} {month} {year}, {hm}'.format(
day=date_time.day,
month=date_time.strftime('%B'),
year=date_time.year,
hm=date_time.strftime('%H:%M')
)
if tzinfo:
offset = tzinfo.utcoffset(datetime.datetime.utcnow()).seconds
tz = 'GMT'
if offset >= 0:
tz += '+'
else:
tz += '-'
offset = -offset
tz += '%.2d' % (offset / 3600) + ':%.2d' % ((offset % 3600) / 60)
text += ' (' + tz + ')'
return text
def pretty_date(date):
if date is None:
return '(' + _('never') + ')'
if isinstance(date, int):
return pretty_date(datetime.datetime.fromtimestamp(date))
return u'{day} {month} {year}'.format(
day=date.day,
month=_(date.strftime('%B')),
year=date.year
)
def pretty_time(time):
if time is None:
return ''
if isinstance(time, datetime.timedelta):
hour = time.seconds / 3600
minute = (time.seconds % 3600) / 60
time = datetime.time(hour, minute)
return '{hm}'.format(
hm=time.strftime('%H:%M')
)
def pretty_duration(duration):
if duration is None:
duration = 0
if isinstance(duration, datetime.timedelta):
duration = duration.seconds + duration.days * 86400
if duration < 0:
negative = True
duration = -duration
else:
negative = False
days = int(duration / 86400)
duration %= 86400
hours = int(duration / 3600)
duration %= 3600
minutes = int(duration / 60)
duration %= 60
seconds = duration
# treat special cases
special_result = None
if days != 0 and hours == 0 and minutes == 0 and seconds == 0:
if days == 1:
special_result = str(days) + ' ' + _('day')
elif days == 7:
special_result = '1 ' + _('week')
elif days in [30, 31, 32]:
special_result = '1 ' + _('month')
elif days in [365, 366]:
special_result = '1 ' + _('year')
else:
special_result = str(days) + ' ' + _('days')
elif days == 0 and hours != 0 and minutes == 0 and seconds == 0:
if hours == 1:
special_result = str(hours) + ' ' + _('hour')
else:
special_result = str(hours) + ' ' + _('hours')
elif days == 0 and hours == 0 and minutes != 0 and seconds == 0:
if minutes == 1:
special_result = str(minutes) + ' ' + _('minute')
else:
special_result = str(minutes) + ' ' + _('minutes')
elif days == 0 and hours == 0 and minutes == 0 and seconds != 0:
if seconds == 1:
special_result = str(seconds) + ' ' + _('second')
else:
special_result = str(seconds) + ' ' + _('seconds')
elif days == 0 and hours == 0 and minutes == 0 and seconds == 0:
special_result = str(0) + ' ' + _('seconds')
if special_result:
if negative:
special_result = _('minus') + ' ' + special_result
return special_result
if days:
fmt = "{d}d{h}h{m}m"
elif hours:
fmt = "{h}h{m}m"
elif minutes:
fmt = "{m}m"
if seconds:
fmt += "{s}s"
else:
fmt = "{s}s"
if negative:
fmt = '-' + fmt
return fmt.format(d=days, h=hours, m=minutes, s=seconds)
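# Illustrative outputs (derived from the rules above):
#   pretty_duration(86400) -> '1 day'   (whole days/hours/minutes are special-cased)
#   pretty_duration(3600)  -> '1 hour'
#   pretty_duration(3661)  -> '1h1m1s'
#   pretty_duration(90)    -> '1m30s'
#   pretty_duration(-90)   -> '-1m30s'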
def pretty_size(size):
if size < 1024: # less than 1kB
size, unit = size, 'B'
elif size < 1024 * 1024: # less than 1MB
size, unit = size / 1024.0, 'kB'
elif size < 1024 * 1024 * 1024: # less than 1GB
size, unit = size / 1024.0 / 1024, 'MB'
else: # greater than or equal to 1GB
size, unit = size / 1024.0 / 1024 / 1024, 'GB'
return '%.1f %s' % (size, unit)
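# Illustrative outputs (derived from the thresholds above):
#   pretty_size(500)             -> '500.0 B'
#   pretty_size(2048)            -> '2.0 kB'
#   pretty_size(5 * 1024 * 1024) -> '5.0 MB'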
def pretty_http_error(response):
if response.code == 401 or response.error == 'Authentication Error':
return 'authentication failed'
if not response.error:
return 'ok'
msg = unicode(response.error)
if msg.startswith('HTTP '):
msg = msg.split(':', 1)[-1].strip()
if msg.startswith('[Errno '):
msg = msg.split(']', 1)[-1].strip()
if 'timeout' in msg.lower() or 'timed out' in msg.lower():
msg = 'request timed out'
return msg
def make_str(s):
if isinstance(s, str):
return s
try:
return str(s)
except:
try:
return unicode(s, encoding='utf8').encode('utf8')
except:
return unicode(s).encode('utf8')
def make_unicode(s):
if isinstance(s, unicode):
return s
try:
return unicode(s, encoding='utf8')
except:
try:
return unicode(s)
except:
return str(s).decode('utf8')
def split_semicolon(s):
parts = s.split(';')
merged_parts = []
for p in parts:
if merged_parts and merged_parts[-1][-1] == '\\':
merged_parts[-1] = merged_parts[-1][:-1] + ';' + p
else:
merged_parts.append(p)
if not merged_parts:
return []
return [p.strip() for p in merged_parts]
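# Illustrative behaviour (a trailing backslash escapes the semicolon that
# follows, so it stays inside the same part):
#   split_semicolon('a;b;c')   -> ['a', 'b', 'c']
#   split_semicolon(r'a;b\;c') -> ['a', 'b;c']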
def get_disk_usage(path):
logging.debug('getting disk usage for path %(path)s...' % {
'path': path})
try:
result = os.statvfs(path)
except OSError as e:
logging.error('failed to execute statvfs: %(msg)s' % {'msg': unicode(e)})
return None
block_size = result.f_frsize
free_blocks = result.f_bfree
total_blocks = result.f_blocks
free_size = free_blocks * block_size
total_size = total_blocks * block_size
used_size = total_size - free_size
return used_size, total_size
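# Example usage (hypothetical path; the function returns None if statvfs fails):
#
#   usage = get_disk_usage('/var/lib/motioneye')
#   if usage:
#       used, total = usage
#       logging.info('%s of %s used' % (pretty_size(used), pretty_size(total)))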
def is_local_motion_camera(config):
"""Tells if a camera is managed by the local motion instance."""
return bool(config.get('videodevice') or config.get('netcam_url') or config.get('mmalcam_name'))
def is_remote_camera(config):
"""Tells if a camera is managed by a remote motionEye server."""
return config.get('@proto') == 'motioneye'
def is_v4l2_camera(config):
"""Tells if a camera is a v4l2 device managed by the local motion instance."""
return bool(config.get('videodevice'))
def is_mmal_camera(config):
'''Tells if a camera is mmal device managed by the local motion instance.'''
return bool(config.get('mmalcam_name'))
def is_net_camera(config):
"""Tells if a camera is a network camera managed by the local motion instance."""
return bool(config.get('netcam_url'))
def is_simple_mjpeg_camera(config):
"""Tells if a camera is a simple MJPEG camera not managed by any motion instance."""
return bool(config.get('@proto') == 'mjpeg')
def test_mjpeg_url(data, auth_modes, allow_jpeg, callback):
data = dict(data)
data.setdefault('scheme', 'http')
data.setdefault('host', '127.0.0.1')
data.setdefault('port', '80')
data.setdefault('path', '')
data.setdefault('username', None)
data.setdefault('password', None)
url = '%(scheme)s://%(host)s%(port)s%(path)s' % {
'scheme': data['scheme'] if data['scheme'] != 'mjpeg' else 'http',
'host': data['host'],
'port': ':' + str(data['port']) if data['port'] else '',
'path': data['path'] or ''}
called = [False]
status_2xx = [False]
http_11 = [False]
def do_request(on_response):
if data['username']:
auth = auth_modes[0]
else:
auth = 'no'
logging.debug('testing (m)jpg netcam at %s using %s authentication' % (url, auth))
request = HTTPRequest(url, auth_username=username, auth_password=password or '', auth_mode=auth_modes.pop(0),
connect_timeout=settings.REMOTE_REQUEST_TIMEOUT,
request_timeout=settings.REMOTE_REQUEST_TIMEOUT,
header_callback=on_header, validate_cert=settings.VALIDATE_CERTS)
http_client = AsyncHTTPClient(force_instance=True)
http_client.fetch(request, on_response)
def on_header(header):
header = header.lower()
if header.startswith('content-type') and status_2xx[0]:
content_type = header.split(':')[1].strip()
called[0] = True
if content_type in ['image/jpg', 'image/jpeg', 'image/pjpg'] and allow_jpeg:
callback([{'id': 1, 'name': 'JPEG Network Camera', 'keep_alive': http_11[0]}])
elif content_type.startswith('multipart/x-mixed-replace'):
callback([{'id': 1, 'name': 'MJPEG Network Camera', 'keep_alive': http_11[0]}])
else:
callback(error='not a supported network camera')
else:
# check for the status header
m = re.match('^http/1.(\d) (\d+) ', header)
if m:
if int(m.group(2)) / 100 == 2:
status_2xx[0] = True
if m.group(1) == '1':
http_11[0] = True
def on_response(response):
if not called[0]:
if response.code == 401 and auth_modes and data['username']:
status_2xx[0] = False
do_request(on_response)
else:
called[0] = True
callback(error=pretty_http_error(response) if response.error else 'not a supported network camera')
username = data['username'] or None
password = data['password'] or None
do_request(on_response)
def test_rtsp_url(data, callback):
import motionctl
scheme = data.get('scheme', 'rtsp')
host = data.get('host', '127.0.0.1')
port = data.get('port') or '554'
path = data.get('path') or ''
username = data.get('username')
password = data.get('password')
url = '%(scheme)s://%(host)s%(port)s%(path)s' % {
'scheme': scheme,
'host': host,
'port': (':' + port) if port else '',
'path': path}
called = [False]
send_auth = [False]
timeout = [None]
stream = None
io_loop = IOLoop.instance()
def connect():
if send_auth[0]:
logging.debug('testing rtsp netcam at %s (this time with credentials)' % url)
else:
logging.debug('testing rtsp netcam at %s' % url)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.settimeout(settings.MJPG_CLIENT_TIMEOUT)
stream = IOStream(s)
stream.set_close_callback(on_close)
stream.connect((host, int(port)), on_connect)
timeout[0] = io_loop.add_timeout(datetime.timedelta(seconds=settings.MJPG_CLIENT_TIMEOUT),
functools.partial(on_connect, _timeout=True))
return stream
def on_connect(_timeout=False):
io_loop.remove_timeout(timeout[0])
if _timeout:
return handle_error('timeout connecting to rtsp netcam')
if not stream:
return handle_error('failed to connect to rtsp netcam')
logging.debug('connected to rtsp netcam')
lines = [
'OPTIONS %s RTSP/1.0' % url.encode('utf8'),
'CSeq: 1',
'User-Agent: motionEye'
]
if username and send_auth[0]:
auth_header = 'Authorization: ' + build_basic_header(username, password)
lines.append(auth_header)
lines += [
'',
''
]
stream.write('\r\n'.join(lines))
seek_rtsp()
def seek_rtsp():
if check_error():
return
stream.read_until_regex('RTSP/1.0 \d+ ', on_rtsp)
timeout[0] = io_loop.add_timeout(datetime.timedelta(seconds=settings.MJPG_CLIENT_TIMEOUT), on_rtsp)
def on_rtsp(data=None):
io_loop.remove_timeout(timeout[0])
if data:
if data.endswith('200 '):
seek_server()
elif data.endswith('401 '):
if not username or send_auth[0]:
# either credentials not supplied, or already sent
handle_error('authentication failed')
else:
seek_www_authenticate()
else:
handle_error('rtsp netcam returned erroneous response: %s' % data)
else:
handle_error('timeout waiting for rtsp netcam response')
def seek_server():
if check_error():
return
stream.read_until_regex('Server: .*', on_server)
timeout[0] = io_loop.add_timeout(datetime.timedelta(seconds=1), on_server)
def on_server(data=None):
io_loop.remove_timeout(timeout[0])
if data:
identifier = re.findall('Server: (.*)', data)[0].strip()
logging.debug('rtsp netcam identifier is "%s"' % identifier)
else:
identifier = None
logging.debug('no rtsp netcam identifier')
handle_success(identifier)
def seek_www_authenticate():
if check_error():
return
stream.read_until_regex('WWW-Authenticate: .*', on_www_authenticate)
timeout[0] = io_loop.add_timeout(datetime.timedelta(seconds=1), on_www_authenticate)
def on_www_authenticate(data=None):
io_loop.remove_timeout(timeout[0])
if data:
scheme = re.findall('WWW-Authenticate: ([^\s]+)', data)[0].strip()
logging.debug('rtsp netcam auth scheme: %s' % scheme)
if scheme.lower() == 'basic':
send_auth[0] = True
connect()
else:
logging.debug('rtsp auth scheme digest not supported, considering credentials ok')
handle_success('(unknown) ')
else:
logging.error('timeout waiting for rtsp auth scheme')
handle_error('timeout waiting for rtsp netcam response')
def on_close():
if called[0]:
return
if not check_error():
handle_error('connection closed')
def handle_success(identifier):
if called[0]:
return
called[0] = True
cameras = []
if identifier:
identifier = ' ' + identifier
else:
identifier = ''
cameras.append({'id': 'tcp', 'name': '%sRTSP/TCP Camera' % identifier})
cameras.append({'id': 'udp', 'name': '%sRTSP/UDP Camera' % identifier})
callback(cameras)
def handle_error(e):
if called[0]:
return
called[0] = True
logging.error('rtsp client error: %s' % unicode(e))
try:
stream.close()
except:
pass
callback(error=unicode(e))
def check_error():
error = getattr(stream, 'error', None)
if error and getattr(error, 'errno', None) != 0:
handle_error(error.strerror)
return True
if stream and stream.socket is None:
handle_error('connection closed')
stream.close()
return True
return False
stream = connect()
def test_rtmp_url(data, callback):
import motionctl
scheme = data.get('scheme', 'rtmp')
host = data.get('host', '127.0.0.1')
port = data.get('port') or '1935'
path = data.get('path') or ''
username = data.get('username')
password = data.get('password')
url = '%(scheme)s://%(host)s%(port)s%(path)s' % {
'scheme': scheme,
'host': host,
'port': (':' + port) if port else '',
'path': path}
    # Since RTMP is a binary TCP stream, it's a little more work to do a proper test.
    # For now let's just check if a TCP socket is open on the target IP:PORT.
    # TODO: Actually do the TCP SYN/ACK check...
cameras = []
cameras.append({'id': 'tcp', 'name': 'RTMP/TCP Camera'})
callback(cameras)
def compute_signature(method, path, body, key):
parts = list(urlparse.urlsplit(path))
query = [q for q in urlparse.parse_qsl(parts[3], keep_blank_values=True) if (q[0] != '_signature')]
query.sort(key=lambda q: q[0])
# "safe" characters here are set to match the encodeURIComponent JavaScript counterpart
query = [(n, urllib.quote(v, safe="!'()*~")) for (n, v) in query]
query = '&'.join([(q[0] + '=' + q[1]) for q in query])
parts[0] = parts[1] = ''
parts[3] = query
path = urlparse.urlunsplit(parts)
path = _SIGNATURE_REGEX.sub('-', path)
key = _SIGNATURE_REGEX.sub('-', key)
if body and body.startswith('---'):
body = None # file attachment
body = body and _SIGNATURE_REGEX.sub('-', body.decode('utf8'))
return hashlib.sha1('%s:%s:%s:%s' % (method, path, body or '', key)).hexdigest().lower()
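# Example usage (hypothetical values; the result is a lowercase SHA-1 hex digest
# that the client appends as the _signature query argument):
#
#   signature = compute_signature('GET', '/config/list/?_=123', None, 'secret-key')
#   signed_path = '/config/list/?_=123&_signature=' + signature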
def parse_cookies(cookies_headers):
parsed = {}
for cookie in cookies_headers:
cookie = cookie.split(';')
for c in cookie:
(name, value) = c.split('=', 1)
name = name.strip()
value = value.strip()
if name.lower() in _SPECIAL_COOKIE_NAMES:
continue
parsed[name] = value
return parsed
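# Illustrative behaviour (attribute names such as path/expires/secure are skipped):
#   parse_cookies(['session=abc123; path=/; expires=Wed, 01 Jan 2020 00:00:00 GMT'])
#       -> {'session': 'abc123'}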
def build_basic_header(username, password):
return 'Basic ' + base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
def parse_basic_header(header):
parts = header.split(' ', 1)
if len(parts) < 2:
return None
if parts[0].lower() != 'basic':
return None
encoded = parts[1]
try:
decoded = base64.decodestring(encoded)
except:
return None
parts = decoded.split(':', 1)
if len(parts) < 2:
return None
return {
'username': parts[0],
'password': parts[1]
}
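# Illustrative round trip:
#   build_basic_header('user', 'pass')       -> 'Basic dXNlcjpwYXNz'
#   parse_basic_header('Basic dXNlcjpwYXNz') -> {'username': 'user', 'password': 'pass'}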
def build_digest_header(method, url, username, password, state):
realm = state['realm']
nonce = state['nonce']
last_nonce = state.get('last_nonce', '')
nonce_count = state.get('nonce_count', 0)
qop = state.get('qop')
algorithm = state.get('algorithm')
opaque = state.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
else: # _algorithm == 'SHA'
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
entdig = None
p_parsed = urlparse.urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (username, realm, password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == last_nonce:
nonce_count += 1
else:
nonce_count = 1
ncvalue = '%08x' % nonce_count
s = str(nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, 'auth', HA2)
respdig = KD(HA1, noncebit)
else:
return None
last_nonce = nonce
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
state['last_nonce'] = last_nonce
state['nonce_count'] = nonce_count
return 'Digest %s' % base
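# Example usage (hypothetical challenge values as they would arrive in a 401
# WWW-Authenticate header; the same mutable state dict must be reused across
# requests so last_nonce/nonce_count are tracked correctly):
#
#   state = {'realm': 'cam', 'nonce': 'abc', 'qop': 'auth',
#            'algorithm': 'MD5', 'opaque': None}
#   header = build_digest_header('GET', 'http://camera/stream', 'user', 'pass', state)
#   # -> 'Digest username="user", realm="cam", nonce="abc", uri="/stream", ...'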
def urlopen(*args, **kwargs):
if sys.version_info >= (2, 7, 9) and not settings.VALIDATE_CERTS:
# ssl certs are not verified by default
# in versions prior to 2.7.9
import ssl
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
kwargs.setdefault('context', ctx)
return urllib2.urlopen(*args, **kwargs)
def build_editable_mask_file(camera_id, mask_class, mask_lines, capture_width=None, capture_height=None):
if not mask_lines:
return ''
width = mask_lines[0]
height = mask_lines[1]
mask_lines = mask_lines[2:]
logging.debug('building editable %s mask for camera with id %s (%sx%s)' %
(mask_class, camera_id, width, height))
# horizontal rectangles
nx = MASK_WIDTH # number of rectangles
if width % nx:
nx -= 1
rx = width % nx # remainder
else:
rx = 0
rw = width / nx # rectangle width
# vertical rectangles
ny = mask_height = height * MASK_WIDTH / width # number of rectangles
if height % ny:
ny -= 1
ry = height % ny # remainder
else:
ry = 0
# if mask not present, generate an empty mask
if not mask_lines:
mask_lines = [0] * mask_height
# scale the mask vertically in case the aspect ratio has changed
# since the last time the mask has been generated
if ny == len(mask_lines):
line_index_func = lambda y: y
else:
line_index_func = lambda y: (len(mask_lines) - 1) * y / ny
rh = height / ny # rectangle height
# draw the actual mask image content
im = Image.new('L', (width, height), 255) # all white
dr = ImageDraw.Draw(im)
for y in xrange(ny):
line = mask_lines[line_index_func(y)]
for x in xrange(nx):
if line & (1 << (MASK_WIDTH - 1 - x)):
dr.rectangle((x * rw, y * rh, (x + 1) * rw - 1, (y + 1) * rh - 1), fill=0)
if rx and line & 1:
dr.rectangle((nx * rw, y * rh, nx * rw + rx - 1, (y + 1) * rh - 1), fill=0)
if ry:
line = mask_lines[line_index_func(ny)]
for x in xrange(nx):
if line & (1 << (MASK_WIDTH - 1 - x)):
dr.rectangle((x * rw, ny * rh, (x + 1) * rw - 1, ny * rh + ry - 1), fill=0)
if rx and line & 1:
dr.rectangle((nx * rw, ny * rh, nx * rw + rx - 1, ny * rh + ry - 1), fill=0)
file_name = build_mask_file_name(camera_id, mask_class)
# resize the image if necessary
if capture_width and capture_height and im.size != (capture_width, capture_height):
logging.debug('editable mask needs resizing from %sx%s to %sx%s' %
(im.size[0], im.size[1], capture_width, capture_height))
im = im.resize((capture_width, capture_height))
im.save(file_name, 'ppm')
return file_name
def build_mask_file_name(camera_id, mask_class):
file_name = 'mask_%s.pgm' % (camera_id) if mask_class == 'motion' else 'mask_%s_%s.pgm' % (camera_id, mask_class)
full_path = os.path.join(settings.CONF_PATH, file_name)
return full_path
def parse_editable_mask_file(camera_id, mask_class, capture_width=None, capture_height=None):
# capture_width and capture_height arguments represent the current size
# of the camera image, as it might be different from that of the associated mask;
# they can be null (e.g. netcams)
file_name = build_mask_file_name(camera_id, mask_class)
logging.debug('parsing editable mask %s for camera with id %s: %s' % (mask_class, camera_id, file_name))
# read the image file
try:
im = Image.open(file_name)
except Exception as e:
logging.error('failed to read mask file %s: %s' % (file_name, e))
# empty mask
return [0] * (MASK_WIDTH * 10)
if capture_width and capture_height:
# resize the image if necessary
if im.size != (capture_width, capture_height):
logging.debug('editable mask needs resizing from %sx%s to %sx%s' %
(im.size[0], im.size[1], capture_width, capture_height))
im = im.resize((capture_width, capture_height))
width, height = capture_width, capture_height
else:
logging.debug('using mask size from file: %sx%s' % (im.size[0], im.size[1]))
width, height = im.size
pixels = list(im.getdata())
# horizontal rectangles
nx = MASK_WIDTH # number of rectangles
if width % nx:
nx -= 1
rx = width % nx # remainder
else:
rx = 0
rw = width / nx # rectangle width
# vertical rectangles
ny = height * MASK_WIDTH / width # number of rectangles
if height % ny:
ny -= 1
ry = height % ny # remainder
else:
ry = 0
rh = height / ny # rectangle height
# parse the image contents and build the mask lines
mask_lines = [width, height]
for y in xrange(ny):
bits = []
for x in xrange(nx):
px = int((x + 0.5) * rw)
py = int((y + 0.5) * rh)
pixel = pixels[py * width + px]
bits.append(not bool(pixel))
if rx:
px = int(nx * rw + rx / 2)
py = int((y + 0.5) * rh)
pixel = pixels[py * width + px]
bits.append(not bool(pixel))
# build the binary packed mask line
line = 0
for i, bit in enumerate(bits):
if bit:
line |= 1 << (MASK_WIDTH - 1 - i)
mask_lines.append(line)
if ry:
bits = []
for x in xrange(nx):
px = int((x + 0.5) * rw)
py = int(ny * rh + ry / 2)
pixel = pixels[py * width + px]
bits.append(not bool(pixel))
if rx:
px = int(nx * rw + rx / 2)
py = int(ny * rh + ry / 2)
pixel = pixels[py * width + px]
bits.append(not bool(pixel))
# build the binary packed mask line
line = 0
for i, bit in enumerate(bits):
if bit:
line |= 1 << (MASK_WIDTH - 1 - i)
mask_lines.append(line)
return mask_lines
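# Round-trip sketch (an assumption, mirroring the builder above): parsing a mask
# previously written by build_editable_mask_file yields the same
# [width, height, line0, line1, ...] layout, e.g.
#     lines = parse_editable_mask_file('1', 'motion', 640, 480)
#     path = build_editable_mask_file('1', 'motion', lines, 640, 480)
# The camera id '1' is hypothetical.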
| gpl-3.0 | -5,104,830,186,163,047,000 | 26.50677 | 117 | 0.551965 | false |
gcallah/Indra | models/tests/rewrite_these/test_wolfram.py | 1 | 2258 | from unittest import TestCase
import models.wolfram as wolf
from indra.composite import Composite
from models.wolfram import create_wolf_cell, get_color, get_rule, next_color
from models.wolfram import set_up, W, B
TEST_ANUM = 999999
class WolframTestCase(TestCase):
def setUp(self):
(wolf.wolfram_env, wolf.groups, wolf.rule_dict) = set_up()
def tearDown(self):
wolf.wolfram_env = None
wolf.groups = None
wolf.rule_dict = None
def test_create_agent(self):
"""
Creates an agent and checks that it has the correct name,
        which is its (x, y) coordinates.
"""
a = create_wolf_cell(0, 0)
self.assertEqual(a.name, '(0,0)')
def test_get_color(self):
"""
        Based on a passed-in group, return the appropriate color.
"""
white = Composite("white")
black = Composite("black")
wolf.groups = []
wolf.groups.append(white)
wolf.groups.append(black)
self.assertEqual(get_color(wolf.groups[W]), W)
def test_get_rule(self):
"""
        Creates a dictionary describing a rule (rule 30 in this case)
        and compares it to what get_rule returns when asked for rule 30.
"""
rule30 = {"(1, 1, 1)": 0,
"(1, 1, 0)": 0,
"(1, 0, 1)": 0,
"(1, 0, 0)": 1,
"(0, 1, 1)": 1,
"(0, 1, 0)": 1,
"(0, 0, 1)": 1,
"(0, 0, 0)": 0}
self.assertEqual(get_rule(30), rule30)
def test_next_color(self):
"""
Ensure we get proper color based on trio from previous row.
"""
self.assertEqual(next_color(wolf.rule_dict, B, B, B), W)
self.assertEqual(next_color(wolf.rule_dict, B, B, W), W)
self.assertEqual(next_color(wolf.rule_dict, B, W, B), W)
self.assertEqual(next_color(wolf.rule_dict, B, W, W), B)
self.assertEqual(next_color(wolf.rule_dict, W, B, B), B)
self.assertEqual(next_color(wolf.rule_dict, W, B, W), B)
self.assertEqual(next_color(wolf.rule_dict, W, W, B), B)
self.assertEqual(next_color(wolf.rule_dict, W, W, W), W)
| gpl-3.0 | -8,902,933,236,556,920,000 | 33.212121 | 76 | 0.555802 | false |