repo_name: string (5–92 chars)
path: string (4–232 chars)
copies: string (19 distinct values)
size: string (4–7 chars)
content: string (721–1.04M chars)
license: string (15 distinct values)
hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
line_mean: float64 (6.51–99.9)
line_max: int64 (15–997)
alpha_frac: float64 (0.25–0.97)
autogenerated: bool (1 distinct value)
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
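Each row that follows holds one source file, with the file text in the content column. A minimal sketch of how rows with this schema might be consumed, assuming the dump is exposed as an iterable of dicts keyed by the column names (the rows variable is a placeholder, not part of the dump):

# rows = any iterable of dicts with the columns listed above
for row in rows:
    if row['autogenerated']:
        continue  # skip files flagged as machine-generated
    print(row['repo_name'], row['path'], row['license'], row['size'])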
tholum/PiBunny | system.d/library/tools_installer/tools_to_install/responder/tools/FindSQLSrv.py | 1 | 1355 | #!/usr/bin/env python
# This file is part of Responder, a network take-over set of tools
# created and maintained by Laurent Gaffie.
# email: [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from socket import *
print 'MSSQL Server Finder 0.1'
s = socket(AF_INET,SOCK_DGRAM)
s.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
s.settimeout(2)
s.sendto('\x02',('255.255.255.255',1434))
try:
while 1:
data, address = s.recvfrom(8092)
if not data:
break
else:
print "==============================================================="
print "Host details:",address[0]
print data[2:]
print "==============================================================="
print ""
except:
pass
| mit | 4,311,758,640,280,520,000 | 32.875 | 80 | 0.630996 | false |
Dziolas/harvesting-kit | harvestingkit/tests/aps_package_tests.py | 1 | 16805 | # -*- coding: utf-8 -*-
#
# This file is part of Harvesting Kit.
# Copyright (C) 2014, 2015 CERN.
#
# Harvesting Kit is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Harvesting Kit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Harvesting Kit; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for APS."""
import unittest
from harvestingkit.aps_package import ApsPackage
from xml.dom.minidom import parse
from os.path import (join,
dirname)
from harvestingkit.tests import (__file__ as folder,
aps_test_record,
aps_output,
journal_mappings)
class APSPackageTests(unittest.TestCase):
"""Test that metadata are exported correctly."""
def setUp(self):
"""Setup sample parsing used in tests."""
self.aps = ApsPackage(journal_mappings)
self.aps.document = parse(join(dirname(folder), aps_test_record))
def test_journal(self):
"""Test that journal name is extracted correctly."""
self.assertEqual(self.aps._get_journal(), 'Phys. Rev. D')
def test_abstract(self):
"""Test that abstract is extracted correctly."""
abstract = '<p>In conformally flat background geometries the' \
' long-wavelength gravitons can be described in the ' \
'fluid approximation and they induce scalar fluctuations ' \
'both during inflation and in the subsequent ' \
'radiation-dominated epoch. While this effect is minute ' \
'and suppressed for a de Sitter stage of expansion, the ' \
'fluctuations of the energy-momentum pseudotensor of the ' \
'graviton fluid lead to curvature perturbations that ' \
'increase with time all along the post-inflationary evolution.' \
' An explicit calculation of these effects is presented for' \
' a standard thermal history and it is shown that the growth' \
' of the curvature perturbations caused by the long-wavelength ' \
'modes is approximately compensated by the slope of the power ' \
'spectra of the energy density, pressure and anisotropic ' \
'stress of the relic gravitons.</p>'
self.assertEqual(self.aps._get_abstract(), abstract)
def test_title(self):
"""Check that title is correct."""
title = 'Scalar modes of the relic gravitons', '', []
self.assertEqual(self.aps._get_title(), title)
def test_doi(self):
"""Check that DOI is correct."""
self.assertEqual(self.aps._get_doi(), '10.1103/PhysRevD.91.023521')
def test_authors(self):
"""Check that authors are correct."""
authors = [('Giovannini, Massimo', [u'a1'], [u'n1'])]
self.assertEqual(self.aps._get_authors(), authors)
def test_affiliations(self):
"""Check that affiliations are correct."""
affiliations = {
u'a1': 'Department of Physics, Theory Division, CERN , 1211 Geneva 23, Switzerland INFN, Section of Milan-Bicocca, 20126 Milan, Italy'
}
self.assertEqual(self.aps._get_affiliations(), affiliations)
def test_author_emails(self):
"""Check email from author."""
emails = {u'n1': ['[email protected]']}
self.assertEqual(self.aps._get_author_emails(), emails)
def test_copyright(self):
"""Check that Copyright is extracted."""
self.assertEqual(self.aps._get_copyright(), ('authors', '2015', 'Published by the American Physical Society'))
def test_date(self):
"""Check published date."""
self.assertEqual(self.aps._get_date(), '2015-01-29')
def test_publisher(self):
"""Check correct publisher."""
self.assertEqual(self.aps._get_publisher(), 'American Physical Society')
def test_publication_information(self):
"""Check extracted pubinfo."""
publication_information = ('Phys.Rev.',
'D91',
'2',
u'2015',
u'2015-01-29',
u'10.1103/PhysRevD.91.023521',
'023521',
'',
'')
self.assertEqual(self.aps._get_publication_information(), publication_information)
def test_pagecount(self):
"""Check pagecount."""
self.assertEqual(self.aps._get_page_count(), '15')
def test_pacscodes(self):
"""Check that PACS are extracted."""
self.assertEqual(self.aps._get_pacscodes(), ['98.80.Cq', '04.30.-w', '04.62.+v', '98.70.Vc'])
def test_subject(self):
"""Check subject."""
self.assertEqual(self.aps._get_subject(), 'Cosmology')
def test_license(self):
"""Check license."""
self.assertEqual(
self.aps._get_license(),
('Creative Commons Attribution 3.0 License',
'creative-commons',
'http://creativecommons.org/licenses/by/3.0/')
)
def test_keywords(self):
"""Check keywords."""
self.assertEqual(self.aps._get_keywords(), [])
def test_references(self):
"""Check references."""
references = [
(u'journal', '', [u'L.\u2009P. Grishchuk'], '', 'Zh.\xc3\x89ksp.Teor.Fiz.',
'67', '825', '1974', '1', '', '', '', [], '', '', []),
(u'journal', '', [u'L.\u2009P. Grishchuk'], '', 'Sov.Phys.JETP',
'40', '409', '1975', '1', '', '', '', [], '', '', []),
(u'journal', '10.1111/j.1749-6632.1977.tb37064.x', [u'L.\u2009P. Grishchuk'],
'', 'Ann.N.Y.Acad.Sci.', '302', '439', '1977', '1', '', '', '', [], '', '', []),
(u'journal', '10.1111/j.1749-6632.1977.tb37064.x', [u'L.\u2009P. Grishchuk'], '',
'Ann.N.Y.Acad.Sci.', '302', '439', '1977', '1', '', '', '', [], '', '', []),
(u'journal', '', [u'A.\u2009A. Starobinsky'], '', 'JETP Lett.',
'30', '682', '1979', '2', '', '', '', [], '', '', []),
(u'journal', '10.1016/0370-2693(82)90641-4', [u'V.\u2009A. Rubakov', u'M.\u2009V. Sazhin', u'A.\u2009V. Veryaskin'],
'', 'Phys.Lett.', 'B115', '189', '1982', '2', '', '', '', [], '', '', []),
(u'journal', '10.1016/0370-2693(83)91322-9', [u'R. Fabbri', u'M.\u2009D. Pollock'],
'', 'Phys.Lett.', 'B125', '445', '1983', '3', '', '', '', [], '', '', []),
(u'journal', '10.1016/0550-3213(84)90329-8', [u'L.\u2009F. Abbott', u'M.\u2009B. Wise'],
'', 'Nucl.Phys.', '244', '541', '1984', '3', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.43.2566', [u'L.\u2009P. Grishchuk', u'M. Solokhin'], '',
'Phys.Rev.', 'D43', '2566', '1991', '4', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.42.453', [u'V. Sahni'], '', 'Phys.Rev.',
'D42', '453', '1990', '4', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.58.083504', [u'M. Giovannini'], '', 'Phys.Rev.',
'D58', '083504', '1998', '5', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.60.123511', [u'M. Giovannini'], '', 'Phys.Rev.',
'D60', '123511', '1999', '5', '', '', '', [], '', '', []),
(u'journal', '10.1088/0264-9381/26/4/045004', [u'M. Giovannini'], '',
'Classical Quantum Gravity', '26', '045004', '2009', '5', '', '', '', [], '', '', []),
(u'journal', '10.1016/j.physletb.2009.09.018', [u'W. Zhao', u'D. Baskaran', u'P. Coles'],
'', 'Phys.Lett.', 'B680', '411', '2009', '5', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.80.042002', [u'M.\u2009S. Pshirkov', u'D. Baskaran'],
'', 'Phys.Rev.', 'D80', '042002', '2009', '6', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.81.083503', [u'T. Chiba', u'K. Kamada', u'M. Yamaguchi'],
'', 'Phys.Rev.', 'D81', '083503', '2010', '6', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.89.123513', [u'M.\u2009W. Hossain', u'R. Myrzakulov', u'M. Sami', u'E.\u2009N. Saridakis'],
'', 'Phys.Rev.', 'D89', '123513', '2014', '6', '', '', '', [], '', '', []),
(u'book', '', [u'C.\u2009W. Misner', u'K.\u2009S. Thorne', u'J.\u2009A. Wheeler'],
'', 'Gravitation', '', '467', '1973', '7', '', 'Freeman', '', u'New York,', '', '', []),
(u'book', '', [u'S. Weinberg'], '', 'Gravitation and Cosmology',
'', '166', '1972', '8', '', 'Wiley', '', u'New York,', '', '', []),
(u'book', '', [u'L.\u2009D. Landau', u'E.\u2009M. Lifshitz'], '',
'The Classical Theory of Fields', '', '', '1971', '9', '', 'Pergamon Press', '', u'New York,', '', '', []),
(u'journal', '10.1103/PhysRev.166.1263', [u'R. Isaacson'], '', 'Phys.Rev.', '166', '1263',
'1968', '10', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRev.166.1272', [u'R. Isaacson'], '', 'Phys.Rev.', '166', '1272',
'1968', '10', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.56.3248', [u'L.\u2009R. Abramo', u'R. Brandenberger', u'V. Mukahanov'],
'', 'Phys.Rev.', 'D56', '3248', '1997', '11', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.60.064004', [u'L.\u2009R. Abramo'], '',
'Phys Rev.', 'D60', '064004', '1999', '11', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.61.024038', [u'S.\u2009V. Babak', u'L.\u2009P. Grishchuk'],
'', 'Phys.Rev.', 'D61', '024038', '1999', '11', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.73.083505', [u'M. Giovannini'], '',
'Phys.Rev.', 'D73', '083505', '2006', '12', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.85.104012', [u'D. Su', u'Y. Zhang'],
'', 'Phys.Rev.', 'D85', '104012', '2012', '12', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.16.1601', [u'L.\u2009H. Ford', u'L. Parker'],
'', 'Phys.Rev.', 'D16', '1601', '1977', '13', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.16.245', [u'L.\u2009H. Ford', u'L. Parker'],
'', 'Phys.Rev.', 'D16', '245', '1977', '13', '', '', '', [], '', '', []),
(u'journal', '10.1016/0375-9601(77)90880-5', [u'B.\u2009L. Hu', u'L. Parker'],
'', 'Phys.Lett', 'A63', '217', '1977', '13', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.22.1882', [u'J. Bardeen'], '',
'Phys.Rev.', 'D22', '1882', '1980', '14', '', '', '', [], '', '', []),
(u'journal', '10.1093/mnras/200.3.535', [u'G.\u2009V. Chibisov', u'V.\u2009F. Mukhanov'],
'', 'Mon.Not.R.Astron.Soc.', '200', '535', '1982', '14', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.28.679', [u'J. Bardeen', u'P. Steinhardt', u'M. Turner'],
'', 'Phys.Rev.', 'D28', '679', '1983', '14', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.30.265', [u'J.\u2009A. Frieman', u'M.\u2009S. Turner'],
'', 'Phys.Rev.', 'D30', '265', '1984', '14', '', '', '', [], '', '', []),
(u'journal', '10.1143/PTPS.78.1', [u'H. Kodama', u'M. Sasaki'], '',
'Prog.Theor.Phys.Suppl.', '78', '1', '1984', '14', '', '', '', [], '', '', []),
(u'journal', '10.1086/170206', [u'J-c. Hwang'], '',
'Astrophys.J.', '375', '443', '1991', '14', '', '', '', [], '', '', []),
(u'journal', '', [u'V.\u2009N. Lukash'], '',
'Zh.Eksp.Teor.Fiz.', '79', '1601', '1980', '15', '', '', '', [], '', '', []),
(u'journal', '', [u'V.\u2009N. Lukash'], '',
'Sov.Phys.JETP', '52', '807', '1980', '15', '', '', '', [], '', '', []),
(u'journal', '10.1134/S1063772907060017', [u'V. Strokov'],
'', 'Astronomy Reports', '51', '431', '2007', '15', '', '', '', [], '', '', []),
(u'journal', '10.1088/0067-0049/192/2/15', [u'B. Gold'],
'', 'Astrophys.J.Suppl.Ser.', '192', '15', '2011', '16', '', '', '', [], '', '', []),
(u'journal', '10.1088/0067-0049/192/2/16', [u'D. Larson'],
'', 'Astrophys.J.Suppl.Ser.', '192', '16', '2011', '16', '', '', '', [], '', '', []),
(u'journal', '10.1088/0067-0049/192/2/17', [u'C.\u2009L. Bennett'],
'', 'Astrophys.J.Suppl.Ser.', '192', '17', '2011', '16', '', '', '', [], '', '', []),
(u'journal', '10.1088/0067-0049/208/2/19', [u'G. Hinshaw'],
'', 'Astrophys.J.Suppl.Ser.', '208', '19', '2013', '16', '', '', '', [], '', '', []),
(u'journal', '10.1088/0067-0049/208/2/20', [u'C.\u2009L. Bennett'],
'', 'Astrophys.J.Suppl.Ser.', '208', '20', '2013', '16', '', '', '', [], '', '', []),
(u'journal', '10.1086/377226', [u'D.\u2009N. Spergel'],
'', 'Astrophys.J.Suppl.Ser.', '148', '175', '2003', '17', '', '', '', [], '', '', []),
(u'journal', '10.1086/513700', [u'D.\u2009N. Spergel'],
'', 'Astrophys.J.Suppl.Ser.', '170', '377', '2007', '17', '', '', '', [], '', '', []),
(u'journal', '10.1086/513699', [u'L. Page'], '',
'Astrophys.J.Suppl.Ser.', '170', '335', '2007', '17', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevLett.112.241101',
[u'P.\u2009A.\u2009R. Ade'], 'BICEP2 Collaboration', 'Phys.Rev.Lett.', '112', '241101', '2014', '18', '', '', '', [], '', '', []),
(u'journal', '10.1088/0004-637X/792/1/62', [u'P.\u2009A.\u2009R. Ade'],
'BICEP2 Collaboration', 'Astrophys.J.', '792', '62', '2014', '18', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.8.4231', [u'G.\u2009L. Murphy'],
'', 'Phys.Rev.', 'D8', '4231', '1973', '19', '', '', '', [], '', '', []),
(u'journal', '10.1016/0375-9601(77)90953-7', [u'G.\u2009L. Murphy'],
'', 'Phys.Lett.', 'A62', '75', '1977', '19', '', '', '', [], '', '', []),
(u'journal', '', [u'V.\u2009A. Belinskii', u'I.\u2009M. Khalatnikov'],
'', 'Zh.Eksp.Teor.Fiz.', '69', '401', '1975', '20', '', '', '', [], '', '', []),
(u'journal', '', [u'V.\u2009A. Belinskii', u'I.\u2009M. Khalatnikov'],
'', 'Sov.Phys.JETP', '42', '205', '1976', '20', '', '', '', [], '', '', []),
(u'journal', '', [u'V.\u2009A. Belinskii', u'I.\u2009M. Khalatnikov'],
'', 'Zh.Eksp.Teor.Fiz.Pis.Red.', '21', '223', '1975', '20', '', '', '', [], '', '', []),
(u'journal', '', [u'V.\u2009A. Belinskii', u'I.\u2009M. Khalatnikov'],
'', 'JETP Lett.', '21', '99', '1975', '20', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.72.043514', [u'S. Weinberg'],
'', 'Phys.Rev.', 'D72', '043514', '2005', '21', '', '', '', [], '', '', []),
(u'journal', '10.1103/PhysRevD.74.023508', [u'S. Weinberg'],
'', 'Phys.Rev.', 'D74', '023508', '2006', '21', '', '', '', [], '', '', []),
]
for ref in self.aps.document.getElementsByTagName('ref'):
for innerref in self.aps._get_reference(ref):
self.assertTrue(innerref in references)
def test_article_type(self):
"""Check extracted article type."""
self.assertEqual(self.aps._get_article_type(), 'research-article')
def test_get_record(self):
"""Check full record conversion."""
source_file = join(dirname(folder), aps_test_record)
marc_file = join(dirname(folder), aps_output)
with open(marc_file) as marc:
result = marc.read()
xml = self.aps.get_record(source_file)
self.assertEqual(xml.strip(), result.strip())
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(APSPackageTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 | -1,746,790,422,166,673,400 | 59.017857 | 146 | 0.47349 | false |
hamonikr-root/system-config-printer-gnome | PhysicalDevice.py | 1 | 11477 | #!/usr/bin/python
## Copyright (C) 2008, 2009, 2010, 2012 Red Hat, Inc.
## Authors:
## Tim Waugh <[email protected]>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import config
import gettext
gettext.install(domain=config.PACKAGE, localedir=config.localedir, unicode=True)
import cupshelpers
import urllib
import ppdippstr
class PhysicalDevice:
def __init__(self, device):
self.devices = None
self._network_host = None
self.dnssd_hostname = None
self._cupsserver = False
self.add_device (device)
self._user_data = {}
self._ppdippstr = ppdippstr.backends
def _canonical_id (self, device):
if hasattr (device, "id_dict"):
mfg = device.id_dict.get ('MFG', '')
mdl = device.id_dict.get ('MDL', '')
if mfg == '' or mdl.lower ().startswith (mfg.lower ()):
make_and_model = mdl
else:
make_and_model = "%s %s" % (mfg, mdl)
else:
make_and_model = device.make_and_model
return cupshelpers.ppds.ppdMakeModelSplit (make_and_model)
def _get_host_from_uri (self, uri):
hostport = None
host = None
dnssdhost = None
(scheme, rest) = urllib.splittype (uri)
if scheme == 'hp' or scheme == 'hpfax':
if rest.startswith ("/net/"):
(rest, ipparam) = urllib.splitquery (rest[5:])
if ipparam != None and ipparam.startswith("ip="):
hostport = ipparam[3:]
else:
if ipparam != None and ipparam.startswith("zc="):
dnssdhost = ipparam[3:]
else:
return None, None
else:
return None, None
elif scheme == 'dnssd' or scheme == 'mdns':
# The URIs of the CUPS "dnssd" backend do not contain the host
# name of the printer
return None, None
else:
(hostport, rest) = urllib.splithost (rest)
if hostport == None:
return None, None
if hostport:
(host, port) = urllib.splitport (hostport)
if type (host) == unicode:
host = host.encode ('utf-8')
if type (dnssdhost) == unicode:
dnssdhost = dnssdhost.encode ('utf-8')
return host, dnssdhost
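    # Illustrative results of the parsing above for a few made-up URIs (these
    # examples are assumptions for clarity, not taken from the original source):
    #
    #     'hp:/net/Officejet?ip=10.0.0.5'     -> ('10.0.0.5', None)
    #     'socket://10.0.0.7:9100'            -> ('10.0.0.7', None)
    #     'dnssd://Printer._ipp._tcp.local/'  -> (None, None)   # dnssd URIs carry no host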
def add_device (self, device):
if self._network_host or self.dnssd_hostname:
host, dnssdhost = self._get_host_from_uri (device.uri)
if (hasattr (device, 'address')):
host = device.address
if (hasattr (device, 'hostname') and dnssdhost == None):
dnssdhost = device.hostname
if (host == None and dnssdhost == None) or \
(host and self._network_host and \
host != self._network_host) or \
(dnssdhost and self.dnssd_hostname and \
dnssdhost != self.dnssd_hostname) or \
(host == None and self.dnssd_hostname == None) or \
(dnssdhost == None and self._network_host == None):
raise ValueError
else:
(mfg, mdl) = self._canonical_id (device)
if self.devices == None:
self.mfg = mfg
self.mdl = mdl
self.mfg_lower = mfg.lower ()
self.mdl_lower = mdl.lower ()
self.sn = device.id_dict.get ('SN', '')
self.devices = []
else:
def nicest (a, b):
def count_lower (s):
l = s.lower ()
n = 0
for i in xrange (len (s)):
if l[i] != s[i]:
n += 1
return n
if count_lower (b) < count_lower (a):
return b
return a
self.mfg = nicest (self.mfg, mfg)
self.mdl = nicest (self.mdl, mdl)
sn = device.id_dict.get ('SN', '')
if sn != '' and self.sn != '' and sn != self.sn:
raise ValueError
if device.type == "socket":
# Remove default port to more easily find duplicate URIs
device.uri = device.uri.replace (":9100", "")
if (device.uri.startswith('ipp:') and \
device.uri.find('/printers/') != -1) or \
((device.uri.startswith('dnssd:') or \
device.uri.startswith('mdns:')) and \
device.uri.endswith('/cups')):
# CUPS server
self._cupsserver = True
elif self._cupsserver:
# Non-CUPS queue on a CUPS server, drop this one
return
for d in self.devices:
if d.uri == device.uri:
return
self.devices.append (device)
self.devices.sort ()
if (not self._network_host or not self.dnssd_hostname) and \
device.device_class == "network":
# We just added a network device.
self._network_host, dnssdhost = \
self._get_host_from_uri (device.uri)
if dnssdhost:
self.dnssd_hostname = dnssdhost;
if (hasattr (device, 'address') and self._network_host == None):
if device.address:
self._network_host = device.address
if (hasattr (device, 'hostname') and self.dnssd_hostname == None):
if device.hostname:
self.dnssd_hostname = device.hostname
def get_devices (self):
return self.devices
def get_info (self):
# If the manufacturer/model is not known, or useless (in the
# case of the hpfax backend or a dnssd URI pointing to a remote
# CUPS queue), show the device-info field instead.
if (self.devices[0].uri.startswith('ipp:') and \
self.devices[0].uri.find('/printers/') != -1) or \
((self.devices[0].uri.startswith('dnssd:') or \
self.devices[0].uri.startswith('mdns:')) and \
self.devices[0].uri.endswith('/cups')):
if not self.dnssd_hostname:
info = "%s" % self._network_host
elif not self._network_host or self._network_host.find(":") != -1:
info = "%s" % self.dnssd_hostname
else:
if self._network_host != self.dnssd_hostname:
info = "%s (%s)" % (self.dnssd_hostname, self._network_host)
else:
info = "%s" % self._network_host
elif self.mfg == '' or \
(self.mfg == "HP" and self.mdl.startswith("Fax")):
info = self._ppdippstr.get (self.devices[0].info)
else:
info = "%s %s" % (self.mfg, self.mdl)
if ((self._network_host and len (self._network_host) > 0) or \
(self.dnssd_hostname and len (self.dnssd_hostname) > 0)) and not \
((self.devices[0].uri.startswith('dnssd:') or \
self.devices[0].uri.startswith('mdns:')) and \
self.devices[0].uri.endswith('/cups')) and \
(not self._network_host or \
info.find(self._network_host) == -1) and \
(not self.dnssd_hostname or \
info.find(self.dnssd_hostname) == -1):
if not self.dnssd_hostname:
info += " (%s)" % self._network_host
elif not self._network_host:
info += " (%s)" % self.dnssd_hostname
else:
info += " (%s, %s)" % (self.dnssd_hostname, self._network_host)
elif len (self.sn) > 0:
info += " (%s)" % self.sn
return info
# User data
def set_data (self, key, value):
self._user_data[key] = value
def get_data (self, key):
return self._user_data.get (key)
def __str__ (self):
return "(description: %s)" % self.__repr__ ()
def __repr__ (self):
return "<PhysicalDevice.PhysicalDevice (%s,%s,%s)>" % (self.mfg,
self.mdl,
self.sn)
def __cmp__(self, other):
if other == None or type (other) != type (self):
return 1
if (self._network_host != None or
other._network_host != None):
return cmp (self._network_host, other._network_host)
devs = other.get_devices()
if devs:
uris = map (lambda x: x.uri, self.devices)
for dev in devs:
if dev.uri in uris:
# URI match
return 0
if (other.mfg == '' and other.mdl == '') or \
(self.mfg == '' and self.mdl == ''):
# One or other is just a backend, not a real physical device.
if other.mfg == '' and other.mdl == '' and \
self.mfg == '' and self.mdl == '':
return cmp (self.devices[0], other.devices[0])
if other.mfg == '' and other.mdl == '':
return -1
return 1
if self.mfg == '' or self.mdl.lower ().startswith (self.mfg.lower ()):
our_make_and_model = self.mdl
else:
our_make_and_model = "%s %s" % (self.mfg, self.mdl)
(our_mfg, our_mdl) = \
cupshelpers.ppds.ppdMakeModelSplit (our_make_and_model)
if other.mfg == '' or \
other.mdl.lower ().startswith (other.mfg.lower ()):
other_make_and_model = other.mdl
else:
other_make_and_model = "%s %s" % (other.mfg, other.mdl)
(other_mfg, other_mdl) = \
cupshelpers.ppds.ppdMakeModelSplit (other_make_and_model)
mfgcmp = cmp (our_mfg.lower (), other_mfg.lower ())
if mfgcmp != 0:
return mfgcmp
mdlcmp = cmp (our_mdl.lower (), other_mdl.lower ())
if mdlcmp != 0:
return mdlcmp
if self.sn == '' or other.sn == '':
return 0
return cmp (self.sn, other.sn)
if __name__ == '__main__':
import authconn
c = authconn.Connection ()
devices = cupshelpers.getDevices (c)
physicaldevices = []
for device in devices.values ():
physicaldevice = PhysicalDevice (device)
try:
i = physicaldevices.index (physicaldevice)
physicaldevices[i].add_device (device)
except ValueError:
physicaldevices.append (physicaldevice)
physicaldevices.sort ()
for physicaldevice in physicaldevices:
print physicaldevice.get_info ()
devices = physicaldevice.get_devices ()
for device in devices:
print " ", device
| gpl-2.0 | 6,027,811,522,915,123,000 | 37.513423 | 82 | 0.51442 | false |
timestocome/Test-stock-prediction-algorithms | StockMarketTimeSeriesAnomalies/FindPatterns.py | 1 | 4161 | # http://github.com/timestocome
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# look for signature patterns before very high or low return days
# meh, the month before tends to have more extreme returns
# and more of them are lower than average, (like an earthquake?)
# but there isn't a clear signature pattern
# read in nasdaq
data = pd.read_csv('data/nasdaq.csv', parse_dates=True, index_col='Date')
# reverse dates
#data = data.iloc[::-1]
# keep only opening price
data = data[['Open']]
data['Open'] = pd.to_numeric(data['Open'], errors='coerce')
# log of price is used in most stock market calculations
data['LogOpen'] = np.log(data[['Open']])
# index change from prior day
data['dx'] = data['LogOpen'] - data['LogOpen'].shift(1)
# check every thing looks okay
# print(data)
# look for patterns in the daily change
# redo dx as high gain 4, low gain 3, low loss 2, high loss 1
# use this to set the dividers between high and average return days
print(data.dx.mean(), data.dx.median(), data.dx.max(), data.dx.min())
def gains_losses(d):
if d >= 0.05: return 4
elif d > 0.00: return 3
elif d <= -0.05: return 1
else: return 2
data['GainLoss'] = data['dx'].apply(gains_losses)
print("Count of days with good, bad and average returns: \n", data['GainLoss'].value_counts())
# what happens in the n trading days before a very good or bad day?
n_trading_days = 21 # 5/week, 21/month, 63/quarter, 252/year
# add a row count column to make it easier to fetch data slices
i = np.arange(1, len(data) + 1)
data['i'] = i
# set up storage
lowReturns = []
highReturns = []
slightlyLowReturns = []
slightlyHighReturns = []
for idx, row in data.iterrows():
if row.i > n_trading_days:
start = int(row.i - n_trading_days)
end = int(row.i)
pattern = np.asarray(data.iloc[start:end, :]['dx'].values)
if row.GainLoss == 1: # very bad day
lowReturns.append(pattern)
if row.GainLoss == 2:
slightlyLowReturns.append(pattern)
if row.GainLoss == 3:
slightlyHighReturns.append(pattern)
if row.GainLoss == 4: # very good day
highReturns.append(pattern)
# create np array columns = n_trading_days before high return day
high_returns = np.array(highReturns)
low_returns = np.array(lowReturns)
slightly_low_returns = np.array(slightlyLowReturns)
slightly_high_returns = np.array(slightlyHighReturns)
print(high_returns.shape)
print(slightly_high_returns.shape)
print(slightly_low_returns.shape)
print(low_returns.shape)
high_avg = high_returns.mean(axis=0)
low_avg = low_returns.mean(axis=0)
slightlyHigh_avg = np.nanmean(slightly_high_returns, axis=0)
slightlyLow_avg = np.nanmean(slightly_low_returns, axis=0)
for i in range(n_trading_days):
print('%.5f, %.5f, %.5f, %.5f' %(high_avg[i], slightlyHigh_avg[i], slightlyLow_avg[i], low_avg[i]))
plt.figure(figsize=(16,12))
plt.title("21 day returns before a large gain/loss are mostly losses and larger than average")
plt.plot(high_avg, lw=3, label="very high returns")
plt.plot(slightlyHigh_avg, label="gains")
plt.plot(slightlyLow_avg, label="losses")
plt.plot(low_avg, lw=3, label='big losses')
plt.legend(loc='best')
plt.savefig("FindPatterns1.png")
plt.show()
# heat maps of best worst trading days
plt.figure(figsize=(16, 16))
plt.suptitle("Patterns before extreme high/low return trading days")
plt.subplot(121)
map_best = plt.imshow(high_returns, cmap=plt.cm.Spectral, interpolation='nearest')
plt.title("21 days leading to highest return days")
plt.subplot(122)
map_worst = plt.imshow(low_returns, cmap=plt.cm.Spectral, interpolation='nearest')
plt.title("21 days leading to lowest return days")
cbar = plt.colorbar(map_best)
cbar.ax.set_yticklabels(['High loss', '', 'low loss', '', 'low gain', '', 'High Gain', ''])
cbar.set_label("Colors")
plt.savefig("FindPatterns2.png")
plt.show()
'''
plt.figure(figsize=(18,14))
plt.plot(data['Open'], label='Nasdaq')
plt.plot(data['LogOpen'] * 100, label='Scaled Log')
plt.title("Nasdaq Composite Index")
plt.legend(loc='best')
plt.show()
''' | mit | -3,519,715,092,887,859,700 | 25.509554 | 103 | 0.685652 | false |
avatar29A/pyfuzzy | fuzzy/norm/DualOfHarmonicMean.py | 1 | 1256 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: DualOfHarmonicMean.py,v 1.7 2009-10-27 20:06:27 rliebscher Exp $"
from fuzzy.norm.Norm import Norm, product, sum
class DualOfHarmonicMean(Norm):
def __init__(self):
super(DualOfHarmonicMean, self).__init__(Norm.UNKNOWN) #XXX
def __call__(self, *args):
args = self.checkArgsN(args)
sum_ = sum(*args)
if sum_ == len(args):
return 1.0
product_ = product(*args)
count_ = float(len(args))
return (sum_-count_*product_)/(count_-sum_)
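# Illustrative usage sketch (assumes fuzzy membership values in [0, 1]): for two
# arguments a and b the call above evaluates (a + b - 2ab) / (2 - (a + b)).
#
#     norm = DualOfHarmonicMean()
#     norm(0.5, 0.5)   # (1.0 - 0.5) / (2 - 1.0) = 0.5
#     norm(0.2, 0.8)   # (1.0 - 0.32) / (2 - 1.0) = 0.68
#     norm(1.0, 1.0)   # 1.0, via the sum_ == len(args) branch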
| mit | -4,267,947,875,288,818,700 | 34.885714 | 86 | 0.678344 | false |
PythonT/Crawler | XiuPaiPicMultiCrawler.py | 1 | 2724 | #!/usr/bin/env python
#coding:utf-8
import urllib
import urllib.request
from queue import Queue
import time,re,threading
head = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
lock = threading.Lock()# lock to serialize console output
def do_work(item):
time.sleep(.1) # pretend to do some lengthy work.
# Make sure the whole print completes or threads can mix up output in one line.
with lock:
print(threading.current_thread().name,item)
try:
image=item
link = (image.split('"'))[1].split('alt')[0]#get pic link
        name = str(image.split(' ')[2]).split('"')[1]#get pic name
imgType = link.split('.')[-1]
print(str(imgType) + ':' +name +":"+ str(link))
try:
with urllib.request.urlopen(link,None,timeout=10) as url :
write_file(url.read(), './pic3/%s.%s'%(name,imgType))
except Exception as e:print(e)
except Exception as e:print(e)
def write_file(content,filePath):
fil = open(filePath,'wb')
fil.write(content)
fil.close()
# The worker thread pulls an item from the queue and processes it
def worker():
while True:
item = q.get()
print('queue get ' + str(item))
do_work(item)
q.task_done()
def touchImages():
url='http://www.qiushibaike.com/imgrank'
req=urllib.request.Request(url,headers=head)
res=urllib.request.urlopen(req)
html=res.read().decode('utf8')
#rule=re.compile('<img src="(.\\?)" alt="(.\\?)" />')
rule = re.compile('<img[^>]*>')
return rule.findall(html)
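# Illustrative sketch of the parsing done in do_work(), assuming a tag shaped like
# the ones the page served at the time (the sample tag below is hypothetical):
#
#     tag = '<img src="http://pic.example.com/abc.jpg" alt="funny" />'
#     link = (tag.split('"'))[1].split('alt')[0]     # 'http://pic.example.com/abc.jpg'
#     name = str(tag.split(' ')[2]).split('"')[1]    # 'funny'
#     imgType = link.split('.')[-1]                  # 'jpg'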
# Create the queue and thread pool.
q = Queue()
for i in range(4):
t = threading.Thread(target=worker)
t.daemon = True # thread dies when main thread (only non-daemon thread) exits.
t.start()
# stuff work items on the queue (here, each item is an <img> tag string).
start = time.perf_counter()
images = [] + touchImages()
for item in images:
q.put(item)
q.join()# block until all tasks are done
# "Work" took .1 seconds per task.
# 20 tasks serially would be 2 seconds.
# With 4 threads should be about .5 seconds (contrived because non-CPU intensive "work")
print('time:',time.perf_counter() - start)
| apache-2.0 | -7,517,515,320,149,850,000 | 35.32 | 232 | 0.520558 | false |
mgraupe/acq4 | acq4/devices/Laser/LaserTaskGui.py | 1 | 11574 | from PyQt4 import QtGui, QtCore
from acq4.pyqtgraph import PlotWidget
from acq4.devices.DAQGeneric import DAQGenericTaskGui
from acq4.util.SequenceRunner import runSequence
from acq4.pyqtgraph.functions import siFormat
from acq4.pyqtgraph.WidgetGroup import WidgetGroup
import taskTemplate
from acq4.util.HelpfulException import HelpfulException
#from FeedbackButton import FeedbackButton
class LaserTaskGui(DAQGenericTaskGui):
def __init__(self, dev, taskRunner):
DAQGenericTaskGui.__init__(self, dev, taskRunner, ownUi=False)
self.ui = taskTemplate.Ui_Form()
self.taskR = taskRunner
self.cache = {}
self.layout = QtGui.QGridLayout()
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.splitter1 = QtGui.QSplitter()
self.splitter1.setOrientation(QtCore.Qt.Horizontal)
self.layout.addWidget(self.splitter1)
self.ctrlLayout = QtGui.QVBoxLayout()
wid1 = QtGui.QWidget()
wid1.setLayout(self.ctrlLayout)
self.plotSplitter = QtGui.QSplitter()
self.plotSplitter.setOrientation(QtCore.Qt.Vertical)
self.splitter1.addWidget(wid1)
self.splitter1.addWidget(self.plotSplitter)
#wid = QtGui.QWidget()
#hLayout = QtGui.QHBoxLayout()
#wid.setLayout(hLayout)
#self.ctrlLayout.addLayout(hLayout)
wid2 = QtGui.QWidget()
self.ui.setupUi(wid2)
self.ctrlLayout.addWidget(wid2)
if not self.dev.hasPowerIndicator:
self.ui.checkPowerBtn.setEnabled(False)
self.ui.checkPowerCheck.hide()
self.ui.checkPowerCheck.setChecked(False)
if not self.dev.hasTunableWavelength:
self.ui.wavelengthWidget.hide()
self.powerWidget, self.powerPlot = self.createChannelWidget('power', daqName=self.dev.getDAQName()[0])
## all we want is the function generator
self.powerFnGenerator = self.powerWidget.ui.waveGeneratorWidget
self.powerWidget.hide()
self.ctrlLayout.addWidget(self.powerFnGenerator)
self.powerFnGenerator.show()
self.plotSplitter.addWidget(self.powerPlot)
self.powerWidget.setMeta('y', units='W', siPrefix=True, dec=True, step=0.5, minStep=1e-3, limits=(0, None))
self.powerWidget.setMeta('xy', units='J', siPrefix=True, dec=True, step=0.5, minStep=1e-6, limits=(0, None))
self.powerWidget.setMeta('x', units='s', siPrefix=True, dec=True, step=0.5, minStep=1e-6, limits=(None, None))
if self.dev.hasTriggerableShutter:
#(self.shutterWidget, self.shutterPlot) = self.createChannelWidget('shutter')
self.shutterPlot = PlotWidget(name='%s.shutter'%self.dev.name)
self.shutterPlot.setLabel('left', text='Shutter')
self.plotSplitter.addWidget(self.shutterPlot)
#self.shutterPlot.hide()
if self.dev.hasQSwitch:
#self.qSwitchWidget, self.qSwitchPlot = self.createChannelWidget('qSwitch')
self.qSwitchPlot = PlotWidget(name='%s.qSwitch'%self.dev.name)
self.qSwitchPlot.setLabel('left', text='Q-Switch')
self.plotSplitter.addWidget(self.qSwitchPlot)
#self.qSwitchPlot.hide()
if self.dev.hasPCell:
#self.pCellWidget, self.pCellPlot = self.createChannelWidget('pCell')
self.pCellPlot = PlotWidget(name='%s.pCell'%self.dev.name)
self.pCellPlot.setLabel('left', text='Pockel Cell', units='V')
self.plotSplitter.addWidget(self.pCellPlot)
#self.pCellPlot.hide()
## catch self.powerWidget.sigDataChanged and connect it to functions that calculate and plot raw shutter and qswitch traces
self.powerWidget.sigDataChanged.connect(self.powerCmdChanged)
self.ui.checkPowerBtn.clicked.connect(self.dev.outputPower)
self.dev.sigOutputPowerChanged.connect(self.laserPowerChanged)
self.dev.sigSamplePowerChanged.connect(self.samplePowerChanged)
self.stateGroup = WidgetGroup([
(self.splitter1, 'splitter1'),
(self.plotSplitter,'plotSplitter'),
])
self.dev.outputPower()
def laserPowerChanged(self, power, valid):
#samplePower = self.dev.samplePower(power) ## we should get another signal for this later..
#samplePower = power*self.dev.getParam('scopeTransmission')
## update label
if power is None:
self.ui.outputPowerLabel.setText("?")
else:
self.ui.outputPowerLabel.setText(siFormat(power, suffix='W'))
if not valid:
self.ui.outputPowerLabel.setStyleSheet("QLabel {color: #B00}")
else:
self.ui.outputPowerLabel.setStyleSheet("QLabel {color: #000}")
def samplePowerChanged(self, power):
if power is None:
self.ui.samplePowerLabel.setText("?")
return
else:
self.ui.samplePowerLabel.setText(siFormat(power, suffix='W'))
if self.dev.hasPCell:
raise Exception('stub')
else:
## adjust length of pulse to correct for new power
if self.ui.adjustLengthCheck.isChecked():
en = {}
for param in self.powerWidget.ui.waveGeneratorWidget.stimParams:
en[param.name()] = param['sum']
self.powerWidget.setMeta('y', value=power, readonly=True)
for param in self.powerWidget.ui.waveGeneratorWidget.stimParams:
param['sum'] = en[param.name()]
else:
self.powerWidget.setMeta('y', value=power, readonly=True)
def saveState(self):
"""Return a dictionary representing the current state of the widget."""
state = {}
state['daqState'] = DAQGenericTaskGui.saveState(self)
return state
def restoreState(self, state):
"""Restore the state of the widget from a dictionary previously generated using saveState"""
return DAQGenericTaskGui.restoreState(self, state['daqState'])
def describe(self, params=None):
state = self.saveState()
ps = state['daqState']['channels']['power']
desc = {'mode': 'power', 'command': ps['waveGeneratorWidget']}
return desc
def prepareTaskStart(self):
## check power before starting task.
if self.ui.checkPowerCheck.isChecked():
power = self.dev.outputPower() ## request current power from laser
valid = self.dev.checkPowerValidity(power)
if power is None:
raise HelpfulException("The current laser power for '%s' is unknown." % self.dev.name)
if not valid:
powerStr = siFormat(power, suffix='W')
raise HelpfulException("The current laser power for '%s' (%s) is outside the expected range." % (self.dev.name(), powerStr))
def generateTask(self, params=None):
"""Return a cmd dictionary suitable for passing to LaserTask."""
## Params looks like: {'amp': 7} where 'amp' is the name of a sequence parameter, and 7 is the 7th value in the list of 'amp'
for k,v in params.items():
if k.startswith('power.'):
del params[k]
params[k[6:]] = v
rate = self.powerWidget.rate
wave = self.powerWidget.getSingleWave(params)
rawCmds = self.getChannelCmds(wave, rate)
#rawCmds = self.cache.get(id(wave), self.dev.getChannelCmds({'powerWaveform':wave}, rate)) ## returns {'shutter': array(...), 'qSwitch':array(..), 'pCell':array(...)}
### structure task in DAQGeneric-compatible way
cmd = {}
for k in rawCmds:
cmd[k] = {}
cmd[k]['command'] = rawCmds[k]
cmd['powerWaveform'] = wave ## just to allow the device task to store this data
cmd['ignorePowerWaveform'] = True
#print cmd
return cmd
def sequenceAborted(self):
#print 'sequence aborted'
if self.ui.releaseAfterSequence.isChecked():
self.dev.closeShutter()
def taskSequenceStarted(self):
#print 'task sequence started'
if self.ui.releaseAfterSequence.isChecked():
self.dev.openShutter()
def taskFinished(self):
#print 'task finished'
if not self.taskR.loopEnabled:
self.dev.closeShutter()
def getChannelCmds(self, powerWave, rate):
key = id(powerWave)
# force update of rawCmds
#if key in self.cache:
# rawCmds = self.cache[key]
#else:
rawCmds = self.dev.getChannelCmds({'powerWaveform':powerWave}, rate) ## returns {'shutter': array(...), 'qSwitch':array(..), 'pCell':array(...)}
if self.ui.releaseAfterSequence.isChecked():
#print 'shutter set to 1.'
rawCmds['shutter'][:] = 1.
#self.dev.setChanHolding('shutter',False)
self.cache[key] = rawCmds
return rawCmds
def powerCmdChanged(self):
self.clearRawPlots()
self.cache = {}
rate = self.powerWidget.rate
#### calculate, cache and display sequence waves for shutter/qSwitch/pCell
params = {}
ps = self.powerWidget.listSequence()
for k in ps:
params[k] = range(len(ps[k]))
## get power waveforms
waves = []
runSequence(lambda p: waves.append(self.powerWidget.getSingleWave(p)), params, params.keys()) ## appends waveforms for the entire parameter space to waves
for w in waves:
if w is not None:
## need to translate w into raw traces, plot them, and cache them (using id(w) as a key)
rawWaves = self.getChannelCmds(w, rate)
#rawWaves = self.dev.getChannelCmds({'powerWaveform':w}, rate) ## calculate raw waveforms for shutter/qSwitch/pCell from powerWaveform
#self.cache[id(w)] = rawWaves ## cache the calculated waveforms
                self.plotRawCurves(rawWaves, color=QtGui.QColor(100, 100, 100)) ## plot the raw waveform in its appropriate plot in grey
## calculate (or pull from cache) and display single-mode wave in red
single = self.powerWidget.getSingleWave()
if single is not None:
#rawSingle = self.cache.get(id(single), self.dev.getChannelCmds({'powerWaveform':single}, rate))
rawSingle = self.getChannelCmds(single, rate)
self.plotRawCurves(rawSingle, color=QtGui.QColor(200, 100, 100))
def plotRawCurves(self, data, color=QtGui.QColor(100, 100, 100)):
if 'shutter' in data:
self.shutterPlot.plot(y=data['shutter'], x=self.powerWidget.timeVals, pen=QtGui.QPen(color))
if 'qSwitch' in data:
self.qSwitchPlot.plot(y=data['qSwitch'], x=self.powerWidget.timeVals, pen=QtGui.QPen(color))
if 'pCell' in data:
self.pCellPlot.plot(y=data['pCell'], x=self.powerWidget.timeVals, pen=QtGui.QPen(color))
def clearRawPlots(self):
for p in ['shutterPlot', 'qSwitchPlot', 'pCellPlot']:
if hasattr(self, p):
getattr(self, p).clear()
def quit(self):
self.dev.lastResult = None
DAQGenericTaskGui.quit(self) | mit | 6,053,857,047,404,167,000 | 43.179389 | 174 | 0.614654 | false |
momijiame/jpgrep | jpgrep/cmd/jpgrep_cmd.py | 1 | 2779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from concurrent import futures
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import ThreadPoolExecutor
import click
from future.utils import text_type
from jpgrep.util import get_encoding
from jpgrep.util import filepathes
from jpgrep.util import FileObjectWrapper
from jpgrep.morpheme import StreamDetector
def _open(filepath):
file_ = open(filepath, mode='rb')
wrapper = FileObjectWrapper(file_)
return wrapper
def _executor(targets):
for target in targets:
if target.name.find('<std') == 0:
            # standard input/output would differ in a separate process, so use threads here
return ThreadPoolExecutor()
return ProcessPoolExecutor()
@click.command()
@click.option('-v', '--inverse', type=bool, is_flag=True)
@click.argument('query', type=text_type, nargs=1)
@click.argument('files', nargs=-1)
def cmd(inverse, query, files):
targets = []
    # if there are no target files, search standard input instead
if len(files) < 1:
targets = [FileObjectWrapper(sys.stdin)]
    # open any target files in binary mode
for file_ in files:
relpathes = filepathes(file_)
file_objects = [_open(relpath) for relpath in relpathes]
targets.extend(file_objects)
executor = _executor(targets)
with executor as e:
mappings = dict((e.submit(_dispatch, target, query, inverse),
target)
for target in targets)
for future in futures.as_completed(mappings):
target = mappings[future]
troves = future.result()
for trove in troves:
_print(target.name, trove.line)
def _print(name, line):
if _is_stdio(name):
print(line)
else:
path = os.path.relpath(name)
msg = u'{path}:{line}'.format(path=path, line=line)
print(msg)
def _is_stdio(name):
return name.find('<std') == 0
def _dispatch(target, query, inverse):
detector = StreamDetector(query, inverse)
with target.file as file_:
binary = file_.read()
encoding = get_encoding(binary)
if encoding is None:
        # encoding unknown (probably a binary file)
return []
text = binary.decode(encoding)
    # XXX: should we also detect the file's newline convention?
lines = text.split(os.linesep)
troves = []
for line in lines:
trove = detector.feed(line)
if trove is None:
continue
troves.append(trove)
return troves
def main():
cmd()
if __name__ == '__main__':
main()
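# Usage sketch, based on the click options declared above (file names are placeholders):
#
#     python jpgrep_cmd.py <query> notes.txt docs/        # grep the given files/dirs
#     cat some.txt | python jpgrep_cmd.py <query>         # no files: read standard input
#     python jpgrep_cmd.py -v <query> notes.txt           # -v/--inverse inverts the match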
| apache-2.0 | 1,412,952,036,770,468,900 | 22.654206 | 69 | 0.630581 | false |
kasun/YapDi | examples/basic.py | 1 | 2303 | # !/usr/bin/env python
''' YapDi Example - Demonstrate basic YapDi functionality.
Author - Kasun Herath <[email protected]>
USAGE - python basic.py start|stop|restart
python basic.py start would execute count() in daemon mode
if there is no instance already running.
count() prints a counting number to syslog. To view output of
count() execute a follow tail to syslog file. Most probably
tail -f /var/log/syslog under linux and tail -f /var/log/messages
under BSD.
python basic.py stop would kill any running instance.
python basic.py restart would kill any running instance; and
start an instance. '''
import sys
import syslog
import time
import yapdi
COMMAND_START = 'start'
COMMAND_STOP = 'stop'
COMMAND_RESTART = 'restart'
def usage():
print("USAGE: python %s %s|%s|%s" % (sys.argv[0], COMMAND_START, COMMAND_STOP, COMMAND_RESTART))
# Invalid executions
if len(sys.argv) < 2 or sys.argv[1] not in [COMMAND_START, COMMAND_STOP, COMMAND_RESTART]:
usage()
exit()
def count():
''' Outputs a counting value to syslog. Sleeps for 1 second between counts '''
i = 0
while 1:
syslog.openlog("yapdi-example.info", 0, syslog.LOG_USER)
syslog.syslog(syslog.LOG_NOTICE, 'Counting %s' % (i))
i += 1
time.sleep(1)
if sys.argv[1] == COMMAND_START:
daemon = yapdi.Daemon()
# Check whether an instance is already running
if daemon.status():
print("An instance is already running.")
exit()
retcode = daemon.daemonize()
# Execute if daemonization was successful else exit
if retcode == yapdi.OPERATION_SUCCESSFUL:
count()
else:
print('Daemonization failed')
elif sys.argv[1] == COMMAND_STOP:
daemon = yapdi.Daemon()
# Check whether no instance is running
if not daemon.status():
print("No instance running.")
exit()
retcode = daemon.kill()
if retcode == yapdi.OPERATION_FAILED:
print('Trying to stop running instance failed')
elif sys.argv[1] == COMMAND_RESTART:
daemon = yapdi.Daemon()
retcode = daemon.restart()
# Execute if daemonization was successful else exit
if retcode == yapdi.OPERATION_SUCCESSFUL:
count()
else:
print('Daemonization failed')
| isc | 1,526,457,386,641,257,200 | 27.432099 | 100 | 0.660443 | false |
padraicc/Evolib | evolib/data/AlignmentObjects.py | 1 | 6446 | from evolib.tools.GeneralMethods import create_ids
from evolib.stats.StatObjects import IOstats
from evolib.generic.AlignmentSite import Site
from evolib.data.DataObjects import SeqTable, IOtable
from evolib.tools.DNAmethods import booleanDNA, booleanIO
from evolib.generic.GeneticSequence import DNAsequence
from evolib.tools.GeneralMethods import loopByColumn
from evolib.tools.DNAmethods import dna_to_amino, synNonsynProbs
class DnaPopulationData(IOstats):
"""
Class representation of DNA sequences from multiple
samples.
"""
def __init__(self, *args):
if len(args) == 1:
seqs, ids = self._from_sequence(args[0])
elif len(args) == 2:
seqs, ids = self._from_sequence(args[0], args[1])
else:
raise TypeError, "Wrong number of arguments"
self._attach_data(seqs, ids)
def _from_sequence(self, seqs, ids = None):
if isinstance(seqs, list) is False:
raise TypeError, 'List expected.'
n = len(seqs)
if isinstance(ids, list) is False:
if ids is None:
ids = create_ids(n, "seq")
else:
raise TypeError, 'List expected.'
return seqs, ids
def _attach_data(self, sequences, ids):
self.DNAdata = SeqTable(sequences, ids)
self.IOdata = self._get_IOdata(self.DNAdata)
######
def __len__(self):
return len(self.DNAdata)
######
def _get_IOdata(self, seqs):
self.validSites = 0
io = []
for site in seqs.iter_sites():
SiteClass = Site(site)
if SiteClass.has_missing_data():
pass
elif SiteClass.number_of_alleles() > 2:
pass
elif SiteClass.number_of_alleles() == 1:
self.validSites += 1
else:
self.validSites += 1
siteIO = booleanDNA(SiteClass.alleles())
io.append(siteIO)
IO = IOtable(io)
return IO
######
def iter_sites(self):
for site in self.DNAdata.iter_sites():
yield site
######
def ids(self):
return self.DNAdata.ids
def nsamples(self):
return self.__len__()
def sequences(self):
return self.DNAdata.sequences
def length(self):
return self.validSites
######
def index(self, key):
return self.DNAdata.index(key)
def pop(self, index = None):
seq, seqid = self.DNAdata.pop(index)
self.IOdata = self._get_IOdata(self.DNAdata)
return DNAsequence(seq, seqid)
######
def coding(self, refseq):
dna = ['A', 'T', 'G', 'C', 'a', 't', 'g', 'c']
nsam = self.nsamples()
inc = (i for i in xrange(len(refseq)) if refseq[i] in dna)
cds_seqs = ['' for j in xrange(nsam)]
for site in inc:
for ind in xrange(nsam):
cds_seqs[ind] += self.DNAdata.sequences[ind][site]
return type(self)(cds_seqs, self.DNAdata.ids)
######
def nonsyn(self, frame):
nsyn, nnon = 0, 0
nsam = len(self.DNAdata.sequences)
syn_seqs, nonsyn_seqs = ['' for n in range(nsam)], ['' for n in range(nsam)]
for codons in loopByColumn(self.DNAdata.sequences, start = frame, size = 3):
nmiss = sum([len(set(i) - set('ATGCatgc')) for i in codons])
if nmiss > 0:
# contains non-ATGC data
pass
elif len(codons[0]) != 3:
# number of bases in codon != 3
pass
else:
ucodons = list(set(codons))
nucodons = len(ucodons)
if nucodons > 2:
# > 1 segregating site in codon
pass
elif nucodons == 1:
# monomorphic site
nsp, nnp = synNonsynProbs(ucodons[0])
nsyn += 3 * nsp
nnon += 3 * nnp
else:
codon1, codon2 = ucodons[0], ucodons[1]
codon_count = [(codons.count(codon1), codon1), (codons.count(codon2), codon2)]
codon_count.sort(reverse = True)
major = codon_count[0][1]
nsp, nnp = synNonsynProbs(major)
nsyn += 3 * nsp
nnon += 3 * nnp
sindex = [i for i in range(len(codon1)) if codon1[i] != codon2[i]][0]
for s in range(len(codons)):
aa1, aa2 = dna_to_amino[ucodons[0]], dna_to_amino[ucodons[1]]
if aa1 == aa2:
syn_seqs[s] += codons[s][sindex]
else:
nonsyn_seqs[s] += codons[s][sindex]
SynClass = type(self)(syn_seqs, self.DNAdata.ids)
NonSynClass = type(self)(nonsyn_seqs, self.DNAdata.ids)
SynClass.validSites = nsyn
NonSynClass.validSites = nnon
return SynClass, NonSynClass
###### ######
import random
class IOPopulationData(IOstats):
def __init__(self, seqs):
if isinstance(seqs, IOtable):
self.IOdata = seqs
else:
self.IOdata = self._get_IOdata(seqs)
def _get_IOdata(self, seqs):
io = []
if seqs != []:
for s in range(len(seqs[0])):
site = ''.join([f[s] for f in seqs[:]])
bio = booleanIO(site)
io.append(bio)
IO = IOtable(io)
return IO
def nsamples(self):
return len(self.IOdata[0])
def nonsyn_sample_sites(self, p):
n = len(self.IOdata)
one, two = [], []
for i in xrange(n):
rint = random.random()
if rint < p:
one.append(list(self.IOdata[i]))
else:
two.append(list(self.IOdata[i]))
table1 = IOtable(one)
table2 = IOtable(two)
OneClass, TwoClass = IOPopulationData(table1), IOPopulationData(table2)
return OneClass, TwoClass
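# Minimal usage sketch (the sequences and ids are made-up illustration data, and the
# behaviour shown assumes SeqTable/Site work as their usage above suggests):
#
#     seqs = ['ATGAAA', 'ATGAAG', 'ATGAAA']
#     pop = DnaPopulationData(seqs, ['ind1', 'ind2', 'ind3'])
#     pop.nsamples()      # number of sequences
#     pop.length()        # count of usable (non-missing, <= 2 allele) sites
#     for site in pop.iter_sites():
#         pass            # each site is one alignment column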
| mit | -7,136,375,593,400,998,000 | 25.970711 | 98 | 0.489296 | false |
VigneshChennai/BlackPearl | lib/BlackPearl/application.py | 1 | 10798 | #!/usr/bin/env python
# This file is part of BlackPearl.
# BlackPearl is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# BlackPearl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with BlackPearl. If not, see <http://www.gnu.org/licenses/>.
import traceback
import json
import cgi
import pickle
import inspect
import os
import base64
import logging
from BlackPearl import testing
from BlackPearl.core import sessions
from BlackPearl.core import exceptions
from BlackPearl.core import utils
from BlackPearl.core.exceptions import RequestInvalid, UnSuccessfulException
logger = logging.getLogger(__name__)
webapp = None
def invoke_preprocessors(urlpath, session):
try:
# Executing all the preprocessors defined and configured in webapp
url = urlpath.replace(webapp.url_prefix, "")
for preprocessor in webapp.preprocessors:
preprocessor['func'](session, url)
return None
except exceptions.RequestCannotBeProcessed as e:
rets = {
"status": -101,
"desc": str(e)
}
except exceptions.UnAuthorizedAccess as e:
rets = {
"status": -102,
"desc": str(e)
}
except Exception as e:
rets = {
"status": -103,
"desc": "Exception occurred in Preprocessor. Error: <%s>" % str(e)
}
return rets
def invoke_posthandlers(urlpath, session, rets):
# Invoking the post handler defined and configured in webapp
try:
for posthandler in webapp.posthandlers:
posthandler['func'](session, urlpath, rets)
return None
except Exception as e:
error = {
"status": -301,
"desc": "Exception occurred in posthandler. Error: <%s>" % e
}
return error
def handle_request(module, session, parameter):
"""This function handles the user request"""
func = module['func']
signature = module['signature']
try:
parameter = utils.validate_parameter(signature, parameter)
except Exception as e:
raise ParametersInvalid(str(e)) from None
return func(session, parameter)
def return_to_client(start_response, headers, session, data):
status = "200 ok"
# serializing the python object return from handler to JSON.
try:
json_rets = json.dumps(data)
except:
start_response(status, headers)
rets = {
"status": -401,
"desc": "Error in serializing the return data from module. Return value <%s>" % (str(data))
}
yield json.dumps(rets).encode('UTF-8')
else:
# Encrypting the session object
sess_value = sessions.encode_session(session).decode('utf-8') + "; Path=/"
        # Most browsers support only around 4000 bytes in the cookie.
        # So, it is always good practice to keep the cookie value small;
        # restricting the session object size to 4000
if len(sess_value) > 4000:
start_response(status, headers)
rets = {"status": -501,
"desc": "Session object should be less than 4000 bytes in size. "
"Currently the session object size is <%s> bytes" % (len(sess_value))}
yield json.dumps(rets).encode('UTF-8')
else:
# Once everything went well, we are sending the result to the client
headers.append(('Set-Cookie', "session=%s" % sess_value))
start_response(status, headers)
yield json_rets.encode('UTF-8')
class ParametersInvalid(Exception):
"""This exception should be raised when invalid parameters are received"""
pass
def __application__(environ, start_response):
headers = [('Content-Type', "text/plain")]
try:
# Request method (POST, GET .. etc)
method = environ['REQUEST_METHOD']
urlpath = environ['PATH_INFO']
# Parsing the input values.
# The FieldStorage will handle all methods and file upload as well.
form_values = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
# Restricting the access method only to GET and POST
if not (method == 'GET' or method == 'POST'):
status = '405 Method Not Allowed'
start_response(status, headers + [('Allow', ('POST', 'GET'))])
yield str("Method<%s> is not allowed" % method).encode('UTF-8')
else:
try:
module = webapp.webmodules[urlpath]
except:
status = '404 Requested URL not found'
start_response(status, headers)
yield str("Requested URL not found : %s" % (urlpath)).encode('utf-8')
else:
# Parse/Initialize session object.
session = sessions.parse_session(environ=environ)
headers = [('Content-Type', "text/json")]
error = invoke_preprocessors(urlpath=urlpath, session=session)
if error:
for i in return_to_client(start_response=start_response, headers=headers,
session=session, data=error):
yield i
else:
# Invoking the request handler for this the URL
try:
output = handle_request(module=module, session=session, parameter=form_values)
except ParametersInvalid as e:
rets = {
"status": -201,
"desc": str(e)
}
for i in return_to_client(start_response=start_response, headers=headers,
session=session, data=rets):
yield i
except RequestInvalid as ri:
rets = {
"status": -202,
"desc": str(ri)
}
for i in return_to_client(start_response=start_response, headers=headers,
session=session, data=rets):
yield i
except UnSuccessfulException as e:
rets = {
"status": -203,
"desc": e.desc,
"data": e.data
}
for i in return_to_client(start_response=start_response, headers=headers,
session=session, data=rets):
yield i
except Exception:
error = traceback.format_exc()
rets = {
"status": -299,
"desc": error
}
for i in return_to_client(start_response=start_response, headers=headers,
session=session, data=rets):
yield i
else:
if inspect.isgenerator(output):
status = "200 ok"
headers = []
try:
remaining = None
for data_segment in output:
if type(data_segment) == tuple:
headers.append(data_segment)
else:
remaining = data_segment
except:
status = "500 Internal server error. Check the server logs"
logger.error("Error occurred while setting header for file output.")
logger.error("ERROR:", traceback.format_exc())
start_response(status, [])
yield traceback.format_exc().encode('UTF-8')
else:
try:
start_response(status, headers)
yield remaining
for data_segment in output:
yield data_segment
except:
logger.error("Error occurred while returning file output.")
logger.error("ERROR:", traceback.format_exc())
else:
rets = {
"status": 0,
"data": output
}
error = invoke_posthandlers(urlpath=urlpath, session=session, rets=rets)
if error:
rets = error
for i in return_to_client(start_response=start_response, headers=headers,
session=session, data=rets):
yield i
except:
error = traceback.format_exc()
status = '200 ok'
start_response(status, headers)
rets = {
"status": -1,
"desc": error
}
yield json.dumps(rets).encode('utf-8')
def initialize():
global webapp, BLOCK_SIZE, AES_KEY
# initializing the webapps from the pickled file.
sessions.BLOCK_SIZE = int(os.environ['BLACKPEARL_ENCRYPT_BLOCK_SIZE'])
sessions.AES_KEY = base64.b64decode(os.environ['BLACKPEARL_ENCRYPT_KEY'])
testing.listen = os.environ['BLACKPEARL_LISTEN']
pickle_file = os.environ['BLACKPEARL_PICKLE_FILE']
pfile = open("%s" % pickle_file, "rb")
with pfile:
webapp = pickle.load(pfile)
    # We generate the signature objects during initialization because signature
    # objects are not picklable.
for webmodule in webapp.webmodules.values():
webmodule["signature"] = inspect.signature(webmodule["handler"])
# This "application" is called for every request by the app_server (uwsgi)
application = __application__
| gpl-3.0 | -4,571,076,748,584,257,000 | 39.441948 | 103 | 0.514262 | false |
faneshion/MatchZoo | matchzoo/metrics/precision.py | 1 | 1787 | """Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import BaseMetric, sort_and_couple
class Precision(BaseMetric):
"""Precision metric."""
ALIAS = 'precision'
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`PrecisionMetric` constructor.
:param k: Number of results to consider.
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate precision@k.
Example:
>>> y_true = [0, 0, 0, 1]
>>> y_pred = [0.2, 0.4, 0.3, 0.1]
>>> Precision(k=1)(y_true, y_pred)
0.0
>>> Precision(k=2)(y_true, y_pred)
0.0
>>> Precision(k=4)(y_true, y_pred)
0.25
>>> Precision(k=5)(y_true, y_pred)
0.2
        :param y_true: The ground truth label of each document.
        :param y_pred: The predicted scores of each document.
        :return: Precision @ k
        :raises: ValueError: If k is not greater than 0.
"""
if self._k <= 0:
raise ValueError(f"k must be greater than 0."
f"{self._k} received.")
coupled_pair = sort_and_couple(y_true, y_pred)
precision = 0.0
for idx, (label, score) in enumerate(coupled_pair):
if idx >= self._k:
break
if label > self._threshold:
precision += 1.
return precision / self._k
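# Illustrative note (editor's addition): sort_and_couple is assumed to pair each
# ground-truth label with its predicted score and sort the pairs by descending
# score, so the loop in Precision.__call__ counts how many of the top-k ranked
# items exceed the relevance threshold.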
| apache-2.0 | -571,538,120,318,125,900 | 30.350877 | 68 | 0.51427 | false |
willcassella/WillowEngine | Scripts/Editor/MeshExporter.py | 1 | 1121 | import bpy
import bmesh
import struct
import array
# Target file to save to
target = "C:/Users/Will/Source/WillowEngine/bin/Content/Maps/Sactuary.wmesh"
# Get the active object
active = bpy.context.object
active.update_from_editmode()
# Copy and triangulate its mesh data
mesh = bmesh.new()
mesh.from_mesh(active.data)
bmesh.ops.triangulate(mesh, faces=mesh.faces)
uvLayer = mesh.loops.layers.uv.active
if uvLayer is None:
print("No UV data!")
# Create float arrays for vertices and elements
vertices = array.array('f')
#elements = array.array('I')
for face in mesh.faces:
for loop in face.loops:
for item in loop.vert.co: # Position
vertices.append(item)
for item in loop[uvLayer].uv: # uv
vertices.append(item)
for item in loop.vert.normal: # normal
vertices.append(item)
# Open a file to write to
with open(target, 'wb') as file:
# Write out vertices
file.write(struct.pack('I', len(vertices) // 8))
vertices.tofile(file)
# Write out elements
#file.write(struct.pack('I', len(elements)))
#elements.tofile(file) | mit | -6,927,467,421,728,766,000 | 25.093023 | 76 | 0.67975 | false |
open-synergy/opnsynid-l10n-indonesia | l10n_id_djbc_kite_lap_pemakaian_bahan_baku/reports/djbc_kite_lap_pemakaian_bahan_baku_subkon.py | 1 | 4109 | # -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields
from openerp import tools
class LapKitePemakaianBahanBakuSubkon(models.Model):
_name = "l10n_id.lap_kite_pemakaian_bahan_baku_subkon"
_description = "Laporan Pemakaian Bahan Baku Subkontrak KITE"
_auto = False
no_pengeluaran = fields.Many2one(
string="Nomor Pengeluaran",
comodel_name="stock.picking",
)
tgl_pengeluaran = fields.Datetime(
string="Tanggal Pengeluaran",
)
kode_barang = fields.Char(
string="Kode Barang",
)
nama_barang = fields.Char(
string="Nama Barang",
)
satuan = fields.Many2one(
string="Satuan",
comodel_name="product.uom",
)
jumlah_digunakan = fields.Float(
string="Jumlah Digunakan",
)
jumlah_disubkontrakkan = fields.Float(
string="Jumlah Disubkontrakan",
)
penerima_subkontrak = fields.Many2one(
string="Penerima Subkontrak",
comodel_name="res.partner",
)
gudang = fields.Many2one(
string="Gudang",
comodel_name="stock.warehouse"
)
def _get_movement_type(self, cr):
query = """
SELECT res_id
FROM ir_model_data
WHERE
module = 'l10n_id_djbc_kite_lap_pemakaian_bahan_baku' AND
name = 'djbc_kite_movement_type_pemakaian_bahan_baku_subkontrak'
"""
cr.execute(query)
movement_type = cr.fetchone()
if movement_type:
return movement_type
else:
return 0
def _select(self):
select_str = """
SELECT a.id as id,
a.picking_id AS no_pengeluaran,
a.date AS tgl_pengeluaran,
c.name AS nama_barang,
b.default_code AS kode_barang,
a.product_uom AS satuan,
0.0 AS jumlah_digunakan,
a.product_uom_qty AS jumlah_disubkontrakkan,
e.partner_id AS penerima_subkontrak,
d.warehouse_id AS gudang
"""
return select_str
def _from(self):
from_str = """
FROM stock_move AS a
"""
return from_str
def _join(self):
join_str = """
JOIN product_product AS b ON a.product_id = b.id
JOIN product_template AS c ON b.product_tmpl_id = c.id
JOIN stock_picking_type AS d ON a.picking_type_id = d.id
LEFT JOIN stock_picking AS e ON a.picking_id = e.id
JOIN product_categ_rel AS f ON
c.id = f.product_id
JOIN product_category AS g ON
f.categ_id = g.id
JOIN (
SELECT res_id
FROM ir_model_data AS e1
WHERE
e1.module = 'l10n_id_djbc_kite_common' AND
(e1.name = 'product_categ_kite_bahan_baku')
) as h ON
g.id = h.res_id
"""
return join_str
def _where(self, movement_type_id):
where_str = """
WHERE
a.state = 'done' AND
a.djbc_custom IS TRUE AND
d.djbc_kite_scrap IS FALSE AND
d.djbc_kite_movement_type_id=%s
""" % (movement_type_id)
return where_str
def _order_by(self):
join_str = """
ORDER BY a.date, a.id
"""
return join_str
def init(self, cr):
tools.drop_view_if_exists(cr, self._table)
# pylint: disable=locally-disabled, sql-injection
movement_type_id =\
self._get_movement_type(cr)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
%s
%s
%s
%s
)""" % (
self._table,
self._select(),
self._from(),
self._join(),
self._where(movement_type_id),
self._order_by()
))
| agpl-3.0 | -4,611,849,417,877,237,000 | 28.992701 | 76 | 0.514724 | false |
ifding/ifding.github.io | stylegan2-ada/train.py | 1 | 26201 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Training Generative Adversarial Networks with Limited Data"."""
import os
import argparse
import json
import re
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from training import training_loop
from training import dataset
from metrics import metric_defaults
#----------------------------------------------------------------------------
class UserError(Exception):
pass
#----------------------------------------------------------------------------
def setup_training_options(
# General options (not included in desc).
gpus = None, # Number of GPUs: <int>, default = 1 gpu
snap = None, # Snapshot interval: <int>, default = 50 ticks
# Training dataset.
data = None, # Training dataset (required): <path>
res = None, # Override dataset resolution: <int>, default = highest available
mirror = None, # Augment dataset with x-flips: <bool>, default = False
# Metrics (not included in desc).
metrics = None, # List of metric names: [], ['fid50k_full'] (default), ...
metricdata = None, # Metric dataset (optional): <path>
# Base config.
cfg = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar', 'cifarbaseline'
gamma = None, # Override R1 gamma: <float>, default = depends on cfg
kimg = None, # Override training duration: <int>, default = depends on cfg
# Discriminator augmentation.
aug = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed', 'adarv'
p = None, # Specify p for 'fixed' (required): <float>
target = None, # Override ADA target for 'ada' and 'adarv': <float>, default = depends on aug
augpipe = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'
# Comparison methods.
cmethod = None, # Comparison method: 'nocmethod' (default), 'bcr', 'zcr', 'pagan', 'wgangp', 'auxrot', 'spectralnorm', 'shallowmap', 'adropout'
dcap = None, # Multiplier for discriminator capacity: <float>, default = 1
# Transfer learning.
resume = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>
freezed = None, # Freeze-D: <int>, default = 0 discriminator layers
):
# Initialize dicts.
args = dnnlib.EasyDict()
args.G_args = dnnlib.EasyDict(func_name='training.networks.G_main')
args.D_args = dnnlib.EasyDict(func_name='training.networks.D_main')
args.E_args = dnnlib.EasyDict(func_name='training.networks.E_main')
args.G_opt_args = dnnlib.EasyDict(beta1=0.0, beta2=0.99)
args.D_opt_args = dnnlib.EasyDict(beta1=0.0, beta2=0.99)
args.loss_args = dnnlib.EasyDict(func_name='training.loss.stylegan2')
args.augment_args = dnnlib.EasyDict(class_name='training.augment.AdaptiveAugment')
# ---------------------------
# General options: gpus, snap
# ---------------------------
if gpus is None:
gpus = 1
assert isinstance(gpus, int)
if not (gpus >= 1 and gpus & (gpus - 1) == 0):
raise UserError('--gpus must be a power of two')
args.num_gpus = gpus
if snap is None:
snap = 50
assert isinstance(snap, int)
if snap < 1:
raise UserError('--snap must be at least 1')
args.image_snapshot_ticks = snap
args.network_snapshot_ticks = snap
# -----------------------------------
# Training dataset: data, res, mirror
# -----------------------------------
assert data is not None
assert isinstance(data, str)
data_name = os.path.basename(os.path.abspath(data))
if not os.path.isdir(data) or len(data_name) == 0:
raise UserError('--data must point to a directory containing *.tfrecords')
desc = data_name
with tf.Graph().as_default(), tflib.create_session().as_default(): # pylint: disable=not-context-manager
args.train_dataset_args = dnnlib.EasyDict(path=data, max_label_size='full')
dataset_obj = dataset.load_dataset(**args.train_dataset_args) # try to load the data and see what comes out
args.train_dataset_args.resolution = dataset_obj.shape[-1] # be explicit about resolution
args.train_dataset_args.max_label_size = dataset_obj.label_size # be explicit about label size
validation_set_available = dataset_obj.has_validation_set
dataset_obj.close()
dataset_obj = None
if res is None:
res = args.train_dataset_args.resolution
else:
assert isinstance(res, int)
if not (res >= 4 and res & (res - 1) == 0):
raise UserError('--res must be a power of two and at least 4')
if res > args.train_dataset_args.resolution:
raise UserError(f'--res cannot exceed maximum available resolution in the dataset ({args.train_dataset_args.resolution})')
desc += f'-res{res:d}'
args.train_dataset_args.resolution = res
if mirror is None:
mirror = False
else:
assert isinstance(mirror, bool)
if mirror:
desc += '-mirror'
args.train_dataset_args.mirror_augment = mirror
# ----------------------------
# Metrics: metrics, metricdata
# ----------------------------
if metrics is None:
metrics = ['fid50k_full', 'clustering']
assert isinstance(metrics, list)
assert all(isinstance(metric, str) for metric in metrics)
args.metric_arg_list = []
for metric in metrics:
if metric not in metric_defaults.metric_defaults:
raise UserError('\n'.join(['--metrics can only contain the following values:', 'none'] + list(metric_defaults.metric_defaults.keys())))
args.metric_arg_list.append(metric_defaults.metric_defaults[metric])
args.metric_dataset_args = dnnlib.EasyDict(args.train_dataset_args)
if metricdata is not None:
assert isinstance(metricdata, str)
if not os.path.isdir(metricdata):
raise UserError('--metricdata must point to a directory containing *.tfrecords')
args.metric_dataset_args.path = metricdata
# -----------------------------
# Base config: cfg, gamma, kimg
# -----------------------------
if cfg is None:
cfg = 'auto'
assert isinstance(cfg, str)
desc += f'-{cfg}'
cfg_specs = {
'auto': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=-1, ema=-1, ramp=0.05, map=2), # populated dynamically based on 'gpus' and 'res'
'stylegan2': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=10, ema=10, ramp=None, map=8), # uses mixed-precision, unlike original StyleGAN2
'paper256': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=0.5, lrate=0.0025, gamma=1, ema=20, ramp=None, map=8),
'paper512': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=1, lrate=0.0025, gamma=0.5, ema=20, ramp=None, map=8),
'paper1024': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=2, ema=10, ramp=None, map=8),
'cifar': dict(ref_gpus=2, kimg=10000, mb=64, mbstd=32, fmaps=0.5, lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=2),
'cifarbaseline': dict(ref_gpus=2, kimg=100000, mb=64, mbstd=32, fmaps=0.5, lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=8),
}
assert cfg in cfg_specs
spec = dnnlib.EasyDict(cfg_specs[cfg])
if cfg == 'auto':
desc += f'{gpus:d}'
spec.ref_gpus = gpus
spec.mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
spec.fmaps = 1 if res >= 512 else 0.5
spec.lrate = 0.002 if res >= 1024 else 0.0025
spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula
spec.ema = spec.mb * 10 / 32
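        # Worked example of the 'auto' heuristics (editor's note): for res=256 and
        # gpus=2 the formulas above give mb = max(min(2 * min(4096 // 256, 32), 64), 2) = 32,
        # mbstd = min(32 // 2, 4) = 4, fmaps = 0.5, lrate = 0.0025,
        # gamma = 0.0002 * 256**2 / 32 = 0.4096 and ema = 32 * 10 / 32 = 10.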
args.total_kimg = spec.kimg
args.minibatch_size = spec.mb
args.minibatch_gpu = spec.mb // spec.ref_gpus
args.D_args.mbstd_group_size = spec.mbstd
args.G_args.fmap_base = args.D_args.fmap_base = int(spec.fmaps * 16384)
args.G_args.fmap_max = args.D_args.fmap_max = 512
args.G_opt_args.learning_rate = args.D_opt_args.learning_rate = spec.lrate
args.loss_args.r1_gamma = spec.gamma
args.G_smoothing_kimg = spec.ema
args.G_smoothing_rampup = spec.ramp
args.G_args.mapping_layers = spec.map
args.G_args.num_fp16_res = args.D_args.num_fp16_res = 4 # enable mixed-precision training
args.G_args.conv_clamp = args.D_args.conv_clamp = 256 # clamp activations to avoid float16 overflow
if cfg == 'cifar':
args.loss_args.pl_weight = 0 # disable path length regularization
args.G_args.style_mixing_prob = None # disable style mixing
args.D_args.architecture = 'orig' # disable residual skip connections
if gamma is not None:
assert isinstance(gamma, float)
if not gamma >= 0:
raise UserError('--gamma must be non-negative')
desc += f'-gamma{gamma:g}'
args.loss_args.r1_gamma = gamma
if kimg is not None:
assert isinstance(kimg, int)
if not kimg >= 1:
raise UserError('--kimg must be at least 1')
desc += f'-kimg{kimg:d}'
args.total_kimg = kimg
# ---------------------------------------------------
# Discriminator augmentation: aug, p, target, augpipe
# ---------------------------------------------------
if aug is None:
aug = 'ada'
else:
assert isinstance(aug, str)
desc += f'-{aug}'
if aug == 'ada':
args.augment_args.tune_heuristic = 'rt'
args.augment_args.tune_target = 0.6
elif aug == 'noaug':
pass
elif aug == 'fixed':
if p is None:
raise UserError(f'--aug={aug} requires specifying --p')
elif aug == 'adarv':
if not validation_set_available:
raise UserError(f'--aug={aug} requires separate validation set; please see "python dataset_tool.py pack -h"')
args.augment_args.tune_heuristic = 'rv'
args.augment_args.tune_target = 0.5
else:
raise UserError(f'--aug={aug} not supported')
if p is not None:
assert isinstance(p, float)
if aug != 'fixed':
raise UserError('--p can only be specified with --aug=fixed')
if not 0 <= p <= 1:
raise UserError('--p must be between 0 and 1')
desc += f'-p{p:g}'
args.augment_args.initial_strength = p
if target is not None:
assert isinstance(target, float)
if aug not in ['ada', 'adarv']:
raise UserError('--target can only be specified with --aug=ada or --aug=adarv')
if not 0 <= target <= 1:
raise UserError('--target must be between 0 and 1')
desc += f'-target{target:g}'
args.augment_args.tune_target = target
assert augpipe is None or isinstance(augpipe, str)
if augpipe is None:
augpipe = 'bgc'
else:
if aug == 'noaug':
raise UserError('--augpipe cannot be specified with --aug=noaug')
desc += f'-{augpipe}'
augpipe_specs = {
'blit': dict(xflip=1, rotate90=1, xint=1),
'geom': dict(scale=1, rotate=1, aniso=1, xfrac=1),
'color': dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
'filter': dict(imgfilter=1),
'noise': dict(noise=1),
'cutout': dict(cutout=1),
'bg': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),
'bgc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
'bgcf': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),
'bgcfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),
'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),
}
assert augpipe in augpipe_specs
if aug != 'noaug':
args.augment_args.apply_func = 'training.augment.augment_pipeline'
args.augment_args.apply_args = augpipe_specs[augpipe]
# ---------------------------------
# Comparison methods: cmethod, dcap
# ---------------------------------
assert cmethod is None or isinstance(cmethod, str)
if cmethod is None:
cmethod = 'nocmethod'
else:
desc += f'-{cmethod}'
if cmethod == 'nocmethod':
pass
elif cmethod == 'bcr':
args.loss_args.func_name = 'training.loss.cmethods'
args.loss_args.bcr_real_weight = 10
args.loss_args.bcr_fake_weight = 10
args.loss_args.bcr_augment = dnnlib.EasyDict(func_name='training.augment.augment_pipeline', xint=1, xint_max=1/32)
elif cmethod == 'zcr':
args.loss_args.func_name = 'training.loss.cmethods'
args.loss_args.zcr_gen_weight = 0.02
args.loss_args.zcr_dis_weight = 0.2
args.G_args.num_fp16_res = args.D_args.num_fp16_res = 0 # disable mixed-precision training
args.G_args.conv_clamp = args.D_args.conv_clamp = None
elif cmethod == 'pagan':
if aug != 'noaug':
raise UserError(f'--cmethod={cmethod} is not compatible with discriminator augmentation; please specify --aug=noaug')
args.D_args.use_pagan = True
args.augment_args.tune_heuristic = 'rt' # enable ada heuristic
args.augment_args.pop('apply_func', None) # disable discriminator augmentation
args.augment_args.pop('apply_args', None)
args.augment_args.tune_target = 0.95
elif cmethod == 'wgangp':
if aug != 'noaug':
raise UserError(f'--cmethod={cmethod} is not compatible with discriminator augmentation; please specify --aug=noaug')
if gamma is not None:
raise UserError(f'--cmethod={cmethod} is not compatible with --gamma')
args.loss_args = dnnlib.EasyDict(func_name='training.loss.wgangp')
args.G_opt_args.learning_rate = args.D_opt_args.learning_rate = 0.001
args.G_args.num_fp16_res = args.D_args.num_fp16_res = 0 # disable mixed-precision training
args.G_args.conv_clamp = args.D_args.conv_clamp = None
args.lazy_regularization = False
elif cmethod == 'auxrot':
if args.train_dataset_args.max_label_size > 0:
raise UserError(f'--cmethod={cmethod} is not compatible with label conditioning; please specify a dataset without labels')
args.loss_args.func_name = 'training.loss.cmethods'
args.loss_args.auxrot_alpha = 10
args.loss_args.auxrot_beta = 5
args.D_args.score_max = 5 # prepare D to output 5 scalars per image instead of just 1
elif cmethod == 'spectralnorm':
args.D_args.use_spectral_norm = True
elif cmethod == 'shallowmap':
if args.G_args.mapping_layers == 2:
raise UserError(f'--cmethod={cmethod} is a no-op for --cfg={cfg}')
args.G_args.mapping_layers = 2
elif cmethod == 'adropout':
if aug != 'noaug':
raise UserError(f'--cmethod={cmethod} is not compatible with discriminator augmentation; please specify --aug=noaug')
args.D_args.adaptive_dropout = 1
args.augment_args.tune_heuristic = 'rt' # enable ada heuristic
args.augment_args.pop('apply_func', None) # disable discriminator augmentation
args.augment_args.pop('apply_args', None)
args.augment_args.tune_target = 0.6
else:
raise UserError(f'--cmethod={cmethod} not supported')
if dcap is not None:
assert isinstance(dcap, float)
if not dcap > 0:
raise UserError('--dcap must be positive')
desc += f'-dcap{dcap:g}'
args.D_args.fmap_base = max(int(args.D_args.fmap_base * dcap), 1)
args.D_args.fmap_max = max(int(args.D_args.fmap_max * dcap), 1)
# ----------------------------------
# Transfer learning: resume, freezed
# ----------------------------------
resume_specs = {
'ffhq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',
'ffhq512': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',
'ffhq1024': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',
'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',
'lsundog256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',
}
assert resume is None or isinstance(resume, str)
if resume is None:
resume = 'noresume'
elif resume == 'noresume':
desc += '-noresume'
elif resume in resume_specs:
desc += f'-resume{resume}'
args.resume_pkl = resume_specs[resume] # predefined url
else:
desc += '-resumecustom'
args.resume_pkl = resume # custom path or url
if resume != 'noresume':
args.augment_args.tune_kimg = 100 # make ADA react faster at the beginning
args.G_smoothing_rampup = None # disable EMA rampup
if freezed is not None:
assert isinstance(freezed, int)
if not freezed >= 0:
raise UserError('--freezed must be non-negative')
desc += f'-freezed{freezed:d}'
args.D_args.freeze_layers = freezed
return desc, args
#----------------------------------------------------------------------------
def run_training(outdir, seed, dry_run, **hyperparam_options):
# Setup training options.
tflib.init_tf({'rnd.np_random_seed': seed})
run_desc, training_options = setup_training_options(**hyperparam_options)
# Pick output directory.
prev_run_dirs = []
if os.path.isdir(outdir):
prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
cur_run_id = max(prev_run_ids, default=-1) + 1
training_options.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
assert not os.path.exists(training_options.run_dir)
# Print options.
print()
print('Training options:')
print(json.dumps(training_options, indent=2))
print()
print(f'Output directory: {training_options.run_dir}')
print(f'Training data: {training_options.train_dataset_args.path}')
print(f'Training length: {training_options.total_kimg} kimg')
print(f'Resolution: {training_options.train_dataset_args.resolution}')
print(f'Number of GPUs: {training_options.num_gpus}')
print()
# Dry run?
if dry_run:
print('Dry run; exiting.')
return
# Kick off training.
print('Creating output directory...')
os.makedirs(training_options.run_dir)
with open(os.path.join(training_options.run_dir, 'training_options.json'), 'wt') as f:
json.dump(training_options, f, indent=2)
with dnnlib.util.Logger(os.path.join(training_options.run_dir, 'log.txt')):
training_loop.training_loop(**training_options)
#----------------------------------------------------------------------------
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
if v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_comma_sep(s):
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
#----------------------------------------------------------------------------
_cmdline_help_epilog = '''examples:
# Train custom dataset using 1 GPU.
python %(prog)s --outdir=~/training-runs --gpus=1 --data=~/datasets/custom
# Train class-conditional CIFAR-10 using 2 GPUs.
python %(prog)s --outdir=~/training-runs --gpus=2 --data=~/datasets/cifar10c \\
--cfg=cifar
# Transfer learn MetFaces from FFHQ using 4 GPUs.
python %(prog)s --outdir=~/training-runs --gpus=4 --data=~/datasets/metfaces \\
--cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10
# Reproduce original StyleGAN2 config F.
python %(prog)s --outdir=~/training-runs --gpus=8 --data=~/datasets/ffhq \\
--cfg=stylegan2 --res=1024 --mirror=1 --aug=noaug
available base configs (--cfg):
auto Automatically select reasonable defaults based on resolution
and GPU count. Good starting point for new datasets.
stylegan2 Reproduce results for StyleGAN2 config F at 1024x1024.
paper256 Reproduce results for FFHQ and LSUN Cat at 256x256.
paper512 Reproduce results for BreCaHAD and AFHQ at 512x512.
paper1024 Reproduce results for MetFaces at 1024x1024.
cifar Reproduce results for CIFAR-10 (tuned configuration).
cifarbaseline Reproduce results for CIFAR-10 (baseline configuration).
transfer learning source networks (--resume):
ffhq256 FFHQ trained at 256x256 resolution.
ffhq512 FFHQ trained at 512x512 resolution.
ffhq1024 FFHQ trained at 1024x1024 resolution.
celebahq256 CelebA-HQ trained at 256x256 resolution.
lsundog256 LSUN Dog trained at 256x256 resolution.
<path or URL> Custom network pickle.
'''
#----------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description='Train a GAN using the techniques described in the paper\n"Training Generative Adversarial Networks with Limited Data".',
epilog=_cmdline_help_epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
group = parser.add_argument_group('general options')
group.add_argument('--outdir', help='Where to save the results (required)', required=True, metavar='DIR')
group.add_argument('--gpus', help='Number of GPUs to use (default: 1 gpu)', type=int, metavar='INT')
group.add_argument('--snap', help='Snapshot interval (default: 50 ticks)', type=int, metavar='INT')
group.add_argument('--seed', help='Random seed (default: %(default)s)', type=int, default=1000, metavar='INT')
group.add_argument('-n', '--dry-run', help='Print training options and exit', action='store_true', default=False)
group = parser.add_argument_group('training dataset')
group.add_argument('--data', help='Training dataset path (required)', metavar='PATH', required=True)
group.add_argument('--res', help='Dataset resolution (default: highest available)', type=int, metavar='INT')
group.add_argument('--mirror', help='Augment dataset with x-flips (default: false)', type=_str_to_bool, metavar='BOOL')
group = parser.add_argument_group('metrics')
group.add_argument('--metrics', help='Comma-separated list or "none" (default: fid50k_full)', type=_parse_comma_sep, metavar='LIST')
group.add_argument('--metricdata', help='Dataset to evaluate metrics against (optional)', metavar='PATH')
group = parser.add_argument_group('base config')
group.add_argument('--cfg', help='Base config (default: auto)', choices=['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar', 'cifarbaseline'])
group.add_argument('--gamma', help='Override R1 gamma', type=float, metavar='FLOAT')
group.add_argument('--kimg', help='Override training duration', type=int, metavar='INT')
group = parser.add_argument_group('discriminator augmentation')
group.add_argument('--aug', help='Augmentation mode (default: ada)', choices=['noaug', 'ada', 'fixed', 'adarv'])
group.add_argument('--p', help='Specify augmentation probability for --aug=fixed', type=float, metavar='FLOAT')
group.add_argument('--target', help='Override ADA target for --aug=ada and --aug=adarv', type=float)
group.add_argument('--augpipe', help='Augmentation pipeline (default: bgc)', choices=['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc'])
group = parser.add_argument_group('comparison methods')
group.add_argument('--cmethod', help='Comparison method (default: nocmethod)', choices=['nocmethod', 'bcr', 'zcr', 'pagan', 'wgangp', 'auxrot', 'spectralnorm', 'shallowmap', 'adropout'])
group.add_argument('--dcap', help='Multiplier for discriminator capacity', type=float, metavar='FLOAT')
group = parser.add_argument_group('transfer learning')
group.add_argument('--resume', help='Resume from network pickle (default: noresume)')
group.add_argument('--freezed', help='Freeze-D (default: 0 discriminator layers)', type=int, metavar='INT')
args = parser.parse_args()
try:
run_training(**vars(args))
except UserError as err:
print(f'Error: {err}')
exit(1)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| mit | -1,727,428,007,259,815,000 | 45.455674 | 190 | 0.615778 | false |
catseye/T-Rext | src/t_rext/processors.py | 1 | 7130 | # encoding: UTF-8
import re
class Processor(object):
"""An abstract base class that defines the protocol for Processor objects.
"""
def __init__(self, iterable):
"""Given an iterable of objects, become an iterable of other objects.
The two sets of objects need not be the same type.
Note that a file-like object is an iterable of lines.
"""
self._iterable = iterable
self.errors = []
@property
def iterable(self):
for thing in self._iterable:
self.check_input_value(thing)
yield thing
def check_input_value(self, value):
pass
def has_failed(self, original, result):
"""Given two iterables, representing the input and the output
of this Processor, return a boolean indicating whether we think
this Processor has failed or not.
"""
return False
def __iter__(self):
raise NotImplementedError
def __str__(self):
return self.__class__.__name__
class LineProcessor(Processor):
def check_input_value(self, value):
assert isinstance(value, unicode)
class TrailingWhitespaceProcessor(LineProcessor):
def __iter__(self):
for line in self.iterable:
yield line.rstrip()
class SentinelProcessor(LineProcessor):
"""Yields only those lines of the input between the start
sentinel (exclusive) and the end sentinel (exclusive.)
The start sentinel is actually "super-exclusive" in that neither it,
nor any non-blank lines immediately following it, are included in
the output.
Note that cleaned lines are stripped of trailing whitespace.
"""
def __iter__(self):
self.state = 'pre'
for line in self.iterable:
line = line.rstrip()
if self.state == 'pre':
match = re.match(self.START_RE, line.upper())
if match:
self.state = 'consuming-start'
elif self.state == 'consuming-start':
if not line:
self.state = 'mid'
elif self.state == 'mid':
match = re.match(self.END_RE, line.upper())
if match:
self.state = 'post'
else:
yield line
else:
assert self.state == 'post'
pass
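# Illustrative sketch (editor's addition, not part of the original module):
# concrete subclasses are expected to supply START_RE and END_RE.  A cleaner for
# Project Gutenberg-style texts might use patterns like the following; the exact
# regexes are assumptions for demonstration only (matching is done against the
# upper-cased line).
class _ExampleSentinelProcessor(SentinelProcessor):
    START_RE = r'\*\*\* START OF .*'
    END_RE = r'\*\*\* END OF .*'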
class ComposedProcessor(LineProcessor):
"""A Processor which applies multiple Processors to an input in
sequence. If any Processor fails, it returns the result of
processing only up to the point of the failure.
"""
def __init__(self, lines, classes, name=''):
LineProcessor.__init__(self, lines)
self.classes = classes
self.name = name
def __iter__(self):
lines = list(self.iterable)
for cls in self.classes:
filter_ = cls(lines)
new_lines = list(filter_)
if filter_.has_failed(lines, new_lines):
self.errors.append("%s failed to clean '%s'" % (filter_, self.name))
break
lines = new_lines
for line in lines:
yield line
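# Illustrative usage sketch (editor's addition, not part of the original module):
# ComposedProcessor threads one list of lines through each processor class in
# turn, stopping early and recording a message in self.errors if a processor's
# has_failed() check trips.  The particular combination of filters here is an
# example only.
def _example_clean(lines):
    cleaner = ComposedProcessor(
        lines,
        [TrailingWhitespaceProcessor, TidyPunctuationLineFilter,
         FixProductiveEndingsLineFilter],
        name='example document',
    )
    return list(cleaner), cleaner.errors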
class RewritingProcessor(LineProcessor):
SUBSTITUTIONS = ()
def rewrite_line(self, subject, replacement, line):
count = 1
while count > 0:
(line, count) = re.subn(subject, replacement, line)
return line
def __iter__(self):
for line in self.iterable:
line = line.rstrip()
for (subject, replacement) in self.SUBSTITUTIONS:
line = self.rewrite_line(subject, replacement, line)
yield line
class TidyPunctuationLineFilter(RewritingProcessor):
SUBSTITUTIONS = (
(ur'- ', u'-'),
(ur' ,', u','),
(ur' \.', u'.'),
(ur' \;', u';'),
(ur' \:', u':'),
(ur' \?', u'?'),
(ur' \!', u'!'),
(ur',,', u','),
(ur',\.', u'.'),
(ur'“ ', u'“'),
(ur' ”', u'”'),
)
class FixProductiveEndingsLineFilter(RewritingProcessor):
SUBSTITUTIONS = (
(r'olfs ', 'olves '),
(r'xs', 'xes'),
(r'ullly', 'ully'),
(r'yly', 'ily'),
(r'icly', 'ically'),
(r'lely', 'ly'),
(r' coily', ' coyly'),
)
class FixIndefiniteArticlesLineFilter(RewritingProcessor):
SUBSTITUTIONS = (
(r' An unique', ' A unique'),
(r' an unique', ' a unique'),
(r' An unicorn', ' A unicorn'),
(r' an unicorn', ' a unicorn'),
)
class QuoteOrienterLineFilter(LineProcessor):
"""Note that this expects to work on a single paragraph
only. (If you give it more than one paragraph, it will
happily match quotes between adjacent paragraphs, which
is probably not what you want.)
"""
def __iter__(self):
self.state = 0
for line in self.iterable:
new_line = u''
for character in line:
character = unicode(character)
if character == u'"':
if self.state == 0:
character = u'“'
self.state = 1
else:
assert self.state == 1
character = u'”'
self.state = 0
new_line += character
yield new_line
class Regrouper(Processor):
"""An abstract class that defines the protocol for Regrouper objects."""
pass
class LinesToParagraphsRegrouper(Regrouper):
"""A Regrouper that groups lines into paragraphs and collections of
intervening blank lines.
"""
def __iter__(self):
state = 'begin'
group = []
for line in self.iterable:
line = line.rstrip()
if line:
if state == 'begin':
state = 'para'
group.append(line)
elif state == 'para':
group.append(line)
else:
assert state == 'blank'
yield group
state = 'para'
group = []
group.append(line)
else:
if state == 'begin':
state = 'blank'
group.append(line)
elif state == 'blank':
group.append(line)
else:
assert state == 'para'
yield group
state = 'blank'
group = []
group.append(line)
if group:
yield group
class ParagraphsToLinesRegrouper(Regrouper):
"""A Regrouper that ungroups paragraphs (and collections of blank lines)
into individual lines.
"""
def check_input_value(self, value):
assert isinstance(value, list)
for element in value:
assert isinstance(element, unicode)
def __iter__(self):
for para in self.iterable:
for line in para:
yield line
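# Illustrative sketch (editor's addition): the regroupers are meant to bracket
# paragraph-level filters, e.g. running QuoteOrienterLineFilter on one paragraph
# at a time (as its docstring requires) and then flattening back to lines.  This
# pipeline is an example only, not part of the original module.
def _example_orient_quotes(lines):
    oriented_groups = []
    for group in LinesToParagraphsRegrouper(lines):
        oriented_groups.append(list(QuoteOrienterLineFilter(group)))
    return list(ParagraphsToLinesRegrouper(oriented_groups))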
| unlicense | 2,738,914,160,866,594,000 | 27.586345 | 84 | 0.518123 | false |
ezotrank/wheezy.template | src/wheezy/template/ext/tests/test_determined.py | 1 | 1192 |
""" Unit tests for ``wheezy.templates.ext.determined``.
"""
import unittest
class DeterminedTestCase(unittest.TestCase):
""" Test the ``DeterminedExtension``.
"""
def setUp(self):
from wheezy.template.ext.determined import DeterminedExtension
self.preprocess = DeterminedExtension(
known_calls=['path_for', '_']).preprocessors[0]
def test_determined(self):
""" Substitute determinded expressions for known calls to
preprocessor calls.
"""
assert """\
#ctx['_']('Name:')
#ctx['path_for']('default')
#ctx['path_for']('static', path='/static/css/site.css')
""" == self.preprocess("""\
@_('Name:')
@path_for('default')
@path_for('static', path='/static/css/site.css')
""")
def test_undetermined(self):
""" Calls that are not determined left unchanged.
"""
assert """\
@path_for('item', id=id)
@model.username.label(_('Username: '))
""" == self.preprocess("""\
@path_for('item', id=id)
@model.username.label(_('Username: '))
""")
| mit | -1,081,235,211,300,768,900 | 28.8 | 70 | 0.528523 | false |
Azure/azure-sdk-for-python | sdk/databox/azure-mgmt-databox/azure/mgmt/databox/v2019_09_01/operations/_service_operations.py | 1 | 23846 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceOperations(object):
"""ServiceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databox.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_available_skus(
self,
location, # type: str
available_sku_request, # type: "_models.AvailableSkuRequest"
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableSkusResult"]
"""This method provides the list of available skus for the given subscription and location.
:param location: The location of the resource.
:type location: str
:param available_sku_request: Filters for showing the available skus.
:type available_sku_request: ~azure.mgmt.databox.models.AvailableSkuRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableSkusResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.AvailableSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_available_skus.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/availableSkus'} # type: ignore
def list_available_skus_by_resource_group(
self,
resource_group_name, # type: str
location, # type: str
available_sku_request, # type: "_models.AvailableSkuRequest"
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AvailableSkusResult"]
"""This method provides the list of available skus for the given subscription, resource group and
location.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param location: The location of the resource.
:type location: str
:param available_sku_request: Filters for showing the available skus.
:type available_sku_request: ~azure.mgmt.databox.models.AvailableSkuRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableSkusResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databox.models.AvailableSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_available_skus_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(available_sku_request, 'AvailableSkuRequest')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableSkusResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_available_skus_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/locations/{location}/availableSkus'} # type: ignore
def validate_address(
self,
location, # type: str
validate_address, # type: "_models.ValidateAddress"
**kwargs # type: Any
):
# type: (...) -> "_models.AddressValidationOutput"
"""[DEPRECATED NOTICE: This operation will soon be removed] This method validates the customer
shipping address and provide alternate addresses if any.
:param location: The location of the resource.
:type location: str
:param validate_address: Shipping address of the customer.
:type validate_address: ~azure.mgmt.databox.models.ValidateAddress
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddressValidationOutput, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.AddressValidationOutput
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddressValidationOutput"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.validate_address.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(validate_address, 'ValidateAddress')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AddressValidationOutput', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_address.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/validateAddress'} # type: ignore
def validate_inputs_by_resource_group(
self,
resource_group_name, # type: str
location, # type: str
validation_request, # type: "_models.ValidationRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ValidationResponse"
"""This method does all necessary pre-job creation validation under resource group.
:param resource_group_name: The Resource Group Name.
:type resource_group_name: str
:param location: The location of the resource.
:type location: str
:param validation_request: Inputs of the customer.
:type validation_request: ~azure.mgmt.databox.models.ValidationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ValidationResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.ValidationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ValidationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.validate_inputs_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(validation_request, 'ValidationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ValidationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_inputs_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBox/locations/{location}/validateInputs'} # type: ignore
def validate_inputs(
self,
location, # type: str
validation_request, # type: "_models.ValidationRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ValidationResponse"
"""This method does all necessary pre-job creation validation under subscription.
:param location: The location of the resource.
:type location: str
:param validation_request: Inputs of the customer.
:type validation_request: ~azure.mgmt.databox.models.ValidationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ValidationResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.ValidationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ValidationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.validate_inputs.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(validation_request, 'ValidationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ValidationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_inputs.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/validateInputs'} # type: ignore
def region_configuration(
self,
location, # type: str
region_configuration_request, # type: "_models.RegionConfigurationRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.RegionConfigurationResponse"
"""This API provides configuration details specific to given region/location.
:param location: The location of the resource.
:type location: str
:param region_configuration_request: Request body to get the configuration for the region.
:type region_configuration_request: ~azure.mgmt.databox.models.RegionConfigurationRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegionConfigurationResponse, or the result of cls(response)
:rtype: ~azure.mgmt.databox.models.RegionConfigurationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegionConfigurationResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.region_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(region_configuration_request, 'RegionConfigurationRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegionConfigurationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
region_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataBox/locations/{location}/regionConfiguration'} # type: ignore
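    # Rough usage sketch (not part of the generated SDK code; the client attribute name and
    # model construction below are assumptions for illustration only):
    #
    #   client = DataBoxManagementClient(credential, subscription_id)
    #   request = _models.RegionConfigurationRequest(...)  # fill in the availability request(s) you need
    #   config = client.service.region_configuration("westus", request)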
| mit | 8,874,287,759,854,260,000 | 49.521186 | 209 | 0.644469 | false |
exowanderer/SpitzerDeepLearningNetwork | Python Scripts/tpot_genetic_spitzer_calibration.py | 1 | 1249 | from tpot import TPOTRegressor
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
from sklearn.metrics import r2_score
from time import time
n_skip = 100 # testing on smaller data set
features = pd.read_csv('pmap_raw_16features.csv').iloc[::n_skip]
labels = pd.read_csv('pmap_raw_labels_and_errors.csv')['Flux'].iloc[::n_skip]
#Split training, testing, and validation data
idx = np.arange(labels.values.size)
training_indices, validation_indices = train_test_split(idx, test_size=0.20)
#Let Genetic Programming find best ML model and hyperparameters
tpot = TPOTRegressor(generations=10, verbosity=2, n_jobs=-1)
start = time()
tpot.fit(features.iloc[training_indices].values, labels.iloc[training_indices].values)
print('Full TPOT regressor operation took {:.1f} minutes'.format((time() - start)/60))
#Score the accuracy
print('Best pipeline test accuracy: {:.3f}'.format(
tpot.score(features.iloc[validation_indices].values, labels.iloc[validation_indices].values)))
#Export the generated code
tpot.export('spitzer_calibration_tpot_best_pipeline.py')
| mit | 2,787,003,440,955,471,000 | 31.868421 | 96 | 0.760608 | false |
Parsl/parsl | parsl/tests/configs/ec2_spot.py | 1 | 1226 | from parsl.providers import AWSProvider
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
# If you are a developer running tests, make sure to update parsl/tests/configs/user_opts.py
# If you are a user copying-and-pasting this as an example, make sure to either
# 1) create a local `user_opts.py`, or
# 2) delete the user_opts import below and replace all appearances of `user_opts` with the literal value
# (i.e., user_opts['swan']['username'] -> 'your_username')
from parsl.tests.configs.user_opts import user_opts
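# A minimal user_opts.py for this config might look like the following (all values are
# placeholders, not real addresses or credentials):
#
#   user_opts = {
#       'public_ip': '203.0.113.10',
#       'ec2': {
#           'image_id': 'ami-00000000',
#           'region': 'us-east-1',
#           'key_name': 'my-keypair',
#       },
#   }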
config = Config(
executors=[
HighThroughputExecutor(
label='ec2_single_node',
address=user_opts['public_ip'],
provider=AWSProvider(
user_opts['ec2']['image_id'],
region=user_opts['ec2']['region'],
key_name=user_opts['ec2']['key_name'],
spot_max_bid='1.0',
profile="default",
state_file='awsproviderstate.json',
nodes_per_block=1,
init_blocks=1,
max_blocks=1,
min_blocks=0,
walltime='01:00:00',
),
)
]
)
| apache-2.0 | 6,097,005,368,305,727,000 | 36.151515 | 110 | 0.578303 | false |
LighthouseHPC/lighthouse | sandbox/petsc/timing/fix.py | 1 | 1498 | #!/usr/bin/env python
import glob,os
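# This script appears to tidy up PETSc timing logs in the current directory: it drops
# timestamped lines, reads the matrix name from the '-f .../UFloridaSparseMat/petsc/<name>.petsc'
# argument, renames any log whose filename does not match that matrix, and normalizes the
# 'Hash:' header while preserving the original modification times. Note that writefile()
# relies on the module-level loop variable `f` when it calls os.utime().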
def writefile(fd, newlines, stat):
fd.seek(0)
fd.write(''.join(newlines))
fd.truncate()
fd.close()
os.utime(f, (stat.st_atime, stat.st_mtime))
for f in glob.glob("*.log"):
stat = os.stat(f)
fd = open(f,'r+')
newlines = []
lines = fd.readlines()
#if not lines or lines[0].find('Hash: nohash') < 0:
for l in lines:
if not l.startswith('2015-05'): newlines.append(l)
if l.startswith('-f /gpfs/mira-fs0/projects/PEACEndStation/norris/UFloridaSparseMat/petsc/'):
matrixname = l.strip().split('/')[-1].replace('.petsc','')
hashnum = os.path.basename(f).split('.')[-2]
#print f, hashnum
basename = os.path.basename(f)
matname, solver, suffix = basename.split('.')
if matname != matrixname:
print "Renaming", f, "to", os.path.join(os.path.dirname(f), '.'.join([matrixname, solver, suffix]))
fd.close()
fd2 = open(os.path.join(os.path.dirname(f), '.'.join([matrixname, solver, suffix])),'w')
writefile(fd2, newlines, stat)
fd2.close()
#os.remove(f)
os.system('rm %s' % f)
if not lines or not newlines:
fd.close()
continue
#if newlines[0].find('iccHash: 49598909') < 0:
if newlines[0].find('jacobiHash: nohash') < 0:
fd.close()
continue
newlines.insert(0,'Hash: %s\n' % hashnum)
newlines[1] = newlines[1].strip().replace('jacobiHash: nohash','jacobi') + newlines[2]
del newlines[2]
writefile(fd, newlines, stat)
| mit | -672,911,683,012,588,300 | 30.87234 | 104 | 0.610814 | false |
johnnoone/meuh-python | setup.py | 1 | 1455 | #!/usr/bin/env python
from setuptools import setup
setup(
name='meuh',
version='0.1',
description='Create debian package with rsync, docker and love',
author='Xavier Barbosa',
author_email='[email protected]',
license='MIT',
install_requires=[
'cliff==1.9.0',
'docker-py==0.7.1',
'six==1.9.0',
],
packages=['meuh'],
entry_points={
'console_scripts': [
'meuh = meuh.cli:main',
],
'meuh.commands': [
'build = meuh.commands.build:BuildCommand',
'fetch = meuh.commands.build:FetchCommand',
'publish = meuh.commands.build:PublishCommand',
'destroy-all = meuh.commands.admin:DestroyAllCommand',
'settings = meuh.commands.admin:SettingsCommand',
'distro_init = meuh.commands.distro:InitCommand',
'distro_list = meuh.commands.distro:ListCommand',
'distro_show = meuh.commands.distro:ShowCommand',
'distro_destroy = meuh.commands.distro:DestroyCommand',
'distro_destroy-all = meuh.commands.distro:DestroyAllCommand',
'bot_init = meuh.commands.bot:InitCommand',
'bot_show = meuh.commands.bot:ShowCommand',
'bot_destroy = meuh.commands.bot:DestroyCommand',
'bot_destroy-all = meuh.commands.bot:DestroyAllCommand',
'bot_exec = meuh.commands.bot:ExecCommand',
]
}
)
| mit | 4,291,802,773,832,123,000 | 35.375 | 74 | 0.595189 | false |
shanedevane/python-photo-resolution-comparison | docs/conf.py | 1 | 8738 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# python_photo_resolution_comparison documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import python_photo_resolution_comparison
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Photo Resolution Comparison'
copyright = u"2016, Shane Devane"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = python_photo_resolution_comparison.__version__
# The full version, including alpha/beta/rc tags.
release = python_photo_resolution_comparison.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python_photo_resolution_comparisondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'python_photo_resolution_comparison.tex',
u'Python Photo Resolution Comparison Documentation',
u'Shane Devane', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python_photo_resolution_comparison',
u'Python Photo Resolution Comparison Documentation',
[u'Shane Devane'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python_photo_resolution_comparison',
u'Python Photo Resolution Comparison Documentation',
u'Shane Devane',
'python_photo_resolution_comparison',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | -3,780,680,017,058,500,000 | 30.774545 | 87 | 0.71149 | false |
theislab/scvelo | scvelo/utils.py | 1 | 1047 | from .preprocessing.utils import show_proportions, cleanup
from .preprocessing.utils import set_initial_size, get_initial_size
from .preprocessing.neighbors import get_connectivities
from .preprocessing.moments import get_moments
from .tools.utils import *
from .tools.rank_velocity_genes import get_mean_var
from .tools.run import convert_to_adata, convert_to_loom
from .tools.optimization import leastsq, get_weight
from .tools.velocity_graph import vals_to_csr
from .tools.score_genes_cell_cycle import get_phase_marker_genes
from .tools.transition_matrix import transition_matrix as get_transition_matrix
from .tools.transition_matrix import get_cell_transitions
from .plotting.utils import is_categorical, clip
from .plotting.utils import interpret_colorkey, rgb_custom_colormap
from .plotting.velocity_embedding_grid import compute_velocity_on_grid
from .plotting.simulation import compute_dynamics
from .read_load import clean_obs_names, merge, gene_info
from .read_load import convert_to_gene_names, convert_to_ensembl, load_biomart
| bsd-3-clause | -5,402,232,029,479,116,000 | 42.625 | 79 | 0.820439 | false |
desihub/desispec | doc/nb/tsnr_refset_etc.py | 1 | 3295 | import os
import json
import numpy as np
from astropy.table import Table, join
from desispec.io import findfile
from desiutil.log import get_logger
from pkg_resources import resource_filename
log=get_logger()
fname='/project/projectdirs/desi/spectro/redux/daily/tsnr-exposures.fits'
opath=resource_filename('desispec','data/tsnr/tsnr_refset_etc.csv')
log.info('Writing to {}.'.format(opath))
daily_tsnrs=Table.read(fname, 'TSNR2_EXPID')
daily_tsnrs.pprint()
tokeep = []
for night, expid in zip(daily_tsnrs['NIGHT'], daily_tsnrs['EXPID']):
etcpath=findfile('etc', night=night, expid=expid)
etcdata=None
if os.path.exists(etcpath):
with open(etcpath) as f:
etcdata = json.load(f)
else:
continue
etc_fiberfracs = {}
try:
for tracer in ['psf', 'elg', 'bgs']:
etc_fiberfracs[tracer]=etcdata['expinfo']['ffrac_{}'.format(tracer)]
log.info('Found etc ffracs for {} on {} ({})'.format(expid, night, etc_fiberfracs))
tokeep.append([expid, etc_fiberfracs['psf'], etc_fiberfracs['elg'], etc_fiberfracs['bgs']])
except:
pass
tokeep = np.array(tokeep)
tokeep = Table(tokeep, names=['EXPID', 'ETCFFRAC_PSF', 'ETCFFRAC_ELG', 'ETCFFRAC_BGS'])
tokeep['EXPID'] = tokeep['EXPID'].data.astype(np.int)
# tokeep.pprint()
tokeep = join(daily_tsnrs[np.isin(daily_tsnrs['EXPID'], tokeep['EXPID'])], tokeep, join_type='left', keys='EXPID')
for x in ['PSF', 'ELG', 'BGS']:
print(np.sort(tokeep['ETCFFRAC_{}'.format(x)].data))
# print(tokeep.dtype.names)
fnight = tokeep['NIGHT'].min()
lnight = tokeep['NIGHT'].max()
tokeep.meta['comments'] = ['---- TSNR reference catalog 20210528 ----',\
' MJW',\
' EFFTIME_SPEC normalization based on SV1 (commit a056732 on Mar 19 2021, e.g. data/tsnr/tsnr-efftime.yaml); This cat. propagates this normalization to sv3 & later, where etc ffracs and updated tsnrs are available.',\
' {}'.format(fname),\
' NUMEXP: {}'.format(len(tokeep)),\
' NIGHTS: {} to {}'.format(fnight, lnight)]
tokeep.write(opath, format='csv', overwrite=True, comment='#')
tokeepcsv = Table.read(opath, comment='#')
print(tokeepcsv.meta)
tokeepcsv.pprint()
## Check.
daily_tsnrs=Table.read(fname, 'TSNR2_EXPID')
for i, (night, expid, ffrac_psf, ffrac_elg, ffrac_bgs, efftime_spec) in enumerate(zip(tokeepcsv['NIGHT'], tokeepcsv['EXPID'], tokeepcsv['ETCFFRAC_PSF'], tokeepcsv['ETCFFRAC_ELG'], tokeepcsv['ETCFFRAC_BGS'], tokeepcsv['EFFTIME_SPEC'])):
etcpath=findfile('etc', night=night, expid=expid)
etcdata=None
with open(etcpath) as f:
etcdata = json.load(f)
etc_fiberfracs = {}
for tracer in ['psf', 'elg', 'bgs']:
etc_fiberfracs[tracer]=etcdata['expinfo']['ffrac_{}'.format(tracer)]
assert ffrac_psf == etc_fiberfracs['psf']
assert ffrac_elg == etc_fiberfracs['elg']
assert ffrac_bgs == etc_fiberfracs['bgs']
assert efftime_spec == daily_tsnrs[daily_tsnrs['EXPID'] == expid]['EFFTIME_SPEC']
print('Row {}: expid {} on night {} passes etc check'.format(i, expid, night))
print('\n\nDone.\n\n')
| bsd-3-clause | 5,179,725,702,002,123,000 | 31.95 | 245 | 0.618816 | false |
docwhite/appleseed | sandbox/shaders/src/compile_shaders.py | 1 | 2324 |
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2015-2016 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import os
import sys
if len(sys.argv) != 2:
print("Usage: {0} [path-to-oslc]".format(sys.argv[0]))
sys.exit(0)
oslc_cmd = sys.argv[1]
include_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "include")
for dirpath, dirnames, filenames in os.walk("."):
for filename in filenames:
if filename.endswith(".osl"):
src_filepath = os.path.join(dirpath, filename)
dest_dir = os.path.join("..", dirpath)
dst_filename = filename.replace(".osl", ".oso")
dst_filepath = os.path.join(dest_dir, dst_filename)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
retcode = os.system("{0} -v -I{1} -o {2} {3}".format(oslc_cmd, include_dir, dst_filepath, src_filepath))
if retcode != 0:
print("Compilation of {0} failed with error code {1}. Stopping.".format(src_filepath, retcode))
sys.exit(retcode)
| mit | 2,278,735,905,753,137,200 | 39.068966 | 116 | 0.694923 | false |
ctrevino/DIGITS | digits/test_views.py | 1 | 4491 | # Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import time
import json
import urllib
from gevent import monkey; monkey.patch_all()
from urlparse import urlparse
import webapp
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(object):
"""
Abstract class with a Flask context and a Scheduler
Provides some other useful functions to children who inherit this
"""
@classmethod
def setUpClass(cls):
# Start up the server
assert webapp.scheduler.start(), "scheduler wouldn't start"
webapp.app.config['WTF_CSRF_ENABLED'] = False
webapp.app.config['TESTING'] = True
cls.app = webapp.app.test_client()
cls.created_datasets = []
cls.created_models = []
@classmethod
def tearDownClass(cls):
# Remove all created jobs
for job_id in cls.created_models:
cls.delete_model(job_id)
for job_id in cls.created_datasets:
cls.delete_dataset(job_id)
@classmethod
def job_id_from_response(cls, rv):
"""
Extract the job_id from an HTTP response
"""
job_url = rv.headers['Location']
parsed_url = urlparse(job_url)
return parsed_url.path.split('/')[-1]
@classmethod
def job_exists(cls, job_id, job_type='jobs'):
"""
Test whether a job exists
"""
url = '/%s/%s' % (job_type, job_id)
rv = cls.app.get(url, follow_redirects=True)
assert rv.status_code in [200, 404], 'got status code "%s" from "%s"' % (rv.status_code, url)
return rv.status_code == 200
@classmethod
def job_status(cls, job_id, job_type='jobs'):
"""
Get the status of a job
"""
url = '/%s/%s/status' % (job_type, job_id)
rv = cls.app.get(url)
assert rv.status_code == 200, 'Cannot get status of job %s. "%s" returned %s' % (job_id, url, rv.status_code)
status = json.loads(rv.data)
return status['status']
@classmethod
def abort_job(cls, job_id, job_type='jobs'):
"""
Abort a job
Returns the HTTP status code
"""
rv = cls.app.post('/%s/%s/abort' % (job_type, job_id))
return rv.status_code
@classmethod
def job_wait_completion(cls, job_id, timeout=10, polling_period=0.5, job_type='jobs'):
"""
Poll the job status until it completes
Returns the final status
Arguments:
job_id -- the job to wait for
Keyword arguments:
timeout -- maximum wait time (seconds)
polling_period -- how often to poll (seconds)
job_type -- [datasets|models]
"""
start = time.time()
while True:
status = cls.job_status(job_id, job_type=job_type)
if status in ['Done', 'Abort', 'Error']:
return status
assert (time.time() - start) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(polling_period)
@classmethod
def delete_job(cls, job_id, job_type='jobs'):
"""
Delete a job
Returns the HTTP status code
"""
rv = cls.app.delete('/%s/%s' % (job_type, job_id))
return rv.status_code
################################################################################
# Test classes
################################################################################
class TestViews(BaseViewsTest):
def test_homepage(self):
rv = self.app.get('/')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
for text in ['Home', 'Datasets', 'Models']:
assert text in rv.data, 'unexpected page format'
def test_invalid_page(self):
rv = self.app.get('/foo')
assert rv.status_code == 404, 'should return 404'
def test_autocomplete(self):
for absolute_path in (True, False):
yield self.check_autocomplete, absolute_path
def check_autocomplete(self, absolute_path):
path = '/' if absolute_path else './'
url = '/autocomplete/path?query=%s' % (urllib.quote(path,safe=''))
rv = self.app.get(url)
assert rv.status_code == 200
status = json.loads(rv.data)
assert 'suggestions' in status
| bsd-3-clause | -9,086,761,276,290,964,000 | 32.266667 | 117 | 0.540859 | false |
zapstar/gae-location | main.py | 1 | 5991 | #!/usr/bin/env python
import os
import webapp2
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
#Data store model
class GeoLocation(db.Model):
user = db.UserProperty()
date = db.DateTimeProperty(auto_now_add = True)
#GeoPt object which stores Latitude and Longitude
position = db.GeoPtProperty()
address = db.PostalAddressProperty()
header = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
#Class Variables: To be passed as template values
admin_str = ''
geo_str = ''
loginout_str = ''
user_str = ''
#GET method
def get(self):
#Check if the user has logged in (Google Account)
user = users.get_current_user()
if not user:
#Create appropriate login/logout string for the template
login_url = users.create_login_url(self.request.url)
#self.loginout_str = '<a href="' + login_url + '">Login</a>'
self.loginout_str = ''
#Ask the user to login if he wants personalized results.
self.geo_str = '<center><p>Please <a href="' + login_url + '">login here</a> with your Google Account to enjoy personalized geo-location based services.</p></center>'
else:
#Create appropriate login/logout string for the template
logout_url = users.create_logout_url(self.request.url)
self.loginout_str = '<a href="' + logout_url + '">Logout</a>'
#If the user is admin generate Admin Area Link string
if users.is_current_user_admin():
self.admin_str = '<a href="/admin/">Admin Area</a> |'
#Welcome string for the user (his e-mail ID for now)
self.user_str = '<p>Hello, ' + user.email() + '</p>'
#Selective JavaScript html to be pasted if the user has logged in.
self.geo_str = """<!-- Geo-Coding JavaScript start -->
<center>
<p id="geoloc"><img height="50px" width="50px" src="static/loading.gif" alt="Loading ..." />
<br>Waiting for your permission/Processing ...</p>
</center>
<script src="static/jquery-min.js" type="text/javascript" charset="utf-8"></script>
<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?sensor=false"></script>
<script src="http://code.google.com/apis/gears/gears_init.js" type="text/javascript" charset="utf-8"></script>
<script src="static/geo-min.js" type="text/javascript" charset="utf-8"></script>
<script type="text/javascript" src="static/main.js"></script>
<!-- Geo-coding JavaScript End -->"""
#templating and rendering using the above variables
template_values = {
'loginout_str' : self.loginout_str,
'geo_str' : self.geo_str,
'user_str' : self.user_str,
'admin_str' : self.admin_str
}
file_path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
html = template.render(file_path, template_values)
self.response.out.write(html)
#Class MainHandler End
#When a user posts the data to the server this handles the request
class StoreHandler(webapp2.RequestHandler):
def post(self):
gstore = GeoLocation(parent = None)
gstore.user = users.get_current_user()
gstore.position = db.GeoPt(float(self.request.get('lat')), float(self.request.get('long')))
gstore.header = db.Text(str(self.request.headers))
address = self.request.get('address')
gstore.address = db.PostalAddress(address)
gstore.put()
#Getting the values from POST request header and inserting them into the DataStore
#End of StoreHandler class
#Admin Area class: Shows the last 100 peoples' information
#as a table
class AdminHandler(webapp2.RequestHandler):
#Again some class variables to handle template values
loginout_str = ''
admin_str = ''
query_dict = None
#Get method
def get(self):
#See if the user has logged in
user = users.get_current_user()
if user:
#Double check if the user is an administrator
if users.is_current_user_admin():
#Create appropriate login/logout url
logout_url = users.create_logout_url(self.request.url)
self.loginout_str = '<a href="' + logout_url + '">Logout</a>'
#Admin Area Login Link (Not necessary)
if users.is_current_user_admin():
self.admin_str = '<a href="/admin/">Admin Area</a> |'
#Query the datastore for the last 100 entries from the dataModel
#named 'GeoLocation', there are no ancestors for this datastore (no namespaces)
self.query_dict = db.GqlQuery("SELECT * FROM GeoLocation ORDER BY date DESC LIMIT 100")
#the regular templating follows this code
template_values = {
'table' : self.query_dict,
'loginout_str' : self.loginout_str,
'admin_str' : self.admin_str
}
file_path = os.path.join(os.path.dirname(__file__), 'templates/admin.html')
html = template.render(file_path, template_values)
self.response.out.write(html)
else:
                self.response.out.write('You\'re not an administrator!')
else:
self.response.out.write('Please <a href="' + users.create_login_url() + '">Login</a> here')
app = webapp2.WSGIApplication([('/store',StoreHandler),('/', MainHandler),('/admin/.*', AdminHandler)], debug=True) | mit | -1,185,012,889,165,203,500 | 44.052632 | 178 | 0.581372 | false |
prasunkgupta/sca | SCAtool_resize.py | 1 | 5123 | # -*- coding: utf-8 -*-
# Snow cover area assessment tool using Python
#
# Created: Fri Apr 25 2014
# Authors: Antara / Prasun (@pkg_sd)
#
# Published under GPLv3
from PySide import QtCore, QtGui
import gdal
import numpy
import pylab as plt
import glob
class Ui_Dialog(object):
folder = ''
files = []
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(788, 371)
self.lineEdit = QtGui.QLineEdit(Dialog)
self.lineEdit.setGeometry(QtCore.QRect(30, 90, 381, 31))
self.lineEdit.setObjectName("lineEdit")
self.pushButton = QtGui.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(420, 90, 91, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
        # RUN button
self.pushButton1 = QtGui.QPushButton(Dialog)
self.pushButton1.setGeometry(QtCore.QRect(220, 190, 91, 31))
font = QtGui.QFont()
font.setPointSize(10)
self.pushButton1.setFont(font)
self.pushButton1.setObjectName("pushButton1")
self.label = QtGui.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(-10, 30, 361, 41))
font = QtGui.QFont()
font.setPointSize(14)
font.setWeight(75)
font.setBold(True)
self.label.setFont(font)
self.label.setLayoutDirection(QtCore.Qt.RightToLeft)
self.label.setIndent(0)
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(540, 40, 241, 301))
font = QtGui.QFont()
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setAutoFillBackground(False)
self.label_2.setStyleSheet("""background color=rgb(255, 255, 255)""")
self.label_2.setObjectName("label_2")
self.line = QtGui.QFrame(Dialog)
self.line.setGeometry(QtCore.QRect(510, 90, 20, 201))
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.line_2 = QtGui.QFrame(Dialog)
self.line_2.setGeometry(QtCore.QRect(30, 70, 731, 16))
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.frame = QtGui.QFrame(Dialog)
self.frame.setGeometry(QtCore.QRect(9, 19, 771, 331))
self.frame.setStyleSheet("bgcolor=rgb(255, 255, 255)")
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName("frame")
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.pushButton,QtCore.SIGNAL("clicked()"),self.browse)
QtCore.QObject.connect(self.pushButton1,QtCore.SIGNAL("clicked()"),self.run)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("Dialog", "Browse", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton1.setText(QtGui.QApplication.translate("Dialog", "RUN", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", " Snow Cover Assesment Tool", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", "Calculates the snow cover area\n"
"using MODIS Terra datasets\n"
"and plots a graph of the change.\n"
"Requires an input folder of all\n"
"the MODIS .HDF files to be\n"
"analysed, arranged sequentially.", None, QtGui.QApplication.UnicodeUTF8))
@QtCore.Slot()
def browse(self):
print 'selecting folder containing modis data'
self.folder=str(QtGui.QFileDialog.getExistingDirectory())
self.folder.replace('\\','/')
self.lineEdit.setText(self.folder)
@QtCore.Slot()
def run(self):
print 'running the code'
self.files=glob.glob(self.folder+'/*.tif')
print 'Number of files found in %s = %d' %(self.folder, len(self.files))
areas=[]
years=[]
for eachfile in self.files:
print eachfile,' - ',
f=gdal.Open(eachfile)
data=f.ReadAsArray()
threshold_data=numpy.where(data==200)
areas.append(len(threshold_data[0])*(0.25))
years.append(eachfile[-36:-32])
plt.plot(years, areas)
plt.ylabel('Snow Cover Area in sq. km')
plt.xlabel('Years')
plt.show()
if __name__ == "__main__":
import sys
try:
app = QtGui.QApplication(sys.argv)
except RuntimeError:
app = QtCore.QCoreApplication.instance()
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
| gpl-3.0 | -4,829,754,988,613,635,000 | 38.10687 | 137 | 0.635565 | false |
polyanskiy/refractiveindex.info-scripts | scripts/Adachi 1991 - ZnSe.py | 1 | 3247 | # -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-04-09
# Original data: Adachi and Taguchi 1991, https://doi.org/10.1103/PhysRevB.43.9569
import numpy as np
import matplotlib.pyplot as plt
π = np.pi
# parameters from table II
E0 = 2.69 #eV
Δ0 = 3.10-E0 #eV
G0 = 0.017 #eV
A = 23.4 #eV**1.5
A0x = 0.03 #eV
Γ0 = 0.030 #eV
E1 = 4.75 #eV
Δ1 = 5.05-E1 #eV
B1x = 2.31 #eV
B2x = 1.16 #eV
Γ1 = 0.37 #eV
E2 = 6.7 #eV
C = 1.6
γ = 0.2
ε1 = 1.2
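# Model structure (Adachi's model dielectric function): the total ε computed below is the sum of
# Epsilon_A (E0 and E0+Δ0 interband terms), Epsilon_Ax (discrete E0 exciton series),
# Epsilon_Bx (E1 and E1+Δ1 exciton terms), Epsilon_C (damped harmonic oscillator at E2) and the constant ε1.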
def Epsilon_A(ħω):
χ0 = (ħω+1j*Γ0) / E0
χs0 = (ħω+1j*Γ0) / (E0+Δ0)
fχ0 = χ0**-2 * ( 2-(1+χ0)**0.5-(1-χ0)**0.5 )
fχs0 = χs0**-2 * ( 2-(1+χs0)**0.5-(1-χs0)**0.5 )
return A*E0**-1.5 * (fχ0+0.5*(E0/(E0+Δ0))**1.5*fχs0)
def Epsilon_Ax(ħω):
y=0
for n in range(1,1000):
y += A0x/n**3 * ( 1/(E0-G0/n**2-ħω-1j*Γ0) + 0.5/(E0+Δ0-G0/n**2-ħω-1j*Γ0) )
return y
def Epsilon_Bx(ħω):
y=0
for n in range(1,1000):
y += 1/(2*n-1)**3 * ( B1x/(E1-ħω-1j*Γ1) + B2x/(E1+Δ1-ħω-1j*Γ1) )
return y
def Epsilon_C(ħω):
χ2 = ħω/E2
return C/((1-χ2**2)-1j*χ2*γ)
ev_min=1.5
ev_max=5.3
npoints=500
eV = np.linspace(ev_min, ev_max, npoints)
μm = 4.13566733e-1*2.99792458/eV
εA = Epsilon_A(eV)
εAx = Epsilon_Ax(eV)
εBx = Epsilon_Bx(eV)
εC = Epsilon_C(eV)
ε = εA + εAx + εBx + εC + ε1
n = (ε**.5).real
k = (ε**.5).imag
α = 4*π*k/μm*1e4 #1/cm
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
#plot ε vs eV
plt.figure(1)
plt.plot(eV, ε.real, label="ε1")
plt.plot(eV, ε.imag, label="ε2")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot intermediate data (for debugging)
plt.figure(2)
plt.plot(eV, εA.real, label="Re(εA)")
plt.plot(eV, εAx.real, label="Re(εAx)")
plt.plot(eV, εBx.real, label="Re(εBx)")
plt.plot(eV, εC.real, label="Re(εC)")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
plt.figure(3)
plt.plot(eV, εA.imag, label="Im(εA)")
plt.plot(eV, εAx.imag, label="Im(εAx)")
plt.plot(eV, εBx.imag, label="Im(εBx)")
plt.plot(eV, εC.imag, label="Im(εC)")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs eV
plt.figure(4)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(5)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot α vs eV
plt.figure(6)
plt.plot(eV,α)
plt.yscale('log')
plt.ylim([1e3,1e7])
plt.xlabel('Photon energy (eV)')
plt.ylabel('α (1/cm)') | gpl-3.0 | -7,386,708,689,700,512,000 | 23.379845 | 82 | 0.567112 | false |
bernard357/shellbot | tests/routes/test_wrapper.py | 1 | 2270 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import gc
import logging
import os
from multiprocessing import Queue
import sys
from shellbot import Context
from shellbot.routes.wrapper import Wrapper
class WrapperTests(unittest.TestCase):
def tearDown(self):
collected = gc.collect()
logging.info("Garbage collector: collected %d objects." % (collected))
def test_wrapper(self):
r = Wrapper(Context())
with self.assertRaises(AttributeError):
r.get()
with self.assertRaises(AttributeError):
r.post()
with self.assertRaises(AttributeError):
r.put()
with self.assertRaises(AttributeError):
r.delete()
def hook():
return 'hello'
def hook_patched():
return 'world'
r = Wrapper(callable=hook,
route='/wrapped')
self.assertEqual(r.route, '/wrapped')
self.assertTrue(r.callable is not None)
self.assertEqual(r.get(), 'hello')
self.assertEqual(r.post(), 'hello')
self.assertEqual(r.put(), 'hello')
self.assertEqual(r.delete(), 'hello')
r.callable = hook_patched
self.assertEqual(r.get(), 'world')
self.assertEqual(r.post(), 'world')
self.assertEqual(r.put(), 'world')
self.assertEqual(r.delete(), 'world')
context = Context()
class Callable(object):
def __init__(self, context):
self.context = context
def hook(self, **kwargs):
self.context.set('signal', 'wrapped!')
return 'OK'
callable = Callable(context)
r = Wrapper(context=context,
callable=callable.hook,
route='/wrapped')
self.assertEqual(r.route, '/wrapped')
self.assertEqual(r.callable, callable.hook)
self.assertEqual(context.get('signal'), None)
self.assertEqual(r.get(), 'OK')
self.assertEqual(r.post(), 'OK')
self.assertEqual(r.put(), 'OK')
self.assertEqual(r.delete(), 'OK')
self.assertEqual(context.get('signal'), 'wrapped!')
if __name__ == '__main__':
Context.set_logger()
sys.exit(unittest.main())
| apache-2.0 | -6,020,358,717,423,782,000 | 25.091954 | 78 | 0.573128 | false |
pansapiens/mytardis | tardis/tardis_portal/auth/__init__.py | 3 | 1877 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2011, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010-2011, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from tardis.tardis_portal.auth.authservice import AuthService
# The auth_service ``singleton``
auth_service = AuthService()
| bsd-3-clause | 3,798,589,930,639,004,000 | 52.628571 | 79 | 0.764518 | false |
pbanaszkiewicz/amy | amy/workshops/migrations/0040_invoice_status.py | 1 | 1412 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def migrate_invoiced(apps, schema_editor):
"""Migrate `invoiced` bool field into `invoice_status` text field."""
Event = apps.get_model('workshops', 'Event')
# null → 'unknown'
Event.objects.filter(invoiced__isnull=True) \
.update(invoice_status='unknown')
# true → 'invoiced'
Event.objects.filter(invoiced=True) \
.update(invoice_status='invoiced')
    # false → 'not-invoiced'
Event.objects.filter(invoiced=False) \
.update(invoice_status='not-invoiced')
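# Note: the RunPython operation below is given no reverse_code, so this data migration cannot be
# reversed automatically once the old `invoiced` boolean field has been removed.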
class Migration(migrations.Migration):
dependencies = [
('workshops', '0039_add_permission_groups'),
]
operations = [
migrations.AddField(
model_name='event',
name='invoice_status',
field=models.CharField(verbose_name='Invoice status', max_length=40, default='unknown', blank=True, choices=[('unknown', 'Unknown'), ('invoiced', 'Invoiced'), ('not-invoiced', 'Not invoiced'), ('na-self-org', 'Not applicable because self-organized'), ('na-waiver', 'Not applicable because waiver granted'), ('na-other', 'Not applicable because other arrangements made')]),
),
migrations.RunPython(migrate_invoiced),
migrations.RemoveField(
model_name='event',
name='invoiced',
),
]
| mit | 3,336,593,032,867,878,400 | 35.051282 | 384 | 0.633713 | false |
FoodLust/FL | members/tests.py | 1 | 4060 | from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from foodlust.tests.factories import UserFactory, MealFactory
class MemberTestCase(TestCase):
""" This class will establish the test cases for the member model."""
def setUp(self):
""" Setup for Member model test """
self.user = User(username='Test User', first_name='Test')
self.user.save()
def test_user_is_member(self):
""" Test to confirm that user is a member """
self.assertTrue(hasattr(self.user, 'member'))
def test_user_name(self):
""" Test to confirm username is correct on member model """
self.assertEqual(self.user.username, 'Test User')
def test_member_string_method(self):
""" Test to confirm member string method prints correctly """
self.assertEqual(str(self.user.member), 'Test')
def test_member_is_active(self):
""" Test to confirm member is made active upon registration """
self.assertTrue(self.user.member.active)
class TestRegistrationView(TestCase):
"""Test Registration View."""
def setUp(self):
"""Set up registration."""
self.response = self.client.get(reverse('registration_register'))
def test_register_view_status_code(self):
"""Test status code is 200."""
self.assertEqual(self.response.status_code, 200)
def test_register_view_has_form(self):
"""Test if template has form"""
self.assertContains(self.response, "</form>")
def test_register_failure(self):
"""Test register with bad creditinals."""
self.assertEqual(
self.client.post(reverse('registration_register'), {}).status_code,
200)
class TestMemberView(TestCase):
"""Test profile view."""
def setUp(self):
"""Set up for authenticated user."""
self.user = User(username="mike", first_name='mike')
self.user.save()
def test_member_page_status_code(self):
"""Test member page status code."""
self.client.force_login(self.user)
self.assertEqual(self.client.get(reverse('member')).status_code, 200)
def test_login_required(self):
"""Test member page requires login."""
self.client.logout()
self.assertEqual(self.client.get(reverse('member')).status_code, 302)
def test_template_has_profile_tag(self):
"""Test template contains the word profile."""
self.client.force_login(self.user)
response = self.client.get(reverse('member'))
self.assertContains(response, '<h1>Profile</h1>')
def test_template_contains_username(self):
"""Test member page has username."""
self.client.force_login(self.user)
response = self.client.get(reverse('member'))
self.assertContains(response, self.user.username)
def test_template_contains_first_name(self):
"""Test member page has first name."""
self.client.force_login(self.user)
response = self.client.get(reverse('member'))
self.assertContains(response, 'mike')
def test_meals_on_member_page(self):
"""Test meals show up on member page."""
self.meal = MealFactory()
self.client.force_login(self.meal.member)
response = self.client.get(reverse('member'))
self.assertContains(response, self.meal.title)
class TestEditMemberView(TestCase):
"""Testcase for edit member info."""
def setUp(self):
"""Setup for testcase."""
self.user = User(username="mike", first_name='mike')
self.user.save()
self.client.force_login(self.user)
self.response = self.client.get(reverse('member_edit'))
    def test_member_edit_status_code_authd(self):
        """Test that the edit page returns status code 200 for an authenticated user."""
self.assertEqual(self.response.status_code, 200)
def test_member_edit_contains_edit(self):
"""Test edit page contains edit profile."""
self.assertContains(self.response, 'Edit') | mit | -4,471,844,967,980,824,600 | 35.258929 | 79 | 0.646552 | false |
tbicr/osm-validator | init_env.py | 1 | 1437 | import os
import cryptography.fernet
def wizard():
print('This is osm validator setting initialization wizard.')
print('Please read and enter a few settings.')
print('Press ENTER to continue.')
input()
print()
if os.path.exists('.env'):
        print('You already have a `.env` file, please move it to another location '
              'if you are sure that you want to use this wizard to create a new `.env` file.')
return
print('You need configure and provide OpenStreetMap OAuth credentials.')
print('Please check documentation: https://wiki.openstreetmap.org/wiki/OAuth')
print('and configure new application: '
'https://www.openstreetmap.org/user/username/oauth_clients/new')
print()
OAUTH_OPENSTREETMAP_KEY = input('Enter OSM Consumer Key: ').strip()
OAUTH_OPENSTREETMAP_SECRET = input('Enter OSM Consumer Secret: ').strip()
print()
with open('.env.template') as template_handle, open('.env', 'w') as env_handle:
env_handle.write(template_handle.read().format(
SECRET_KEY=cryptography.fernet.Fernet.generate_key().decode(),
OAUTH_OPENSTREETMAP_KEY=OAUTH_OPENSTREETMAP_KEY,
OAUTH_OPENSTREETMAP_SECRET=OAUTH_OPENSTREETMAP_SECRET,
))
print('Well done!')
    print('`.env` created, now you can start your application with the `docker-compose up` command.')
print()
if __name__ == '__main__':
wizard()
| mit | 4,221,900,792,476,179,500 | 34.04878 | 96 | 0.659708 | false |
Habitissimo/vespapp-web | web/urls.py | 1 | 2169 | from django.conf.urls import url
from web import views
from web.views import HomePageView
from web.views import FAQView
from web.views import SightingExpertCommentsView
from web.views import SightingView
from web.views import SightingsView
from web.views import SightQuestionView
from web.views import LocationsPageView
from web.views import SightingCommentView
from web.views import SightingCommentsView
from web.views import SightExpertCommentView
from web.views import NewSightingView
urlpatterns = [
url(r'^$', HomePageView.as_view(), name='home'),
url(r'^faq/$', FAQView.as_view(), name='faq'),
url(r'^locations/$', LocationsPageView.as_view(), name='locations'),
url(r'^new_sighting/$', NewSightingView.as_view(), name='new_sighting'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/expert_comments/$',
SightingExpertCommentsView.as_view(), name='sighting_expert_comments'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/$', SightingView.as_view(), name="sighting_id"),
url(r'^sightings/$', SightingsView.as_view(), name='sightings'),
url(r'^sight_question/(?P<sighting_id>[0-9]+)/$', SightQuestionView.as_view(), name='sight_question'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/user_comments/$', SightingCommentsView.as_view(), name='sighting_comments'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/user_comment/(?P<comment_id>[0-9]+)/$', SightingCommentView.as_view(),
name='sighting_comment'),
url(r'^sighting/(?P<sighting_id>[0-9]+)/expert_comments/(?P<expert_comment_id>[0-9]+)$',
SightExpertCommentView.as_view(), name='sight_expert_comment'),
]
| gpl-3.0 | -6,261,989,701,574,931,000 | 50.642857 | 120 | 0.693868 | false |
quantumlib/Cirq | cirq-google/cirq_google/optimizers/two_qubit_gates/math_utils_test.py | 1 | 1458 | import numpy as np
import pytest
import cirq
from cirq import value
from cirq_google.optimizers.two_qubit_gates.math_utils import (
weyl_chamber_mesh,
kak_vector_infidelity,
random_qubit_unitary,
)
def test_weyl_chamber_mesh_spacing_too_small_throws_error():
with pytest.raises(ValueError, match='may cause system to crash'):
weyl_chamber_mesh(spacing=5e-4)
def test_kak_vector_infidelity_ignore_equivalent_nontrivial():
x, y, z = np.pi / 4, 1, 0.5
kak_0 = cirq.kak_canonicalize_vector(x, y, z).interaction_coefficients
kak_1 = cirq.kak_canonicalize_vector(x - 1e-3, y, z).interaction_coefficients
inf_check_equivalent = kak_vector_infidelity(kak_0, kak_1, False)
inf_ignore_equivalent = kak_vector_infidelity(kak_0, kak_1, True)
assert inf_check_equivalent < inf_ignore_equivalent
def test_random_qubit_unitary_shape():
rng = value.parse_random_state(11)
actual = random_qubit_unitary((3, 4, 5), True, rng).ravel()
rng = value.parse_random_state(11)
expected = random_qubit_unitary((3 * 4 * 5,), True, rng).ravel()
np.testing.assert_almost_equal(actual, expected)
def test_random_qubit_default():
rng = value.parse_random_state(11)
actual = random_qubit_unitary(randomize_global_phase=True, rng=rng).ravel()
rng = value.parse_random_state(11)
expected = random_qubit_unitary((1, 1, 1), True, rng=rng).ravel()
np.testing.assert_almost_equal(actual, expected)
| apache-2.0 | -3,370,362,060,325,714,400 | 33.714286 | 81 | 0.703704 | false |
crsmithdev/arrow | tests/test_parser.py | 1 | 61226 | import calendar
import os
import time
from datetime import datetime
import pytest
from dateutil import tz
import arrow
from arrow import formatter, parser
from arrow.constants import MAX_TIMESTAMP_US
from arrow.parser import DateTimeParser, ParserError, ParserMatchError
from .utils import make_full_tz_list
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParser:
def test_parse_multiformat(self, mocker):
mocker.patch(
"arrow.parser.DateTimeParser.parse",
string="str",
fmt="fmt_a",
side_effect=parser.ParserError,
)
with pytest.raises(parser.ParserError):
self.parser._parse_multiformat("str", ["fmt_a"])
mock_datetime = mocker.Mock()
mocker.patch(
"arrow.parser.DateTimeParser.parse",
string="str",
fmt="fmt_b",
return_value=mock_datetime,
)
result = self.parser._parse_multiformat("str", ["fmt_a", "fmt_b"])
assert result == mock_datetime
def test_parse_multiformat_all_fail(self, mocker):
mocker.patch(
"arrow.parser.DateTimeParser.parse",
string="str",
fmt="fmt_a",
side_effect=parser.ParserError,
)
mocker.patch(
"arrow.parser.DateTimeParser.parse",
string="str",
fmt="fmt_b",
side_effect=parser.ParserError,
)
with pytest.raises(parser.ParserError):
self.parser._parse_multiformat("str", ["fmt_a", "fmt_b"])
    def test_parse_multiformat_unexpected_fail(self, mocker):
        class UnexpectedError(Exception):
            pass
        mocker.patch(
            "arrow.parser.DateTimeParser.parse",
            string="str",
            fmt="fmt_a",
            side_effect=UnexpectedError,
        )
        with pytest.raises(UnexpectedError):
            self.parser._parse_multiformat("str", ["fmt_a", "fmt_b"])
def test_parse_token_nonsense(self):
parts = {}
self.parser._parse_token("NONSENSE", "1900", parts)
assert parts == {}
def test_parse_token_invalid_meridians(self):
parts = {}
self.parser._parse_token("A", "a..m", parts)
assert parts == {}
self.parser._parse_token("a", "p..m", parts)
assert parts == {}
def test_parser_no_caching(self, mocker):
mocked_parser = mocker.patch(
"arrow.parser.DateTimeParser._generate_pattern_re", fmt="fmt_a"
)
self.parser = parser.DateTimeParser(cache_size=0)
for _ in range(100):
self.parser._generate_pattern_re("fmt_a")
assert mocked_parser.call_count == 100
def test_parser_1_line_caching(self, mocker):
mocked_parser = mocker.patch("arrow.parser.DateTimeParser._generate_pattern_re")
self.parser = parser.DateTimeParser(cache_size=1)
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_a")
assert mocked_parser.call_count == 1
assert mocked_parser.call_args_list[0] == mocker.call(fmt="fmt_a")
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_b")
assert mocked_parser.call_count == 2
assert mocked_parser.call_args_list[1] == mocker.call(fmt="fmt_b")
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_a")
assert mocked_parser.call_count == 3
assert mocked_parser.call_args_list[2] == mocker.call(fmt="fmt_a")
def test_parser_multiple_line_caching(self, mocker):
mocked_parser = mocker.patch("arrow.parser.DateTimeParser._generate_pattern_re")
self.parser = parser.DateTimeParser(cache_size=2)
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_a")
assert mocked_parser.call_count == 1
assert mocked_parser.call_args_list[0] == mocker.call(fmt="fmt_a")
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_b")
assert mocked_parser.call_count == 2
assert mocked_parser.call_args_list[1] == mocker.call(fmt="fmt_b")
# fmt_a and fmt_b are in the cache, so no new calls should be made
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_a")
for _ in range(100):
self.parser._generate_pattern_re(fmt="fmt_b")
assert mocked_parser.call_count == 2
assert mocked_parser.call_args_list[0] == mocker.call(fmt="fmt_a")
assert mocked_parser.call_args_list[1] == mocker.call(fmt="fmt_b")
def test_YY_and_YYYY_format_list(self):
assert self.parser.parse("15/01/19", ["DD/MM/YY", "DD/MM/YYYY"]) == datetime(
2019, 1, 15
)
# Regression test for issue #580
assert self.parser.parse("15/01/2019", ["DD/MM/YY", "DD/MM/YYYY"]) == datetime(
2019, 1, 15
)
assert (
self.parser.parse(
"15/01/2019T04:05:06.789120Z",
["D/M/YYThh:mm:ss.SZ", "D/M/YYYYThh:mm:ss.SZ"],
)
== datetime(2019, 1, 15, 4, 5, 6, 789120, tzinfo=tz.tzutc())
)
# regression test for issue #447
def test_timestamp_format_list(self):
# should not match on the "X" token
assert (
self.parser.parse(
"15 Jul 2000",
["MM/DD/YYYY", "YYYY-MM-DD", "X", "DD-MMMM-YYYY", "D MMM YYYY"],
)
== datetime(2000, 7, 15)
)
with pytest.raises(ParserError):
self.parser.parse("15 Jul", "X")
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParserParse:
def test_parse_list(self, mocker):
mocker.patch(
"arrow.parser.DateTimeParser._parse_multiformat",
string="str",
formats=["fmt_a", "fmt_b"],
return_value="result",
)
result = self.parser.parse("str", ["fmt_a", "fmt_b"])
assert result == "result"
def test_parse_unrecognized_token(self, mocker):
mocker.patch.dict("arrow.parser.DateTimeParser._BASE_INPUT_RE_MAP")
del arrow.parser.DateTimeParser._BASE_INPUT_RE_MAP["YYYY"]
# need to make another local parser to apply patch changes
_parser = parser.DateTimeParser()
with pytest.raises(parser.ParserError):
_parser.parse("2013-01-01", "YYYY-MM-DD")
def test_parse_parse_no_match(self):
with pytest.raises(ParserError):
self.parser.parse("01-01", "YYYY-MM-DD")
def test_parse_separators(self):
with pytest.raises(ParserError):
self.parser.parse("1403549231", "YYYY-MM-DD")
def test_parse_numbers(self):
self.expected = datetime(2012, 1, 1, 12, 5, 10)
assert (
self.parser.parse("2012-01-01 12:05:10", "YYYY-MM-DD HH:mm:ss")
== self.expected
)
def test_parse_year_two_digit(self):
self.expected = datetime(1979, 1, 1, 12, 5, 10)
assert (
self.parser.parse("79-01-01 12:05:10", "YY-MM-DD HH:mm:ss") == self.expected
)
def test_parse_timestamp(self):
tz_utc = tz.tzutc()
float_timestamp = time.time()
int_timestamp = int(float_timestamp)
self.expected = datetime.fromtimestamp(int_timestamp, tz=tz_utc)
assert self.parser.parse(f"{int_timestamp:d}", "X") == self.expected
self.expected = datetime.fromtimestamp(float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{float_timestamp:f}", "X") == self.expected
# test handling of ns timestamp (arrow will round to 6 digits regardless)
self.expected = datetime.fromtimestamp(float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{float_timestamp:f}123", "X") == self.expected
# test ps timestamp (arrow will round to 6 digits regardless)
self.expected = datetime.fromtimestamp(float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{float_timestamp:f}123456", "X") == self.expected
# NOTE: timestamps cannot be parsed from natural language strings (by removing the ^...$) because it will
# break cases like "15 Jul 2000" and a format list (see issue #447)
with pytest.raises(ParserError):
natural_lang_string = "Meet me at {} at the restaurant.".format(
float_timestamp
)
self.parser.parse(natural_lang_string, "X")
with pytest.raises(ParserError):
self.parser.parse("1565982019.", "X")
with pytest.raises(ParserError):
self.parser.parse(".1565982019", "X")
# NOTE: negative timestamps cannot be handled by datetime on Windows
# Must use timedelta to handle them: https://stackoverflow.com/questions/36179914
@pytest.mark.skipif(
os.name == "nt", reason="negative timestamps are not supported on Windows"
)
def test_parse_negative_timestamp(self):
# regression test for issue #662
tz_utc = tz.tzutc()
float_timestamp = time.time()
int_timestamp = int(float_timestamp)
negative_int_timestamp = -int_timestamp
self.expected = datetime.fromtimestamp(negative_int_timestamp, tz=tz_utc)
assert self.parser.parse(f"{negative_int_timestamp:d}", "X") == self.expected
negative_float_timestamp = -float_timestamp
self.expected = datetime.fromtimestamp(negative_float_timestamp, tz=tz_utc)
assert self.parser.parse(f"{negative_float_timestamp:f}", "X") == self.expected
def test_parse_expanded_timestamp(self):
# test expanded timestamps that include milliseconds
# and microseconds as multiples rather than decimals
# requested in issue #357
tz_utc = tz.tzutc()
timestamp = 1569982581.413132
timestamp_milli = round(timestamp * 1000)
timestamp_micro = round(timestamp * 1_000_000)
# "x" token should parse integer timestamps below MAX_TIMESTAMP normally
self.expected = datetime.fromtimestamp(int(timestamp), tz=tz_utc)
assert self.parser.parse(f"{int(timestamp):d}", "x") == self.expected
self.expected = datetime.fromtimestamp(round(timestamp, 3), tz=tz_utc)
assert self.parser.parse(f"{timestamp_milli:d}", "x") == self.expected
self.expected = datetime.fromtimestamp(timestamp, tz=tz_utc)
assert self.parser.parse(f"{timestamp_micro:d}", "x") == self.expected
# anything above max µs timestamp should fail
with pytest.raises(ValueError):
self.parser.parse(f"{int(MAX_TIMESTAMP_US) + 1:d}", "x")
# floats are not allowed with the "x" token
with pytest.raises(ParserMatchError):
self.parser.parse(f"{timestamp:f}", "x")
def test_parse_names(self):
self.expected = datetime(2012, 1, 1)
assert self.parser.parse("January 1, 2012", "MMMM D, YYYY") == self.expected
assert self.parser.parse("Jan 1, 2012", "MMM D, YYYY") == self.expected
def test_parse_pm(self):
self.expected = datetime(1, 1, 1, 13, 0, 0)
assert self.parser.parse("1 pm", "H a") == self.expected
assert self.parser.parse("1 pm", "h a") == self.expected
self.expected = datetime(1, 1, 1, 1, 0, 0)
assert self.parser.parse("1 am", "H A") == self.expected
assert self.parser.parse("1 am", "h A") == self.expected
self.expected = datetime(1, 1, 1, 0, 0, 0)
assert self.parser.parse("12 am", "H A") == self.expected
assert self.parser.parse("12 am", "h A") == self.expected
self.expected = datetime(1, 1, 1, 12, 0, 0)
assert self.parser.parse("12 pm", "H A") == self.expected
assert self.parser.parse("12 pm", "h A") == self.expected
def test_parse_tz_hours_only(self):
self.expected = datetime(2025, 10, 17, 5, 30, 10, tzinfo=tz.tzoffset(None, 0))
parsed = self.parser.parse("2025-10-17 05:30:10+00", "YYYY-MM-DD HH:mm:ssZ")
assert parsed == self.expected
def test_parse_tz_zz(self):
self.expected = datetime(2013, 1, 1, tzinfo=tz.tzoffset(None, -7 * 3600))
assert self.parser.parse("2013-01-01 -07:00", "YYYY-MM-DD ZZ") == self.expected
@pytest.mark.parametrize("full_tz_name", make_full_tz_list())
def test_parse_tz_name_zzz(self, full_tz_name):
self.expected = datetime(2013, 1, 1, tzinfo=tz.gettz(full_tz_name))
assert (
self.parser.parse(f"2013-01-01 {full_tz_name}", "YYYY-MM-DD ZZZ")
== self.expected
)
# note that offsets are not timezones
with pytest.raises(ParserError):
self.parser.parse("2013-01-01 12:30:45.9+1000", "YYYY-MM-DDZZZ")
with pytest.raises(ParserError):
self.parser.parse("2013-01-01 12:30:45.9+10:00", "YYYY-MM-DDZZZ")
with pytest.raises(ParserError):
self.parser.parse("2013-01-01 12:30:45.9-10", "YYYY-MM-DDZZZ")
def test_parse_subsecond(self):
self.expected = datetime(2013, 1, 1, 12, 30, 45, 900000)
assert (
self.parser.parse("2013-01-01 12:30:45.9", "YYYY-MM-DD HH:mm:ss.S")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 980000)
assert (
self.parser.parse("2013-01-01 12:30:45.98", "YYYY-MM-DD HH:mm:ss.SS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987000)
assert (
self.parser.parse("2013-01-01 12:30:45.987", "YYYY-MM-DD HH:mm:ss.SSS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987600)
assert (
self.parser.parse("2013-01-01 12:30:45.9876", "YYYY-MM-DD HH:mm:ss.SSSS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987650)
assert (
self.parser.parse("2013-01-01 12:30:45.98765", "YYYY-MM-DD HH:mm:ss.SSSSS")
== self.expected
)
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
assert (
self.parser.parse(
"2013-01-01 12:30:45.987654", "YYYY-MM-DD HH:mm:ss.SSSSSS"
)
== self.expected
)
def test_parse_subsecond_rounding(self):
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
datetime_format = "YYYY-MM-DD HH:mm:ss.S"
# round up
string = "2013-01-01 12:30:45.9876539"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# round down
string = "2013-01-01 12:30:45.98765432"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# round half-up
string = "2013-01-01 12:30:45.987653521"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# round half-down
string = "2013-01-01 12:30:45.9876545210"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# overflow (zero out the subseconds and increment the seconds)
# regression tests for issue #636
def test_parse_subsecond_rounding_overflow(self):
datetime_format = "YYYY-MM-DD HH:mm:ss.S"
self.expected = datetime(2013, 1, 1, 12, 30, 46)
string = "2013-01-01 12:30:45.9999995"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
self.expected = datetime(2013, 1, 1, 12, 31, 0)
string = "2013-01-01 12:30:59.9999999"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
self.expected = datetime(2013, 1, 2, 0, 0, 0)
string = "2013-01-01 23:59:59.9999999"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# 6 digits should remain unrounded
self.expected = datetime(2013, 1, 1, 12, 30, 45, 999999)
string = "2013-01-01 12:30:45.999999"
assert self.parser.parse(string, datetime_format) == self.expected
assert self.parser.parse_iso(string) == self.expected
# Regression tests for issue #560
def test_parse_long_year(self):
with pytest.raises(ParserError):
self.parser.parse("09 January 123456789101112", "DD MMMM YYYY")
with pytest.raises(ParserError):
self.parser.parse("123456789101112 09 January", "YYYY DD MMMM")
with pytest.raises(ParserError):
self.parser.parse("68096653015/01/19", "YY/M/DD")
def test_parse_with_extra_words_at_start_and_end_invalid(self):
input_format_pairs = [
("blah2016", "YYYY"),
("blah2016blah", "YYYY"),
("2016blah", "YYYY"),
("2016-05blah", "YYYY-MM"),
("2016-05-16blah", "YYYY-MM-DD"),
("2016-05-16T04:05:06.789120blah", "YYYY-MM-DDThh:mm:ss.S"),
("2016-05-16T04:05:06.789120ZblahZ", "YYYY-MM-DDThh:mm:ss.SZ"),
("2016-05-16T04:05:06.789120Zblah", "YYYY-MM-DDThh:mm:ss.SZ"),
("2016-05-16T04:05:06.789120blahZ", "YYYY-MM-DDThh:mm:ss.SZ"),
]
for pair in input_format_pairs:
with pytest.raises(ParserError):
self.parser.parse(pair[0], pair[1])
def test_parse_with_extra_words_at_start_and_end_valid(self):
# Spaces surrounding the parsable date are ok because we
# allow the parsing of natural language input. Additionally, a single
# character of specific punctuation before or after the date is okay.
# See docs for full list of valid punctuation.
assert self.parser.parse("blah 2016 blah", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse("blah 2016", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse("2016 blah", "YYYY") == datetime(2016, 1, 1)
# test one additional space along with space divider
assert self.parser.parse(
"blah 2016-05-16 04:05:06.789120", "YYYY-MM-DD hh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
"2016-05-16 04:05:06.789120 blah", "YYYY-MM-DD hh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
# test one additional space along with T divider
assert self.parser.parse(
"blah 2016-05-16T04:05:06.789120", "YYYY-MM-DDThh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
"2016-05-16T04:05:06.789120 blah", "YYYY-MM-DDThh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert (
self.parser.parse(
"Meet me at 2016-05-16T04:05:06.789120 at the restaurant.",
"YYYY-MM-DDThh:mm:ss.S",
)
== datetime(2016, 5, 16, 4, 5, 6, 789120)
)
assert (
self.parser.parse(
"Meet me at 2016-05-16 04:05:06.789120 at the restaurant.",
"YYYY-MM-DD hh:mm:ss.S",
)
== datetime(2016, 5, 16, 4, 5, 6, 789120)
)
# regression test for issue #701
# tests cases of a partial match surrounded by punctuation
# for the list of valid punctuation, see documentation
def test_parse_with_punctuation_fences(self):
assert self.parser.parse(
"Meet me at my house on Halloween (2019-31-10)", "YYYY-DD-MM"
) == datetime(2019, 10, 31)
assert self.parser.parse(
"Monday, 9. September 2019, 16:15-20:00", "dddd, D. MMMM YYYY"
) == datetime(2019, 9, 9)
assert self.parser.parse("A date is 11.11.2011.", "DD.MM.YYYY") == datetime(
2011, 11, 11
)
with pytest.raises(ParserMatchError):
self.parser.parse("11.11.2011.1 is not a valid date.", "DD.MM.YYYY")
with pytest.raises(ParserMatchError):
self.parser.parse(
"This date has too many punctuation marks following it (11.11.2011).",
"DD.MM.YYYY",
)
def test_parse_with_leading_and_trailing_whitespace(self):
assert self.parser.parse(" 2016", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse("2016 ", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse(" 2016 ", "YYYY") == datetime(2016, 1, 1)
assert self.parser.parse(
" 2016-05-16 04:05:06.789120 ", "YYYY-MM-DD hh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
assert self.parser.parse(
" 2016-05-16T04:05:06.789120 ", "YYYY-MM-DDThh:mm:ss.S"
) == datetime(2016, 5, 16, 4, 5, 6, 789120)
def test_parse_YYYY_DDDD(self):
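# DDDD here appears to be a day-of-year (ordinal date) token: "1998-136"
# is the 136th day of 1998, i.e. 1998-05-16, and day numbers that cannot
# exist in any year (456) raise a ParserError.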
assert self.parser.parse("1998-136", "YYYY-DDDD") == datetime(1998, 5, 16)
assert self.parser.parse("1998-006", "YYYY-DDDD") == datetime(1998, 1, 6)
with pytest.raises(ParserError):
self.parser.parse("1998-456", "YYYY-DDDD")
def test_parse_YYYY_DDD(self):
assert self.parser.parse("1998-6", "YYYY-DDD") == datetime(1998, 1, 6)
assert self.parser.parse("1998-136", "YYYY-DDD") == datetime(1998, 5, 16)
with pytest.raises(ParserError):
self.parser.parse("1998-756", "YYYY-DDD")
# month cannot be passed with DDD and DDDD tokens
def test_parse_YYYY_MM_DDDD(self):
with pytest.raises(ParserError):
self.parser.parse("2015-01-009", "YYYY-MM-DDDD")
# year is required with the DDD and DDDD tokens
def test_parse_DDD_only(self):
with pytest.raises(ParserError):
self.parser.parse("5", "DDD")
def test_parse_DDDD_only(self):
with pytest.raises(ParserError):
self.parser.parse("145", "DDDD")
def test_parse_ddd_and_dddd(self):
fr_parser = parser.DateTimeParser("fr")
# Day of week should be ignored when a day is passed
# 2019-10-17 is a Thursday, so we know day of week
# is ignored if the same date is outputted
expected = datetime(2019, 10, 17)
assert self.parser.parse("Tue 2019-10-17", "ddd YYYY-MM-DD") == expected
assert fr_parser.parse("mar 2019-10-17", "ddd YYYY-MM-DD") == expected
assert self.parser.parse("Tuesday 2019-10-17", "dddd YYYY-MM-DD") == expected
assert fr_parser.parse("mardi 2019-10-17", "dddd YYYY-MM-DD") == expected
# Get first Tuesday after epoch
expected = datetime(1970, 1, 6)
assert self.parser.parse("Tue", "ddd") == expected
assert fr_parser.parse("mar", "ddd") == expected
assert self.parser.parse("Tuesday", "dddd") == expected
assert fr_parser.parse("mardi", "dddd") == expected
# Get first Tuesday in 2020
expected = datetime(2020, 1, 7)
assert self.parser.parse("Tue 2020", "ddd YYYY") == expected
assert fr_parser.parse("mar 2020", "ddd YYYY") == expected
assert self.parser.parse("Tuesday 2020", "dddd YYYY") == expected
assert fr_parser.parse("mardi 2020", "dddd YYYY") == expected
# Get first Tuesday in February 2020
expected = datetime(2020, 2, 4)
assert self.parser.parse("Tue 02 2020", "ddd MM YYYY") == expected
assert fr_parser.parse("mar 02 2020", "ddd MM YYYY") == expected
assert self.parser.parse("Tuesday 02 2020", "dddd MM YYYY") == expected
assert fr_parser.parse("mardi 02 2020", "dddd MM YYYY") == expected
# Get first Tuesday in February after epoch
expected = datetime(1970, 2, 3)
assert self.parser.parse("Tue 02", "ddd MM") == expected
assert fr_parser.parse("mar 02", "ddd MM") == expected
assert self.parser.parse("Tuesday 02", "dddd MM") == expected
assert fr_parser.parse("mardi 02", "dddd MM") == expected
# Times remain intact
expected = datetime(2020, 2, 4, 10, 25, 54, 123456, tz.tzoffset(None, -3600))
assert (
self.parser.parse(
"Tue 02 2020 10:25:54.123456-01:00", "ddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
assert (
fr_parser.parse(
"mar 02 2020 10:25:54.123456-01:00", "ddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
assert (
self.parser.parse(
"Tuesday 02 2020 10:25:54.123456-01:00", "dddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
assert (
fr_parser.parse(
"mardi 02 2020 10:25:54.123456-01:00", "dddd MM YYYY HH:mm:ss.SZZ"
)
== expected
)
def test_parse_ddd_and_dddd_ignore_case(self):
# Regression test for issue #851
expected = datetime(2019, 6, 24)
assert (
self.parser.parse("MONDAY, June 24, 2019", "dddd, MMMM DD, YYYY")
== expected
)
def test_parse_ddd_and_dddd_then_format(self):
# Regression test for issue #446
arw_formatter = formatter.DateTimeFormatter()
assert arw_formatter.format(self.parser.parse("Mon", "ddd"), "ddd") == "Mon"
assert (
arw_formatter.format(self.parser.parse("Monday", "dddd"), "dddd")
== "Monday"
)
assert arw_formatter.format(self.parser.parse("Tue", "ddd"), "ddd") == "Tue"
assert (
arw_formatter.format(self.parser.parse("Tuesday", "dddd"), "dddd")
== "Tuesday"
)
assert arw_formatter.format(self.parser.parse("Wed", "ddd"), "ddd") == "Wed"
assert (
arw_formatter.format(self.parser.parse("Wednesday", "dddd"), "dddd")
== "Wednesday"
)
assert arw_formatter.format(self.parser.parse("Thu", "ddd"), "ddd") == "Thu"
assert (
arw_formatter.format(self.parser.parse("Thursday", "dddd"), "dddd")
== "Thursday"
)
assert arw_formatter.format(self.parser.parse("Fri", "ddd"), "ddd") == "Fri"
assert (
arw_formatter.format(self.parser.parse("Friday", "dddd"), "dddd")
== "Friday"
)
assert arw_formatter.format(self.parser.parse("Sat", "ddd"), "ddd") == "Sat"
assert (
arw_formatter.format(self.parser.parse("Saturday", "dddd"), "dddd")
== "Saturday"
)
assert arw_formatter.format(self.parser.parse("Sun", "ddd"), "ddd") == "Sun"
assert (
arw_formatter.format(self.parser.parse("Sunday", "dddd"), "dddd")
== "Sunday"
)
def test_parse_HH_24(self):
assert self.parser.parse(
"2019-10-30T24:00:00", "YYYY-MM-DDTHH:mm:ss"
) == datetime(2019, 10, 31, 0, 0, 0, 0)
assert self.parser.parse("2019-10-30T24:00", "YYYY-MM-DDTHH:mm") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse("2019-10-30T24", "YYYY-MM-DDTHH") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse(
"2019-10-30T24:00:00.0", "YYYY-MM-DDTHH:mm:ss.S"
) == datetime(2019, 10, 31, 0, 0, 0, 0)
assert self.parser.parse(
"2019-10-31T24:00:00", "YYYY-MM-DDTHH:mm:ss"
) == datetime(2019, 11, 1, 0, 0, 0, 0)
assert self.parser.parse(
"2019-12-31T24:00:00", "YYYY-MM-DDTHH:mm:ss"
) == datetime(2020, 1, 1, 0, 0, 0, 0)
assert self.parser.parse(
"2019-12-31T23:59:59.9999999", "YYYY-MM-DDTHH:mm:ss.S"
) == datetime(2020, 1, 1, 0, 0, 0, 0)
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:01:00", "YYYY-MM-DDTHH:mm:ss")
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:00:01", "YYYY-MM-DDTHH:mm:ss")
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:00:00.1", "YYYY-MM-DDTHH:mm:ss.S")
with pytest.raises(ParserError):
self.parser.parse("2019-12-31T24:00:00.999999", "YYYY-MM-DDTHH:mm:ss.S")
def test_parse_W(self):
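# ISO week dates: "2011-W05-4" reads as ISO year 2011, week 05, weekday 4
# (a Thursday), which the assertions below resolve to 2011-02-03; omitting
# the weekday falls back to the Monday of that week (2011-01-31).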
assert self.parser.parse("2011-W05-4", "W") == datetime(2011, 2, 3)
assert self.parser.parse("2011W054", "W") == datetime(2011, 2, 3)
assert self.parser.parse("2011-W05", "W") == datetime(2011, 1, 31)
assert self.parser.parse("2011W05", "W") == datetime(2011, 1, 31)
assert self.parser.parse("2011-W05-4T14:17:01", "WTHH:mm:ss") == datetime(
2011, 2, 3, 14, 17, 1
)
assert self.parser.parse("2011W054T14:17:01", "WTHH:mm:ss") == datetime(
2011, 2, 3, 14, 17, 1
)
assert self.parser.parse("2011-W05T14:17:01", "WTHH:mm:ss") == datetime(
2011, 1, 31, 14, 17, 1
)
assert self.parser.parse("2011W05T141701", "WTHHmmss") == datetime(
2011, 1, 31, 14, 17, 1
)
assert self.parser.parse("2011W054T141701", "WTHHmmss") == datetime(
2011, 2, 3, 14, 17, 1
)
bad_formats = [
"201W22",
"1995-W1-4",
"2001-W34-90",
"2001--W34",
"2011-W03--3",
"thstrdjtrsrd676776r65",
"2002-W66-1T14:17:01",
"2002-W23-03T14:17:01",
]
for fmt in bad_formats:
with pytest.raises(ParserError):
self.parser.parse(fmt, "W")
def test_parse_normalize_whitespace(self):
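# As exercised below, normalize_whitespace=True collapses runs of spaces,
# tabs and newlines in the input before matching; the same strings raise
# ParserError when the flag is left off.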
assert self.parser.parse(
"Jun 1 2005 1:33PM", "MMM D YYYY H:mmA", normalize_whitespace=True
) == datetime(2005, 6, 1, 13, 33)
with pytest.raises(ParserError):
self.parser.parse("Jun 1 2005 1:33PM", "MMM D YYYY H:mmA")
assert (
self.parser.parse(
"\t 2013-05-05 T \n 12:30:45\t123456 \t \n",
"YYYY-MM-DD T HH:mm:ss S",
normalize_whitespace=True,
)
== datetime(2013, 5, 5, 12, 30, 45, 123456)
)
with pytest.raises(ParserError):
self.parser.parse(
"\t 2013-05-05 T \n 12:30:45\t123456 \t \n",
"YYYY-MM-DD T HH:mm:ss S",
)
assert self.parser.parse(
" \n Jun 1\t 2005\n ", "MMM D YYYY", normalize_whitespace=True
) == datetime(2005, 6, 1)
with pytest.raises(ParserError):
self.parser.parse(" \n Jun 1\t 2005\n ", "MMM D YYYY")
@pytest.mark.usefixtures("dt_parser_regex")
class TestDateTimeParserRegex:
def test_format_year(self):
assert self.format_regex.findall("YYYY-YY") == ["YYYY", "YY"]
def test_format_month(self):
assert self.format_regex.findall("MMMM-MMM-MM-M") == ["MMMM", "MMM", "MM", "M"]
def test_format_day(self):
assert self.format_regex.findall("DDDD-DDD-DD-D") == ["DDDD", "DDD", "DD", "D"]
def test_format_hour(self):
assert self.format_regex.findall("HH-H-hh-h") == ["HH", "H", "hh", "h"]
def test_format_minute(self):
assert self.format_regex.findall("mm-m") == ["mm", "m"]
def test_format_second(self):
assert self.format_regex.findall("ss-s") == ["ss", "s"]
def test_format_subsecond(self):
assert self.format_regex.findall("SSSSSS-SSSSS-SSSS-SSS-SS-S") == [
"SSSSSS",
"SSSSS",
"SSSS",
"SSS",
"SS",
"S",
]
def test_format_tz(self):
assert self.format_regex.findall("ZZZ-ZZ-Z") == ["ZZZ", "ZZ", "Z"]
def test_format_am_pm(self):
assert self.format_regex.findall("A-a") == ["A", "a"]
def test_format_timestamp(self):
assert self.format_regex.findall("X") == ["X"]
def test_format_timestamp_milli(self):
assert self.format_regex.findall("x") == ["x"]
def test_escape(self):
escape_regex = parser.DateTimeParser._ESCAPE_RE
assert escape_regex.findall("2018-03-09 8 [h] 40 [hello]") == ["[h]", "[hello]"]
def test_month_names(self):
p = parser.DateTimeParser("en-us")
text = "_".join(calendar.month_name[1:])
result = p._input_re_map["MMMM"].findall(text)
assert result == calendar.month_name[1:]
def test_month_abbreviations(self):
p = parser.DateTimeParser("en-us")
text = "_".join(calendar.month_abbr[1:])
result = p._input_re_map["MMM"].findall(text)
assert result == calendar.month_abbr[1:]
def test_digits(self):
assert parser.DateTimeParser._ONE_OR_TWO_DIGIT_RE.findall("4-56") == ["4", "56"]
assert parser.DateTimeParser._ONE_OR_TWO_OR_THREE_DIGIT_RE.findall(
"4-56-789"
) == ["4", "56", "789"]
assert parser.DateTimeParser._ONE_OR_MORE_DIGIT_RE.findall(
"4-56-789-1234-12345"
) == ["4", "56", "789", "1234", "12345"]
assert parser.DateTimeParser._TWO_DIGIT_RE.findall("12-3-45") == ["12", "45"]
assert parser.DateTimeParser._THREE_DIGIT_RE.findall("123-4-56") == ["123"]
assert parser.DateTimeParser._FOUR_DIGIT_RE.findall("1234-56") == ["1234"]
def test_tz(self):
tz_z_re = parser.DateTimeParser._TZ_Z_RE
assert tz_z_re.findall("-0700") == [("-", "07", "00")]
assert tz_z_re.findall("+07") == [("+", "07", "")]
assert tz_z_re.search("15/01/2019T04:05:06.789120Z") is not None
assert tz_z_re.search("15/01/2019T04:05:06.789120") is None
tz_zz_re = parser.DateTimeParser._TZ_ZZ_RE
assert tz_zz_re.findall("-07:00") == [("-", "07", "00")]
assert tz_zz_re.findall("+07") == [("+", "07", "")]
assert tz_zz_re.search("15/01/2019T04:05:06.789120Z") is not None
assert tz_zz_re.search("15/01/2019T04:05:06.789120") is None
tz_name_re = parser.DateTimeParser._TZ_NAME_RE
assert tz_name_re.findall("Europe/Warsaw") == ["Europe/Warsaw"]
assert tz_name_re.findall("GMT") == ["GMT"]
def test_timestamp(self):
timestamp_re = parser.DateTimeParser._TIMESTAMP_RE
assert timestamp_re.findall("1565707550.452729") == ["1565707550.452729"]
assert timestamp_re.findall("-1565707550.452729") == ["-1565707550.452729"]
assert timestamp_re.findall("-1565707550") == ["-1565707550"]
assert timestamp_re.findall("1565707550") == ["1565707550"]
assert timestamp_re.findall("1565707550.") == []
assert timestamp_re.findall(".1565707550") == []
def test_timestamp_milli(self):
timestamp_expanded_re = parser.DateTimeParser._TIMESTAMP_EXPANDED_RE
assert timestamp_expanded_re.findall("-1565707550") == ["-1565707550"]
assert timestamp_expanded_re.findall("1565707550") == ["1565707550"]
assert timestamp_expanded_re.findall("1565707550.452729") == []
assert timestamp_expanded_re.findall("1565707550.") == []
assert timestamp_expanded_re.findall(".1565707550") == []
def test_time(self):
time_re = parser.DateTimeParser._TIME_RE
time_separators = [":", ""]
for sep in time_separators:
assert time_re.findall("12") == [("12", "", "", "", "")]
assert time_re.findall(f"12{sep}35") == [("12", "35", "", "", "")]
assert time_re.findall("12{sep}35{sep}46".format(sep=sep)) == [
("12", "35", "46", "", "")
]
assert time_re.findall("12{sep}35{sep}46.952313".format(sep=sep)) == [
("12", "35", "46", ".", "952313")
]
assert time_re.findall("12{sep}35{sep}46,952313".format(sep=sep)) == [
("12", "35", "46", ",", "952313")
]
assert time_re.findall("12:") == []
assert time_re.findall("12:35:46.") == []
assert time_re.findall("12:35:46,") == []
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParserISO:
def test_YYYY(self):
assert self.parser.parse_iso("2013") == datetime(2013, 1, 1)
def test_YYYY_DDDD(self):
assert self.parser.parse_iso("1998-136") == datetime(1998, 5, 16)
assert self.parser.parse_iso("1998-006") == datetime(1998, 1, 6)
with pytest.raises(ParserError):
self.parser.parse_iso("1998-456")
# 2016 is a leap year, so Feb 29 exists (leap day)
assert self.parser.parse_iso("2016-059") == datetime(2016, 2, 28)
assert self.parser.parse_iso("2016-060") == datetime(2016, 2, 29)
assert self.parser.parse_iso("2016-061") == datetime(2016, 3, 1)
# 2017 is not a leap year, so Feb 29 does not exist
assert self.parser.parse_iso("2017-059") == datetime(2017, 2, 28)
assert self.parser.parse_iso("2017-060") == datetime(2017, 3, 1)
assert self.parser.parse_iso("2017-061") == datetime(2017, 3, 2)
# Since 2016 is a leap year, the 366th day falls in the same year
assert self.parser.parse_iso("2016-366") == datetime(2016, 12, 31)
# Since 2017 is not a leap year, the 366th day falls in the next year
assert self.parser.parse_iso("2017-366") == datetime(2018, 1, 1)
def test_YYYY_DDDD_HH_mm_ssZ(self):
assert self.parser.parse_iso("2013-036 04:05:06+01:00") == datetime(
2013, 2, 5, 4, 5, 6, tzinfo=tz.tzoffset(None, 3600)
)
assert self.parser.parse_iso("2013-036 04:05:06Z") == datetime(
2013, 2, 5, 4, 5, 6, tzinfo=tz.tzutc()
)
def test_YYYY_MM_DDDD(self):
with pytest.raises(ParserError):
self.parser.parse_iso("2014-05-125")
def test_YYYY_MM(self):
for separator in DateTimeParser.SEPARATORS:
assert self.parser.parse_iso(separator.join(("2013", "02"))) == datetime(
2013, 2, 1
)
def test_YYYY_MM_DD(self):
for separator in DateTimeParser.SEPARATORS:
assert self.parser.parse_iso(
separator.join(("2013", "02", "03"))
) == datetime(2013, 2, 3)
def test_YYYY_MM_DDTHH_mmZ(self):
assert self.parser.parse_iso("2013-02-03T04:05+01:00") == datetime(
2013, 2, 3, 4, 5, tzinfo=tz.tzoffset(None, 3600)
)
def test_YYYY_MM_DDTHH_mm(self):
assert self.parser.parse_iso("2013-02-03T04:05") == datetime(2013, 2, 3, 4, 5)
def test_YYYY_MM_DDTHH(self):
assert self.parser.parse_iso("2013-02-03T04") == datetime(2013, 2, 3, 4)
def test_YYYY_MM_DDTHHZ(self):
assert self.parser.parse_iso("2013-02-03T04+01:00") == datetime(
2013, 2, 3, 4, tzinfo=tz.tzoffset(None, 3600)
)
def test_YYYY_MM_DDTHH_mm_ssZ(self):
assert self.parser.parse_iso("2013-02-03T04:05:06+01:00") == datetime(
2013, 2, 3, 4, 5, 6, tzinfo=tz.tzoffset(None, 3600)
)
def test_YYYY_MM_DDTHH_mm_ss(self):
assert self.parser.parse_iso("2013-02-03T04:05:06") == datetime(
2013, 2, 3, 4, 5, 6
)
def test_YYYY_MM_DD_HH_mmZ(self):
assert self.parser.parse_iso("2013-02-03 04:05+01:00") == datetime(
2013, 2, 3, 4, 5, tzinfo=tz.tzoffset(None, 3600)
)
def test_YYYY_MM_DD_HH_mm(self):
assert self.parser.parse_iso("2013-02-03 04:05") == datetime(2013, 2, 3, 4, 5)
def test_YYYY_MM_DD_HH(self):
assert self.parser.parse_iso("2013-02-03 04") == datetime(2013, 2, 3, 4)
def test_invalid_time(self):
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03 044")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03 04:05:06.")
def test_YYYY_MM_DD_HH_mm_ssZ(self):
assert self.parser.parse_iso("2013-02-03 04:05:06+01:00") == datetime(
2013, 2, 3, 4, 5, 6, tzinfo=tz.tzoffset(None, 3600)
)
def test_YYYY_MM_DD_HH_mm_ss(self):
assert self.parser.parse_iso("2013-02-03 04:05:06") == datetime(
2013, 2, 3, 4, 5, 6
)
def test_YYYY_MM_DDTHH_mm_ss_S(self):
assert self.parser.parse_iso("2013-02-03T04:05:06.7") == datetime(
2013, 2, 3, 4, 5, 6, 700000
)
assert self.parser.parse_iso("2013-02-03T04:05:06.78") == datetime(
2013, 2, 3, 4, 5, 6, 780000
)
assert self.parser.parse_iso("2013-02-03T04:05:06.789") == datetime(
2013, 2, 3, 4, 5, 6, 789000
)
assert self.parser.parse_iso("2013-02-03T04:05:06.7891") == datetime(
2013, 2, 3, 4, 5, 6, 789100
)
assert self.parser.parse_iso("2013-02-03T04:05:06.78912") == datetime(
2013, 2, 3, 4, 5, 6, 789120
)
# ISO 8601:2004(E), ISO, 2004-12-01, 4.2.2.4 ... the decimal fraction
# shall be divided from the integer part by the decimal sign specified
# in ISO 31-0, i.e. the comma [,] or full stop [.]. Of these, the comma
# is the preferred sign.
assert self.parser.parse_iso("2013-02-03T04:05:06,789123678") == datetime(
2013, 2, 3, 4, 5, 6, 789124
)
# there is no limit on the number of decimal places
assert self.parser.parse_iso("2013-02-03T04:05:06.789123678") == datetime(
2013, 2, 3, 4, 5, 6, 789124
)
def test_YYYY_MM_DDTHH_mm_ss_SZ(self):
assert self.parser.parse_iso("2013-02-03T04:05:06.7+01:00") == datetime(
2013, 2, 3, 4, 5, 6, 700000, tzinfo=tz.tzoffset(None, 3600)
)
assert self.parser.parse_iso("2013-02-03T04:05:06.78+01:00") == datetime(
2013, 2, 3, 4, 5, 6, 780000, tzinfo=tz.tzoffset(None, 3600)
)
assert self.parser.parse_iso("2013-02-03T04:05:06.789+01:00") == datetime(
2013, 2, 3, 4, 5, 6, 789000, tzinfo=tz.tzoffset(None, 3600)
)
assert self.parser.parse_iso("2013-02-03T04:05:06.7891+01:00") == datetime(
2013, 2, 3, 4, 5, 6, 789100, tzinfo=tz.tzoffset(None, 3600)
)
assert self.parser.parse_iso("2013-02-03T04:05:06.78912+01:00") == datetime(
2013, 2, 3, 4, 5, 6, 789120, tzinfo=tz.tzoffset(None, 3600)
)
assert self.parser.parse_iso("2013-02-03 04:05:06.78912Z") == datetime(
2013, 2, 3, 4, 5, 6, 789120, tzinfo=tz.tzutc()
)
def test_W(self):
assert self.parser.parse_iso("2011-W05-4") == datetime(2011, 2, 3)
assert self.parser.parse_iso("2011-W05-4T14:17:01") == datetime(
2011, 2, 3, 14, 17, 1
)
assert self.parser.parse_iso("2011W054") == datetime(2011, 2, 3)
assert self.parser.parse_iso("2011W054T141701") == datetime(
2011, 2, 3, 14, 17, 1
)
def test_invalid_Z(self):
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912z")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912zz")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912Zz")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912ZZ")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912+Z")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912-Z")
with pytest.raises(ParserError):
self.parser.parse_iso("2013-02-03T04:05:06.78912 Z")
def test_parse_subsecond(self):
self.expected = datetime(2013, 1, 1, 12, 30, 45, 900000)
assert self.parser.parse_iso("2013-01-01 12:30:45.9") == self.expected
self.expected = datetime(2013, 1, 1, 12, 30, 45, 980000)
assert self.parser.parse_iso("2013-01-01 12:30:45.98") == self.expected
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987000)
assert self.parser.parse_iso("2013-01-01 12:30:45.987") == self.expected
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987600)
assert self.parser.parse_iso("2013-01-01 12:30:45.9876") == self.expected
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987650)
assert self.parser.parse_iso("2013-01-01 12:30:45.98765") == self.expected
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
assert self.parser.parse_iso("2013-01-01 12:30:45.987654") == self.expected
# use comma as subsecond separator
self.expected = datetime(2013, 1, 1, 12, 30, 45, 987654)
assert self.parser.parse_iso("2013-01-01 12:30:45,987654") == self.expected
def test_gnu_date(self):
"""Regression tests for parsing output from GNU date."""
# date -Ins
assert self.parser.parse_iso("2016-11-16T09:46:30,895636557-0800") == datetime(
2016, 11, 16, 9, 46, 30, 895636, tzinfo=tz.tzoffset(None, -3600 * 8)
)
# date --rfc-3339=ns
assert self.parser.parse_iso("2016-11-16 09:51:14.682141526-08:00") == datetime(
2016, 11, 16, 9, 51, 14, 682142, tzinfo=tz.tzoffset(None, -3600 * 8)
)
def test_isoformat(self):
dt = datetime.utcnow()
assert self.parser.parse_iso(dt.isoformat()) == dt
def test_parse_iso_normalize_whitespace(self):
assert self.parser.parse_iso(
"2013-036 \t 04:05:06Z", normalize_whitespace=True
) == datetime(2013, 2, 5, 4, 5, 6, tzinfo=tz.tzutc())
with pytest.raises(ParserError):
self.parser.parse_iso("2013-036 \t 04:05:06Z")
assert self.parser.parse_iso(
"\t 2013-05-05T12:30:45.123456 \t \n", normalize_whitespace=True
) == datetime(2013, 5, 5, 12, 30, 45, 123456)
with pytest.raises(ParserError):
self.parser.parse_iso("\t 2013-05-05T12:30:45.123456 \t \n")
def test_parse_iso_with_leading_and_trailing_whitespace(self):
datetime_string = " 2016-11-15T06:37:19.123456"
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
datetime_string = " 2016-11-15T06:37:19.123456 "
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
datetime_string = "2016-11-15T06:37:19.123456 "
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
datetime_string = "2016-11-15T 06:37:19.123456"
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
# leading whitespace
datetime_string = " 2016-11-15 06:37:19.123456"
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
# trailing whitespace
datetime_string = "2016-11-15 06:37:19.123456 "
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
datetime_string = " 2016-11-15 06:37:19.123456 "
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
# two dividing spaces
datetime_string = "2016-11-15 06:37:19.123456"
with pytest.raises(ParserError):
self.parser.parse_iso(datetime_string)
def test_parse_iso_with_extra_words_at_start_and_end_invalid(self):
test_inputs = [
"blah2016",
"blah2016blah",
"blah 2016 blah",
"blah 2016",
"2016 blah",
"blah 2016-05-16 04:05:06.789120",
"2016-05-16 04:05:06.789120 blah",
"blah 2016-05-16T04:05:06.789120",
"2016-05-16T04:05:06.789120 blah",
"2016blah",
"2016-05blah",
"2016-05-16blah",
"2016-05-16T04:05:06.789120blah",
"2016-05-16T04:05:06.789120ZblahZ",
"2016-05-16T04:05:06.789120Zblah",
"2016-05-16T04:05:06.789120blahZ",
"Meet me at 2016-05-16T04:05:06.789120 at the restaurant.",
"Meet me at 2016-05-16 04:05:06.789120 at the restaurant.",
]
for ti in test_inputs:
with pytest.raises(ParserError):
self.parser.parse_iso(ti)
def test_iso8601_basic_format(self):
assert self.parser.parse_iso("20180517") == datetime(2018, 5, 17)
assert self.parser.parse_iso("20180517T10") == datetime(2018, 5, 17, 10)
assert self.parser.parse_iso("20180517T105513.843456") == datetime(
2018, 5, 17, 10, 55, 13, 843456
)
assert self.parser.parse_iso("20180517T105513Z") == datetime(
2018, 5, 17, 10, 55, 13, tzinfo=tz.tzutc()
)
assert self.parser.parse_iso("20180517T105513.843456-0700") == datetime(
2018, 5, 17, 10, 55, 13, 843456, tzinfo=tz.tzoffset(None, -25200)
)
assert self.parser.parse_iso("20180517T105513-0700") == datetime(
2018, 5, 17, 10, 55, 13, tzinfo=tz.tzoffset(None, -25200)
)
assert self.parser.parse_iso("20180517T105513-07") == datetime(
2018, 5, 17, 10, 55, 13, tzinfo=tz.tzoffset(None, -25200)
)
# ordinal in basic format: YYYYDDDD
assert self.parser.parse_iso("1998136") == datetime(1998, 5, 16)
# timezone requires +- separator
with pytest.raises(ParserError):
self.parser.parse_iso("20180517T1055130700")
with pytest.raises(ParserError):
self.parser.parse_iso("20180517T10551307")
# too many digits in date
with pytest.raises(ParserError):
self.parser.parse_iso("201860517T105513Z")
# too many digits in time
with pytest.raises(ParserError):
self.parser.parse_iso("20180517T1055213Z")
def test_midnight_end_day(self):
assert self.parser.parse_iso("2019-10-30T24:00:00") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse_iso("2019-10-30T24:00") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse_iso("2019-10-30T24:00:00.0") == datetime(
2019, 10, 31, 0, 0, 0, 0
)
assert self.parser.parse_iso("2019-10-31T24:00:00") == datetime(
2019, 11, 1, 0, 0, 0, 0
)
assert self.parser.parse_iso("2019-12-31T24:00:00") == datetime(
2020, 1, 1, 0, 0, 0, 0
)
assert self.parser.parse_iso("2019-12-31T23:59:59.9999999") == datetime(
2020, 1, 1, 0, 0, 0, 0
)
with pytest.raises(ParserError):
self.parser.parse_iso("2019-12-31T24:01:00")
with pytest.raises(ParserError):
self.parser.parse_iso("2019-12-31T24:00:01")
with pytest.raises(ParserError):
self.parser.parse_iso("2019-12-31T24:00:00.1")
with pytest.raises(ParserError):
self.parser.parse_iso("2019-12-31T24:00:00.999999")
@pytest.mark.usefixtures("tzinfo_parser")
class TestTzinfoParser:
def test_parse_local(self):
assert self.parser.parse("local") == tz.tzlocal()
def test_parse_utc(self):
assert self.parser.parse("utc") == tz.tzutc()
assert self.parser.parse("UTC") == tz.tzutc()
def test_parse_iso(self):
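# Bare offsets are accepted with or without a sign, with a colon, with no
# separator, or as hours only; per the assertions below "01:00", "+0100"
# and "+01" all resolve to UTC+01:00.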
assert self.parser.parse("01:00") == tz.tzoffset(None, 3600)
assert self.parser.parse("11:35") == tz.tzoffset(None, 11 * 3600 + 2100)
assert self.parser.parse("+01:00") == tz.tzoffset(None, 3600)
assert self.parser.parse("-01:00") == tz.tzoffset(None, -3600)
assert self.parser.parse("0100") == tz.tzoffset(None, 3600)
assert self.parser.parse("+0100") == tz.tzoffset(None, 3600)
assert self.parser.parse("-0100") == tz.tzoffset(None, -3600)
assert self.parser.parse("01") == tz.tzoffset(None, 3600)
assert self.parser.parse("+01") == tz.tzoffset(None, 3600)
assert self.parser.parse("-01") == tz.tzoffset(None, -3600)
def test_parse_str(self):
assert self.parser.parse("US/Pacific") == tz.gettz("US/Pacific")
def test_parse_fails(self):
with pytest.raises(parser.ParserError):
self.parser.parse("fail")
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParserMonthName:
def test_shortmonth_capitalized(self):
assert self.parser.parse("2013-Jan-01", "YYYY-MMM-DD") == datetime(2013, 1, 1)
def test_shortmonth_allupper(self):
assert self.parser.parse("2013-JAN-01", "YYYY-MMM-DD") == datetime(2013, 1, 1)
def test_shortmonth_alllower(self):
assert self.parser.parse("2013-jan-01", "YYYY-MMM-DD") == datetime(2013, 1, 1)
def test_month_capitalized(self):
assert self.parser.parse("2013-January-01", "YYYY-MMMM-DD") == datetime(
2013, 1, 1
)
def test_month_allupper(self):
assert self.parser.parse("2013-JANUARY-01", "YYYY-MMMM-DD") == datetime(
2013, 1, 1
)
def test_month_alllower(self):
assert self.parser.parse("2013-january-01", "YYYY-MMMM-DD") == datetime(
2013, 1, 1
)
def test_localized_month_name(self):
parser_ = parser.DateTimeParser("fr-fr")
assert parser_.parse("2013-Janvier-01", "YYYY-MMMM-DD") == datetime(2013, 1, 1)
def test_localized_month_abbreviation(self):
parser_ = parser.DateTimeParser("it-it")
assert parser_.parse("2013-Gen-01", "YYYY-MMM-DD") == datetime(2013, 1, 1)
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParserMeridians:
def test_meridians_lowercase(self):
assert self.parser.parse("2013-01-01 5am", "YYYY-MM-DD ha") == datetime(
2013, 1, 1, 5
)
assert self.parser.parse("2013-01-01 5pm", "YYYY-MM-DD ha") == datetime(
2013, 1, 1, 17
)
def test_meridians_capitalized(self):
assert self.parser.parse("2013-01-01 5AM", "YYYY-MM-DD hA") == datetime(
2013, 1, 1, 5
)
assert self.parser.parse("2013-01-01 5PM", "YYYY-MM-DD hA") == datetime(
2013, 1, 1, 17
)
def test_localized_meridians_lowercase(self):
parser_ = parser.DateTimeParser("hu-hu")
assert parser_.parse("2013-01-01 5 de", "YYYY-MM-DD h a") == datetime(
2013, 1, 1, 5
)
assert parser_.parse("2013-01-01 5 du", "YYYY-MM-DD h a") == datetime(
2013, 1, 1, 17
)
def test_localized_meridians_capitalized(self):
parser_ = parser.DateTimeParser("hu-hu")
assert parser_.parse("2013-01-01 5 DE", "YYYY-MM-DD h A") == datetime(
2013, 1, 1, 5
)
assert parser_.parse("2013-01-01 5 DU", "YYYY-MM-DD h A") == datetime(
2013, 1, 1, 17
)
# regression test for issue #607
def test_es_meridians(self):
parser_ = parser.DateTimeParser("es")
assert parser_.parse(
"Junio 30, 2019 - 08:00 pm", "MMMM DD, YYYY - hh:mm a"
) == datetime(2019, 6, 30, 20, 0)
with pytest.raises(ParserError):
parser_.parse(
"Junio 30, 2019 - 08:00 pasdfasdfm", "MMMM DD, YYYY - hh:mm a"
)
def test_fr_meridians(self):
parser_ = parser.DateTimeParser("fr")
# the French locale always uses a 24-hour clock, so it does not support meridians
with pytest.raises(ParserError):
parser_.parse("Janvier 30, 2019 - 08:00 pm", "MMMM DD, YYYY - hh:mm a")
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParserMonthOrdinalDay:
def test_english(self):
parser_ = parser.DateTimeParser("en-us")
assert parser_.parse("January 1st, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 1
)
assert parser_.parse("January 2nd, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 2
)
assert parser_.parse("January 3rd, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 3
)
assert parser_.parse("January 4th, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 4
)
assert parser_.parse("January 11th, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 11
)
assert parser_.parse("January 12th, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 12
)
assert parser_.parse("January 13th, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 13
)
assert parser_.parse("January 21st, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 21
)
assert parser_.parse("January 31st, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 31
)
with pytest.raises(ParserError):
parser_.parse("January 1th, 2013", "MMMM Do, YYYY")
with pytest.raises(ParserError):
parser_.parse("January 11st, 2013", "MMMM Do, YYYY")
def test_italian(self):
parser_ = parser.DateTimeParser("it-it")
assert parser_.parse("Gennaio 1º, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 1
)
def test_spanish(self):
parser_ = parser.DateTimeParser("es-es")
assert parser_.parse("Enero 1º, 2013", "MMMM Do, YYYY") == datetime(2013, 1, 1)
def test_french(self):
parser_ = parser.DateTimeParser("fr-fr")
assert parser_.parse("Janvier 1er, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 1
)
assert parser_.parse("Janvier 2e, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 2
)
assert parser_.parse("Janvier 11e, 2013", "MMMM Do, YYYY") == datetime(
2013, 1, 11
)
@pytest.mark.usefixtures("dt_parser")
class TestDateTimeParserSearchDate:
def test_parse_search(self):
assert self.parser.parse(
"Today is 25 of September of 2003", "DD of MMMM of YYYY"
) == datetime(2003, 9, 25)
def test_parse_search_with_numbers(self):
assert self.parser.parse(
"2000 people met the 2012-01-01 12:05:10", "YYYY-MM-DD HH:mm:ss"
) == datetime(2012, 1, 1, 12, 5, 10)
assert self.parser.parse(
"Call 01-02-03 on 79-01-01 12:05:10", "YY-MM-DD HH:mm:ss"
) == datetime(1979, 1, 1, 12, 5, 10)
def test_parse_search_with_names(self):
assert self.parser.parse("June was born in May 1980", "MMMM YYYY") == datetime(
1980, 5, 1
)
def test_parse_search_locale_with_names(self):
p = parser.DateTimeParser("sv-se")
assert p.parse("Jan föddes den 31 Dec 1980", "DD MMM YYYY") == datetime(
1980, 12, 31
)
assert p.parse("Jag föddes den 25 Augusti 1975", "DD MMMM YYYY") == datetime(
1975, 8, 25
)
def test_parse_search_fails(self):
with pytest.raises(parser.ParserError):
self.parser.parse("Jag föddes den 25 Augusti 1975", "DD MMMM YYYY")
def test_escape(self):
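# Square-bracketed chunks in a format string are literal text that must
# appear verbatim in the input; the cases below cover escaped words,
# consecutive escaped chunks and regex metacharacters.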
format = "MMMM D, YYYY [at] h:mma"
assert self.parser.parse(
"Thursday, December 10, 2015 at 5:09pm", format
) == datetime(2015, 12, 10, 17, 9)
format = "[MMMM] M D, YYYY [at] h:mma"
assert self.parser.parse("MMMM 12 10, 2015 at 5:09pm", format) == datetime(
2015, 12, 10, 17, 9
)
format = "[It happened on] MMMM Do [in the year] YYYY [a long time ago]"
assert self.parser.parse(
"It happened on November 25th in the year 1990 a long time ago", format
) == datetime(1990, 11, 25)
format = "[It happened on] MMMM Do [in the][ year] YYYY [a long time ago]"
assert self.parser.parse(
"It happened on November 25th in the year 1990 a long time ago", format
) == datetime(1990, 11, 25)
format = "[I'm][ entirely][ escaped,][ weee!]"
assert self.parser.parse("I'm entirely escaped, weee!", format) == datetime(
1, 1, 1
)
# Special RegEx characters
format = "MMM DD, YYYY |^${}().*+?<>-& h:mm A"
assert self.parser.parse(
"Dec 31, 2017 |^${}().*+?<>-& 2:00 AM", format
) == datetime(2017, 12, 31, 2, 0)
@pytest.mark.usefixtures("dt_parser")
class TestFuzzInput:
# Regression test for issue #860
def test_no_match_group(self):
fmt_str = str(b"[|\x1f\xb9\x03\x00\x00\x00\x00:-yI:][\x01yI:yI:I")
payload = str(b"")
with pytest.raises(parser.ParserMatchError):
self.parser.parse(payload, fmt_str)
# Regression test for issue #854
def test_regex_module_error(self):
fmt_str = str(b"struct n[X+,N-M)MMXdMM]<")
payload = str(b"")
with pytest.raises(parser.ParserMatchError):
self.parser.parse(payload, fmt_str)
| apache-2.0 | 3,494,521,050,826,470,400 | 35.746699 | 113 | 0.575008 | false |
mordred-descriptor/mordred | mordred/BaryszMatrix.py | 1 | 2410 | from __future__ import division
import numpy as np
from networkx import Graph, floyd_warshall_numpy
from ._base import Descriptor
from ._atomic_property import AtomicProperty, get_properties
from ._matrix_attributes import methods, get_method
__all__ = ("BaryszMatrix",)
class BaryszMatrixBase(Descriptor):
explicit_hydrogens = False
__slots__ = ()
class Barysz(BaryszMatrixBase):
__slots__ = ("_prop",)
hermitian = True
def parameters(self):
return (self._prop,)
def __init__(self, prop):
self._prop = prop
def dependencies(self):
return {"P": self._prop}
def calculate(self, P):
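# What the code below computes (read directly from the implementation):
# every bond (i, j) is weighted by w = (C * C) / (P[i] * P[j] * pi), with
# P the chosen atomic property, C its carbon value and pi the bond order;
# the matrix is the all-pairs shortest path over those weights, and the
# diagonal is set to 1 - C / P[i] for each atom.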
C = self._prop.carbon
G = Graph()
G.add_nodes_from(a.GetIdx() for a in self.mol.GetAtoms())
for bond in self.mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
pi = bond.GetBondTypeAsDouble()
with self.rethrow_zerodiv():
w = (C * C) / (P[i] * P[j] * pi)
G.add_edge(i, j, weight=w)
sp = floyd_warshall_numpy(G)
np.fill_diagonal(sp, [1.0 - C / P[a.GetIdx()] for a in self.mol.GetAtoms()])
return sp
class BaryszMatrix(BaryszMatrixBase):
r"""barysz matrix descriptor.
:type prop: :py:class:`str` or :py:class:`function`
:param prop: :ref:`atomic_properties`
:type type: str
:param type: :ref:`matrix_aggregating_methods`
:returns: NaN when any properties are NaN
"""
since = "1.0.0"
__slots__ = ("_prop", "_type")
def description(self):
return "{} from Barysz matrix weighted by {}".format(
self._type.description(), self._prop.get_long()
)
@classmethod
def preset(cls, version):
return (cls(p, m) for p in get_properties() for m in methods)
def __str__(self):
return "{}_Dz{}".format(self._type.__name__, self._prop.as_argument)
def parameters(self):
return self._prop, self._type
def __init__(self, prop="Z", type="SpMax"):
self._prop = AtomicProperty(self.explicit_hydrogens, prop)
self._type = get_method(type)
def dependencies(self):
return {
"result": self._type(
Barysz(self._prop), self.explicit_hydrogens, self.kekulize
)
}
def calculate(self, result):
return result
rtype = float
| bsd-3-clause | -6,378,623,785,350,799,000 | 23.343434 | 84 | 0.581328 | false |
asadoughi/python-neutronclient | neutronclient/neutron/v2_0/floatingip.py | 1 | 5690 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.openstack.common.gettextutils import _
class ListFloatingIP(neutronV20.ListCommand):
"""List floating ips that belong to a given tenant."""
resource = 'floatingip'
log = logging.getLogger(__name__ + '.ListFloatingIP')
list_columns = ['id', 'fixed_ip_address', 'floating_ip_address',
'port_id']
pagination_support = True
sorting_support = True
class ShowFloatingIP(neutronV20.ShowCommand):
"""Show information of a given floating ip."""
resource = 'floatingip'
log = logging.getLogger(__name__ + '.ShowFloatingIP')
allow_names = False
class CreateFloatingIP(neutronV20.CreateCommand):
"""Create a floating ip for a given tenant."""
resource = 'floatingip'
log = logging.getLogger(__name__ + '.CreateFloatingIP')
def add_known_arguments(self, parser):
parser.add_argument(
'floating_network_id', metavar='FLOATING_NETWORK',
help=_('Network name or id to allocate floating IP from'))
parser.add_argument(
'--port-id',
help=_('ID of the port to be associated with the floatingip'))
parser.add_argument(
'--port_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--fixed-ip-address',
help=_('IP address on the port (only required if port has multiple '
'IPs)'))
parser.add_argument(
'--fixed_ip_address',
help=argparse.SUPPRESS)
def args2body(self, parsed_args):
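# Builds the request body: the floating network is resolved from a name or
# ID first, then the optional port, tenant and fixed-IP fields are added
# only when they were supplied on the command line.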
_network_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'network', parsed_args.floating_network_id)
body = {self.resource: {'floating_network_id': _network_id}}
if parsed_args.port_id:
body[self.resource].update({'port_id': parsed_args.port_id})
if parsed_args.tenant_id:
body[self.resource].update({'tenant_id': parsed_args.tenant_id})
if parsed_args.fixed_ip_address:
body[self.resource].update({'fixed_ip_address':
parsed_args.fixed_ip_address})
return body
class DeleteFloatingIP(neutronV20.DeleteCommand):
"""Delete a given floating ip."""
log = logging.getLogger(__name__ + '.DeleteFloatingIP')
resource = 'floatingip'
allow_names = False
class AssociateFloatingIP(neutronV20.NeutronCommand):
"""Create a mapping between a floating ip and a fixed ip."""
api = 'network'
log = logging.getLogger(__name__ + '.AssociateFloatingIP')
resource = 'floatingip'
def get_parser(self, prog_name):
parser = super(AssociateFloatingIP, self).get_parser(prog_name)
parser.add_argument(
'floatingip_id', metavar='FLOATINGIP_ID',
help=_('ID of the floating IP to associate'))
parser.add_argument(
'port_id', metavar='PORT',
help=_('ID or name of the port to be associated with the '
'floatingip'))
parser.add_argument(
'--fixed-ip-address',
help=_('IP address on the port (only required if port has multiple '
'IPs)'))
parser.add_argument(
'--fixed_ip_address',
help=argparse.SUPPRESS)
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
update_dict = {}
if parsed_args.port_id:
update_dict['port_id'] = parsed_args.port_id
if parsed_args.fixed_ip_address:
update_dict['fixed_ip_address'] = parsed_args.fixed_ip_address
neutron_client.update_floatingip(parsed_args.floatingip_id,
{'floatingip': update_dict})
print >>self.app.stdout, (
_('Associated floatingip %s') % parsed_args.floatingip_id)
class DisassociateFloatingIP(neutronV20.NeutronCommand):
"""Remove a mapping from a floating ip to a fixed ip.
"""
api = 'network'
log = logging.getLogger(__name__ + '.DisassociateFloatingIP')
resource = 'floatingip'
def get_parser(self, prog_name):
parser = super(DisassociateFloatingIP, self).get_parser(prog_name)
parser.add_argument(
'floatingip_id', metavar='FLOATINGIP_ID',
help=_('ID of the floating IP to disassociate'))
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
neutron_client.update_floatingip(parsed_args.floatingip_id,
{'floatingip': {'port_id': None}})
print >>self.app.stdout, (
_('Disassociated floatingip %s') % parsed_args.floatingip_id)
| apache-2.0 | -3,693,814,631,943,761,400 | 36.434211 | 79 | 0.620562 | false |
bob48523/modified_wrn | dpn.py | 1 | 3609 | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Bottleneck(nn.Module):
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
super(Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes+dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
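# Dual-path merge as coded here: the first d channels are summed with the
# shortcut (residual path), while the remaining dense_depth channels from
# both tensors are concatenated (densely connected path).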
out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
out = F.relu(out)
return out
class DPN(nn.Module):
def __init__(self, cfg):
super(DPN, self).__init__()
in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 10)
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
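# Note: every block appends dense_depth channels to the dense path, so the
# input width for the next block is advanced to
# out_planes + (i + 2) * dense_depth after block i.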
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DPN26():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (2,2,2,2),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def DPN92():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (3,4,20,3),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def test():
net = DPN92()
x = Variable(torch.randn(1,3,32,32))
y = net(x)
print(y)
# test()
| apache-2.0 | 741,063,464,851,315,500 | 35.09 | 116 | 0.581602 | false |
anaruse/chainer | tests/chainer_tests/functions_tests/connection_tests/test_convolution_2d.py | 1 | 15004 | import unittest
import numpy
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.testing import condition
@testing.parameterize(*(testing.product({
'c_contiguous': [True, False],
'cover_all': [True, False],
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
'dilate': [1],
'groups': [1, 2],
'nobias': [True, False],
}) + testing.product({
'c_contiguous': [False],
'cover_all': [False],
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [1],
'groups': [1, 2],
'nobias': [True, False],
})))
@backend.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product([
[{'use_cuda': True}],
# Without cuDNN
testing.product({
'use_cudnn': ['never'],
})
# With cuDNN
+ testing.product({
'use_cudnn': ['always'],
'cudnn_deterministic': [True, False],
'autotune': [True, False],
})]))
class TestConvolution2DFunction(unittest.TestCase):
def setUp(self):
batches = 2
in_channels_a_group = 3
out_channels_a_group = 2
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
self.stride = 2
self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(self.W_dtype)
if self.nobias:
b = None
else:
b = numpy.random.uniform(
-1, 1, out_channels).astype(self.x_dtype)
x = numpy.random.uniform(
-1, 1, (batches, in_channels, 4, 3)).astype(self.x_dtype)
if self.cover_all:
gy = numpy.random.uniform(
-1, 1, (batches, out_channels, 3, 2)).astype(self.x_dtype)
else:
gy = numpy.random.uniform(
-1, 1, (batches, out_channels, 2, 2)).astype(self.x_dtype)
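# Note: with cover_all=True the expected output map is 3x2 instead of 2x2,
# presumably because stride-2 windows that only partially cover the input
# are kept.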
ggx = numpy.random.uniform(-1, 1, x.shape).astype(
self.x_dtype)
ggW = numpy.random.uniform(-1, 1, W.shape).astype(
self.W_dtype)
ggb = None if b is None else numpy.random.uniform(
-1, 1, b.shape).astype(self.x_dtype)
self.inputs = [x, W, b]
self.grad_outputs = [gy]
self.grad_grad_inputs = [ggx, ggW, ggb]
def forward_cpu(self, inputs):
x, W, b = inputs
x_cpu = chainer.Variable(x)
W_cpu = chainer.Variable(W)
b_cpu = None if b is None else chainer.Variable(b)
with chainer.using_config('use_ideep', 'never'):
y_cpu = F.convolution_2d(
x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
return y_cpu,
def check_forward(self, inputs, backend_config):
y_expected, = self.forward_cpu(inputs)
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
x, W, b = inputs
x = chainer.Variable(x)
W = chainer.Variable(W)
b = None if b is None else chainer.Variable(b)
with backend_config:
y_actual = F.convolution_2d(
x, W, b, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
testing.assert_allclose(
y_expected.data, y_actual.data, atol=5e-4, rtol=5e-3)
def test_forward(self, backend_config):
self.check_forward(self.inputs, backend_config)
def check_backward(self, inputs, grad_outputs, backend_config):
xp = backend_config.xp
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
x_data, W_data, b_data = inputs
y_grad, = grad_outputs
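        # For the non-contiguous case, rebuild the arrays in Fortran order (and
        # the bias as a strided view) so the op is exercised with
        # non-contiguous inputs.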
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
W_data = xp.asfortranarray(W_data)
y_grad = xp.asfortranarray(y_grad)
assert not x_data.flags.c_contiguous
assert not W_data.flags.c_contiguous
assert not y_grad.flags.c_contiguous
if b_data is not None:
b = xp.empty((len(b_data) * 2,), dtype=b_data.dtype)
b[::2] = b_data
b_data = b[::2]
assert not b_data.flags.c_contiguous
args = (x_data, W_data)
if b_data is not None:
args = args + (b_data,)
def f(*args):
return F.convolution_2d(*args, stride=self.stride, pad=self.pad,
cover_all=self.cover_all,
dilate=self.dilate, groups=self.groups)
with backend_config:
gradient_check.check_backward(
f, args, y_grad, dtype='d', atol=5e-4, rtol=5e-3)
@condition.retry(3)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
def check_double_backward(
self, inputs, grad_outputs, grad_grad_inputs, backend_config):
xp = backend_config.xp
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
grad_grad_inputs = cuda.to_gpu(grad_grad_inputs)
x_data, W_data, b_data = inputs
y_grad, = grad_outputs
x_grad_grad, W_grad_grad, b_grad_grad = grad_grad_inputs
if not self.c_contiguous:
x_data = xp.asfortranarray(x_data)
W_data = xp.asfortranarray(W_data)
y_grad = xp.asfortranarray(y_grad)
x_grad_grad = xp.asfortranarray(x_grad_grad)
W_grad_grad = xp.asfortranarray(W_grad_grad)
assert not x_data.flags.c_contiguous
assert not W_data.flags.c_contiguous
assert not y_grad.flags.c_contiguous
assert not x_grad_grad.flags.c_contiguous
assert not W_grad_grad.flags.c_contiguous
if b_data is not None:
b = xp.empty((len(b_data) * 2,), dtype=b_data.dtype)
b[::2] = b_data
b_data = b[::2]
assert not b_data.flags.c_contiguous
ggb = xp.empty((len(b_data) * 2,), dtype=b_data.dtype)
ggb[::2] = b_grad_grad
b_grad_grad = ggb[::2]
assert not b_grad_grad.flags.c_contiguous
args = (x_data, W_data)
grad_grads = (x_grad_grad, W_grad_grad)
if b_data is not None:
args = args + (b_data,)
grad_grads = grad_grads + (b_grad_grad,)
def f(*args):
y = F.convolution_2d(*args, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
return y * y # make the function nonlinear
with backend_config:
gradient_check.check_double_backward(
f, args, y_grad, grad_grads,
dtype='d', atol=5e-3, rtol=5e-2)
@condition.retry(3)
def test_double_backward(self, backend_config):
self.check_double_backward(
self.inputs, self.grad_outputs, self.grad_grad_inputs,
backend_config)
@testing.parameterize(*(testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [False, True],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [1],
'groups': [1, 2],
}) + testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'cudnn_deterministic': [False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'dilate': [2],
'groups': [1, 2],
})))
@attr.cudnn
class TestConvolution2DCudnnCall(unittest.TestCase):
def setUp(self):
batches = 2
in_channels_a_group = 3
out_channels_a_group = 2
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
self.stride = 2
self.pad = (int(kh / 2) * self.dilate, int(kw / 2) * self.dilate)
self.x = cuda.cupy.random.uniform(
-1, 1, (batches, in_channels, 4, 3)).astype(self.dtype)
self.W = cuda.cupy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(
-1, 1, (batches, out_channels, 2, 2)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.should_call_cudnn = chainer.should_use_cudnn('>=auto')
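        # Dilated convolution needs cuDNN >= 6.0 and grouped convolution needs
        # cuDNN >= 7.0; below those versions cuDNN is not expected to be called.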
if self.dilate > 1 and cuda.cuda.cudnn.getVersion() < 6000:
self.should_call_cudnn = False
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def forward(self):
x = chainer.Variable(self.x)
W = chainer.Variable(self.W)
return F.convolution_2d(x, W, None, stride=self.stride, pad=self.pad,
dilate=self.dilate, groups=self.groups)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
with testing.patch('cupy.cudnn.convolution_forward') as func:
self.forward()
self.assertEqual(func.called, self.should_call_cudnn)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with chainer.using_config('cudnn_deterministic',
self.cudnn_deterministic):
y = self.forward()
y.grad = self.gy
name = 'cupy.cudnn.convolution_backward_data'
with testing.patch(name) as func:
y.backward()
self.assertEqual(func.called, self.should_call_cudnn)
@testing.parameterize(*testing.product({
'c_contiguous': [True, False],
'nobias': [True, False],
'groups': [1, 2],
}))
@attr.gpu
@attr.cudnn
class TestConvolution2DFunctionCudnnDeterministic(unittest.TestCase):
def setUp(self):
self.stride = 2
self.pad = 1
batch_sz = 2
in_channels_a_group = 64
out_channels_a_group = 64
in_channels = in_channels_a_group * self.groups
out_channels = out_channels_a_group * self.groups
kh, kw = (3, 3)
in_h, in_w = (32, 128)
out_h, out_w = (16, 64)
# should be same types for cudnn test
x_dtype = numpy.float32
W_dtype = numpy.float32
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels_a_group)),
(out_channels, in_channels_a_group, kh, kw)).astype(W_dtype)
self.b = numpy.random.uniform(-1, 1, out_channels).astype(x_dtype)
self.x = numpy.random.uniform(
-1, 1, (batch_sz, in_channels, in_h, in_w)).astype(x_dtype)
self.gy = numpy.random.uniform(
-1, 1, (batch_sz, out_channels, out_h, out_w)).astype(x_dtype)
self.should_call_cudnn = True
if self.groups > 1 and cuda.cuda.cudnn.getVersion() < 7000:
self.should_call_cudnn = False
def test_called(self):
with testing.patch(
'cupy.cudnn.convolution_backward_filter', autospec=True) as f:
# cuDNN version >= v3 supports `cudnn_deterministic` option
self._run()
# in Convolution2DFunction.backward_gpu()
assert f.called == self.should_call_cudnn
def test_cudnn_deterministic(self):
x1, W1, b1, y1 = self._run()
x2, W2, b2, y2 = self._run()
cuda.cupy.testing.assert_array_equal(x1.grad, x2.grad)
cuda.cupy.testing.assert_array_equal(y1.data, y2.data)
cuda.cupy.testing.assert_array_equal(W1.grad, W2.grad)
def _contiguous(self, x_data, W_data, b_data, gy_data):
if not self.c_contiguous:
x_data = numpy.asfortranarray(x_data)
W_data = numpy.asfortranarray(W_data)
gy_data = numpy.asfortranarray(gy_data)
self.assertFalse(x_data.flags.c_contiguous)
self.assertFalse(W_data.flags.c_contiguous)
self.assertFalse(gy_data.flags.c_contiguous)
b = numpy.empty((len(b_data) * 2,), dtype=self.b.dtype)
b[::2] = b_data
b_data = b[::2]
self.assertFalse(b_data.flags.c_contiguous)
return x_data, W_data, b_data, gy_data
def _run(self):
with chainer.using_config('use_cudnn', 'always'):
with chainer.using_config('cudnn_deterministic', True):
# verify data continuity and move to gpu
x_data, W_data, b_data, gy_data = tuple(
cuda.to_gpu(data) for data in self._contiguous(
self.x, self.W, self.b, self.gy))
x, W, b, y = self._run_forward(x_data, W_data, b_data)
y.grad = gy_data
y.backward()
return x, W, b, y
def _run_forward(self, x_data, W_data, b_data):
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
b = None if self.nobias else chainer.Variable(b_data)
y = F.convolution_2d(x, W, b, stride=self.stride, pad=self.pad,
cover_all=False, groups=self.groups)
return x, W, b, y
class TestConvolution2DBackwardNoncontiguousGradOutputs(unittest.TestCase):
# NumPy raises an error when the inputs of dot operation are not
# contiguous. This test ensures this issue is correctly handled.
# (https://github.com/chainer/chainer/issues/2744)
    # This test depends on the fact that backward() of F.sum generates
# a non-contiguous array.
def test_1(self):
n_batches = 2
in_channels = 3
out_channels = 1 # important
x_shape = (n_batches, in_channels, 10, 10)
w_shape = (out_channels, in_channels, 3, 3)
x = numpy.ones(x_shape, numpy.float32)
w = numpy.ones(w_shape, numpy.float32)
y = F.convolution_2d(x, chainer.Variable(w))
z = F.sum(y)
z.backward()
testing.run_module(__name__, __file__)
| mit | -1,979,272,162,736,530,700 | 36.60401 | 78 | 0.560051 | false |
HookTeam/learning_proj | arcanoid.py | 1 | 11082 |
import pygame
from pygame.locals import *
from lib.gameelements import *
__version__ = "0.1.1"
__author__ = 'MaxA <[email protected]>'
WINDOWWIDTH = 480
WINDOWHEIGHT = 640
BARWIDTH = 70
BARHEIGHT = 20
BARCOLOR = (180, 180, 180)
BASELINE = 10
BRICKWIDTH = 55
BRICKHEIGHT = 20
BRICKINTERVAL = 4
BRICKCOLOR = (0, 180, 0)
FRAMECOLOR = (180, 0, 0)
FRAMEWIDTH = 2
BACKGROUNDCOLOR = (0, 0, 0)
GAMEFIELDDWIDTH = WINDOWWIDTH - 2 * FRAMEWIDTH
GAMEFIELDDHEIGHT = WINDOWHEIGHT - FRAMEWIDTH
BALLRADIUS = 10
BALLCOLOR = (0, 0, 180)
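# Each level is a 16x8 grid of cells; a cell of (1, 1) places a brick and
# (0, 0) leaves the slot empty. Only the first element of each tuple is
# checked when the level is built.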
levels = [
[
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]
],
[
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)]
],
[
[(1, 1), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (1, 1)],
[(1, 1), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (1, 1)],
[(1, 1), (1, 1), (0, 0), (0, 0), (0, 0), (0, 0), (1, 1), (1, 1)],
[(1, 1), (1, 1), (0, 0), (0, 0), (0, 0), (0, 0), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (0, 0), (0, 0), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (0, 0), (0, 0), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1)],
[(0, 0), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (0, 0)],
[(0, 0), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (0, 0)],
[(0, 0), (0, 0), (1, 1), (1, 1), (1, 1), (1, 1), (0, 0), (0, 0)],
[(0, 0), (0, 0), (1, 1), (1, 1), (1, 1), (1, 1), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (1, 1), (1, 1), (0, 0), (0, 0), (0, 0)],
[(0, 0), (0, 0), (0, 0), (1, 1), (1, 1), (0, 0), (0, 0), (0, 0)]
]
]
game_states = {
'run_init': 0,
'run': 1,
'title': 2,
'game_over': 3,
'pause': 4,
'settings': 5
}
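# Rendering helper: a Ball is drawn as a circle, a Brick (including the
# player's bar) as a rectangle.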
def drawobject(surface, element):
if isinstance(element, Ball):
x_pos, y_pos = element.position
pygame.draw.circle(surface, BALLCOLOR, (int(x_pos), int(y_pos)), int(element.radius))
elif isinstance(element, Brick):
x_pos, y_pos = element.position
pygame.draw.rect(surface, element.color, (x_pos, y_pos, element.width, element.height))
class ArcanoidGame:
def __init__(self, game_speed=30):
self.game_state = game_states['title']
self.game_speed = game_speed
self.game_level = 0
        pygame.init() # needed before set_mode; harmless if already initialised elsewhere
        self.time_dispath = pygame.time.Clock()
self.display = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0)
pygame.mouse.set_visible(False)
@property
def game_speed(self):
return self.__game_speed
@game_speed.setter
def game_speed(self, game_speed):
self.__game_speed = game_speed
def init_game(self):
self.game_state = game_states['run_init']
pygame.mouse.set_pos([WINDOWWIDTH / 2, BASELINE])
ball_position = (WINDOWWIDTH / 2, WINDOWHEIGHT - BASELINE - BARHEIGHT - BALLRADIUS)
ball_radius = BALLRADIUS
ball_color = BALLCOLOR
ball_vector = (1, -1)
bar_position = (WINDOWWIDTH / 2 - BARWIDTH / 2, WINDOWHEIGHT - BASELINE - BARHEIGHT)
bar_width = BARWIDTH
bar_height = BARHEIGHT
bar_color = BARCOLOR
self.ball = Ball(ball_position, ball_radius, ball_color, ball_vector)
self.bar = Brick(bar_position, bar_width, bar_height, bar_color)
self.bricksmap = []
num_rows = len(levels[self.game_level])
for i in range(num_rows):
num_columns = len(levels[self.game_level][i])
for j in range(num_columns):
brick = levels[self.game_level][i][j]
if brick[0] == 0:
continue
position_x = FRAMEWIDTH + (j + 1) * BRICKINTERVAL + j * BRICKWIDTH
position_y = FRAMEWIDTH + (i + 1) * BRICKINTERVAL + i * BRICKHEIGHT
game_brick = Brick((position_x, position_y), BRICKWIDTH, BRICKHEIGHT, BRICKCOLOR)
self.bricksmap.append(game_brick)
def draw_title(self):
pass
def draw_game_over(self):
pass
def draw_pause(self):
pass
def draw_settings(self):
pass
def update_state(self):
mouse_x, mouse_y = pygame.mouse.get_pos()
if mouse_x < BARWIDTH / 2 + FRAMEWIDTH:
mouse_x = BARWIDTH / 2 + FRAMEWIDTH
elif mouse_x > WINDOWWIDTH - BARWIDTH / 2 - FRAMEWIDTH:
mouse_x = WINDOWWIDTH - BARWIDTH / 2 - FRAMEWIDTH
self.bar.position = (mouse_x - BARWIDTH / 2, WINDOWHEIGHT - BASELINE - BARHEIGHT)
if self.game_state == game_states['run_init']:
self.ball.position = (mouse_x, WINDOWHEIGHT - BASELINE - BARHEIGHT - BALLRADIUS)
else:
self.ball.move()
if self.ball.intersect(FRAMEWIDTH, 'VERTICAL'):
self.ball.bounce('VERTICAL')
if self.ball.intersect(WINDOWWIDTH - FRAMEWIDTH, 'VERTICAL'):
self.ball.bounce('VERTICAL')
if self.ball.intersect(FRAMEWIDTH, 'HORIZONTAL'):
self.ball.bounce('HORIZONTAL')
if self.ball.intersect(WINDOWHEIGHT, 'HORIZONTAL'):
self.game_state = game_states['game_over']
return
top, bottom, left, right = self.ball.intersect(self.bar)
if top:
self.ball.bounce('HORIZONTAL')
if left and self.ball.vector[0] > 0:
self.ball.bounce('VERTICAL')
elif right and self.ball.vector[0] < 0:
self.ball.bounce('VERTICAL')
        # Iterate over a copy so that bricks can be removed from the list safely.
        for brick in self.bricksmap[:]:
top, bottom, left, right = self.ball.intersect(brick)
if top or bottom:
self.ball.bounce('HORIZONTAL')
if left and self.ball.vector[0] > 0:
self.ball.bounce('VERTICAL')
elif right and self.ball.vector[0] < 0:
self.ball.bounce('VERTICAL')
if top or right or left or bottom:
self.bricksmap.remove(brick)
if len(self.bricksmap) == 0:
self.game_level += 1
self.init_game()
def draw_scene(self):
self.display.fill(BACKGROUNDCOLOR)
pygame.draw.rect(self.display, FRAMECOLOR, (0, 0, FRAMEWIDTH, WINDOWHEIGHT))
pygame.draw.rect(self.display, FRAMECOLOR, (0, 0, WINDOWWIDTH, FRAMEWIDTH))
pygame.draw.rect(self.display, FRAMECOLOR, (WINDOWWIDTH - FRAMEWIDTH, 0, FRAMEWIDTH, WINDOWHEIGHT))
drawobject(self.display, self.bar)
drawobject(self.display, self.ball)
for brick in self.bricksmap:
drawobject(self.display, brick)
def run(self):
still_going = True
while still_going:
game_events = pygame.event.get()
for event in game_events:
if event.type == QUIT:
still_going = False
elif self.game_state == game_states['title']:
if event.type == KEYDOWN and event.key == K_RETURN:
self.init_game()
elif event.type == KEYDOWN and event.key == K_ESCAPE:
still_going = False
elif self.game_state == game_states['run_init']:
if event.type == MOUSEBUTTONUP and event.button == 1:
self.game_state = game_states['run']
elif self.game_state == game_states['run']:
if event.type == KEYDOWN and event.key == K_ESCAPE:
self.game_state = game_states['pause']
elif self.game_state == game_states['pause']:
if event.type == KEYDOWN and event.key == K_ESCAPE:
self.game_state = game_states['run']
elif self.game_state == game_states['game_over']:
if event.type == KEYDOWN and event.key == K_ESCAPE:
self.game_state = game_states['title']
if self.game_state == game_states['title']:
self.draw_title()
elif self.game_state == game_states['game_over']:
self.draw_game_over()
elif self.game_state == game_states['pause']:
self.draw_pause()
elif self.game_state == game_states['settings']:
self.draw_settings()
elif self.game_state == game_states['run_init'] or self.game_state == game_states['run']:
self.update_state()
self.draw_scene()
else:
self.draw_title()
pygame.display.flip()
self.time_dispath.tick(self.game_speed)
game = ArcanoidGame(200)
game.run()
| gpl-2.0 | 3,227,290,233,103,899,600 | 37.884211 | 107 | 0.453258 | false |
kmggh/python-guess-number | test_player.py | 1 | 3149 | #!/usr/bin/env python
# Fri 2013-05-03 23:44:30 -0400
# Copyright (c) 2013 by Ken Guyton. All Rights Reserved.
"""Test the guess binary player classes."""
__author__ = 'Ken Guyton'
import guess_num2
import player
import unittest
NUM_TO_GUESS = 42
HIGH_GUESS = 50
LOW_GUESS = 21
# Binary search.
EXPECTED_SEQUENCE = (
(50, 1),
(25, -1),
(37, -1),
(43, 1),
(40, -1),
(41, -1),
(42, 0))
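# Guesses a hypothetical random player might make; currently not referenced
# by the tests in this file.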
RANDOM_SEQUENCE = (
(15, 1),
(14, 1),
(7, 1),
(6, 1),
(4, 0)
)
class FakeRandom(object):
"""Fake the random module."""
def randrange(self, min_val, max_val):
"""Return a known and predictable number for testing."""
return NUM_TO_GUESS
class TestBinaryPlayer(unittest.TestCase):
def setUp(self):
self.game = guess_num2.Game(random_mod=FakeRandom())
self.player = player.BinaryPlayer(self.game)
def test_create(self):
self.assertNotEqual(self.player, None)
self.assertEqual(self.player.game, self.game)
def test_sequence(self):
play_list = list(self.player.play())
expected_list = list(EXPECTED_SEQUENCE)
self.assertEqual(play_list, expected_list)
def test_split_range(self):
self.assertEqual(self.player.split_range(0, 100), 50)
self.assertEqual(self.player.split_range(25, 50), 37)
self.assertEqual(self.player.split_range(38, 50), 44)
self.assertEqual(self.player.split_range(38, 44), 41)
self.assertEqual(self.player.split_range(41, 44), 42)
self.assertEqual(self.player.split_range(41, 43), 42)
self.assertEqual(self.player.split_range(0, 1), 0)
self.assertEqual(self.player.split_range(0, 2), 1)
self.assertEqual(self.player.split_range(0, 3), 1)
def test_guess(self):
self.assertEqual(self.player.guess(0, 100), 50)
self.assertEqual(self.player.guess(25, 50), 37)
self.assertEqual(self.player.guess(38, 50), 44)
self.assertEqual(self.player.guess(38, 44), 41)
self.assertEqual(self.player.guess(41, 44), 42)
self.assertEqual(self.player.guess(41, 43), 42)
self.assertEqual(self.player.guess(0, 1), 0)
self.assertEqual(self.player.guess(0, 2), 1)
self.assertEqual(self.player.guess(0, 3), 1)
class TestRandomPlayer(unittest.TestCase):
def setUp(self):
self.game = guess_num2.Game(random_mod=FakeRandom())
self.player = player.RandomPlayer(self.game,
random_mod=FakeRandom())
def test_create(self):
self.assertNotEqual(self.player, None)
self.assertEqual(self.player.game, self.game)
def test_guess(self):
self.assertEqual(self.player.guess(0, 100), 42)
self.assertEqual(self.player.guess(25, 50), 42)
self.assertEqual(self.player.guess(38, 50), 42)
self.assertEqual(self.player.guess(38, 44), 42)
self.assertEqual(self.player.guess(41, 44), 42)
self.assertEqual(self.player.guess(41, 43), 42)
self.assertEqual(self.player.guess(0, 1), 42)
self.assertEqual(self.player.guess(0, 2), 42)
self.assertEqual(self.player.guess(0, 3), 42)
def test_sequence(self):
play_list = list(self.player.play())
self.assertEqual(play_list, [(42, 0)])
if __name__ == '__main__':
unittest.main()
| artistic-2.0 | 2,927,562,909,394,932,000 | 28.157407 | 69 | 0.66402 | false |
Code-ReaQtor/NoCase | nocase/nocase.py | 1 | 2103 | #!/usr/bin/python
__author__ = 'Ronie Martinez'
import re
import inspect
import sys
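# Patterns that recognise camelCase and snake_case identifiers, plus a
# splitter used to break a camelCase name into its words.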
_camel = re.compile('^[a-z0-9]+([A-Z][a-z0-9]+)+$')
_snake = re.compile('^[a-z0-9]+(_[a-z0-9]+)+$')
_camel_splitter = re.compile('([A-Z][a-z0-9]+)')
def _to_snake_case(string):
return '_'.join([m.lower() for m in _camel_splitter.split(string) if m])
def _to_camel_case(string):
words = string.split('_')
for i in range(1, len(words)):
words[i] = words[i].title()
return ''.join(words)
class NoCase(object):
"""
NoCase is a class that automatically converts method calls from camelCase to snake_case and vice versa.
Sample usage:
class MyClass(NoCase):
def myMethod(self):
return 'my method'
def my_other_method(self):
return 'my other method'
my_class = MyClass()
print(my_class.my_method()) # prints 'my method'
print(my_class.myOtherMethod()) # prints 'my other method'
"""
def __getattr__(self, name):
if _camel.match(name):
return getattr(self, _to_snake_case(name))
elif _snake.match(name):
return getattr(self, _to_camel_case(name))
raise AttributeError(name)
def register_no_case(module=sys.modules['__main__']):
"""
    Finds all functions in the given module, 'module', and registers each one under its equivalent camelCase or snake_case name.
Sample usage:
from nocase import register_no_case
def myMethod():
return 'my method'
def my_other_method():
return 'my other method'
register_no_case()
print(my_method())
print(myOtherMethod())
:param module: module name to inspect and to where new functions will be registered
"""
functions = inspect.getmembers(module, inspect.isfunction)
for name, function in functions:
if _camel.match(name):
setattr(module, _to_snake_case(name), function)
elif _snake.match(name):
setattr(module, _to_camel_case(name), function)
registerNoCase = register_no_case # CamelCase call to register_no_case
| lgpl-2.1 | 9,000,033,571,745,704,000 | 26.671053 | 129 | 0.624346 | false |
tboyce1/home-assistant | homeassistant/components/android_ip_webcam.py | 2 | 9799 | """
Support for IP Webcam, an Android app that acts as a full-featured webcam.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/android_ip_webcam/
"""
import asyncio
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, CONF_USERNAME, CONF_PASSWORD,
CONF_SENSORS, CONF_SWITCHES, CONF_TIMEOUT, CONF_SCAN_INTERVAL,
CONF_PLATFORM)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_send, async_dispatcher_connect)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from homeassistant.components.camera.mjpeg import (
CONF_MJPEG_URL, CONF_STILL_IMAGE_URL)
REQUIREMENTS = ['pydroid-ipcam==0.8']
_LOGGER = logging.getLogger(__name__)
ATTR_AUD_CONNS = 'Audio Connections'
ATTR_HOST = 'host'
ATTR_VID_CONNS = 'Video Connections'
CONF_MOTION_SENSOR = 'motion_sensor'
DATA_IP_WEBCAM = 'android_ip_webcam'
DEFAULT_NAME = 'IP Webcam'
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 10
DOMAIN = 'android_ip_webcam'
SCAN_INTERVAL = timedelta(seconds=10)
SIGNAL_UPDATE_DATA = 'android_ip_webcam_update'
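# Map the setting/sensor keys reported by the IP Webcam API to the
# human-readable names used for the Home Assistant entities.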
KEY_MAP = {
'audio_connections': 'Audio Connections',
'adet_limit': 'Audio Trigger Limit',
'antibanding': 'Anti-banding',
'audio_only': 'Audio Only',
'battery_level': 'Battery Level',
'battery_temp': 'Battery Temperature',
'battery_voltage': 'Battery Voltage',
'coloreffect': 'Color Effect',
'exposure': 'Exposure Level',
'exposure_lock': 'Exposure Lock',
'ffc': 'Front-facing Camera',
'flashmode': 'Flash Mode',
'focus': 'Focus',
'focus_homing': 'Focus Homing',
'focus_region': 'Focus Region',
'focusmode': 'Focus Mode',
'gps_active': 'GPS Active',
'idle': 'Idle',
'ip_address': 'IPv4 Address',
'ipv6_address': 'IPv6 Address',
'ivideon_streaming': 'Ivideon Streaming',
'light': 'Light Level',
'mirror_flip': 'Mirror Flip',
'motion': 'Motion',
'motion_active': 'Motion Active',
'motion_detect': 'Motion Detection',
'motion_event': 'Motion Event',
'motion_limit': 'Motion Limit',
'night_vision': 'Night Vision',
'night_vision_average': 'Night Vision Average',
'night_vision_gain': 'Night Vision Gain',
'orientation': 'Orientation',
'overlay': 'Overlay',
'photo_size': 'Photo Size',
'pressure': 'Pressure',
'proximity': 'Proximity',
'quality': 'Quality',
'scenemode': 'Scene Mode',
'sound': 'Sound',
'sound_event': 'Sound Event',
'sound_timeout': 'Sound Timeout',
'torch': 'Torch',
'video_connections': 'Video Connections',
'video_chunk_len': 'Video Chunk Length',
'video_recording': 'Video Recording',
'video_size': 'Video Size',
'whitebalance': 'White Balance',
'whitebalance_lock': 'White Balance Lock',
'zoom': 'Zoom'
}
ICON_MAP = {
'audio_connections': 'mdi:speaker',
'battery_level': 'mdi:battery',
'battery_temp': 'mdi:thermometer',
'battery_voltage': 'mdi:battery-charging-100',
'exposure_lock': 'mdi:camera',
'ffc': 'mdi:camera-front-variant',
'focus': 'mdi:image-filter-center-focus',
'gps_active': 'mdi:crosshairs-gps',
'light': 'mdi:flashlight',
'motion': 'mdi:run',
'night_vision': 'mdi:weather-night',
'overlay': 'mdi:monitor',
'pressure': 'mdi:gauge',
'proximity': 'mdi:map-marker-radius',
'quality': 'mdi:quality-high',
'sound': 'mdi:speaker',
'sound_event': 'mdi:speaker',
'sound_timeout': 'mdi:speaker',
'torch': 'mdi:white-balance-sunny',
'video_chunk_len': 'mdi:video',
'video_connections': 'mdi:eye',
'video_recording': 'mdi:record-rec',
'whitebalance_lock': 'mdi:white-balance-auto'
}
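# Settings exposed as switch entities and readings exposed as sensor entities
# when the user does not configure them explicitly.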
SWITCHES = ['exposure_lock', 'ffc', 'focus', 'gps_active', 'night_vision',
'overlay', 'torch', 'whitebalance_lock', 'video_recording']
SENSORS = ['audio_connections', 'battery_level', 'battery_temp',
'battery_voltage', 'light', 'motion', 'pressure', 'proximity',
'sound', 'video_connections']
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL):
cv.time_period,
vol.Inclusive(CONF_USERNAME, 'authentication'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'authentication'): cv.string,
vol.Optional(CONF_SWITCHES, default=None):
vol.All(cv.ensure_list, [vol.In(SWITCHES)]),
vol.Optional(CONF_SENSORS, default=None):
vol.All(cv.ensure_list, [vol.In(SENSORS)]),
vol.Optional(CONF_MOTION_SENSOR, default=None): cv.boolean,
})])
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the IP Webcam component."""
from pydroid_ipcam import PyDroidIPCam
webcams = hass.data[DATA_IP_WEBCAM] = {}
websession = async_get_clientsession(hass)
@asyncio.coroutine
def async_setup_ipcamera(cam_config):
"""Set up an IP camera."""
host = cam_config[CONF_HOST]
username = cam_config.get(CONF_USERNAME)
password = cam_config.get(CONF_PASSWORD)
name = cam_config[CONF_NAME]
interval = cam_config[CONF_SCAN_INTERVAL]
switches = cam_config[CONF_SWITCHES]
sensors = cam_config[CONF_SENSORS]
motion = cam_config[CONF_MOTION_SENSOR]
# Init ip webcam
cam = PyDroidIPCam(
hass.loop, websession, host, cam_config[CONF_PORT],
username=username, password=password,
timeout=cam_config[CONF_TIMEOUT]
)
if switches is None:
switches = [setting for setting in cam.enabled_settings
if setting in SWITCHES]
if sensors is None:
sensors = [sensor for sensor in cam.enabled_sensors
if sensor in SENSORS]
sensors.extend(['audio_connections', 'video_connections'])
if motion is None:
motion = 'motion_active' in cam.enabled_sensors
@asyncio.coroutine
def async_update_data(now):
"""Update data from IP camera in SCAN_INTERVAL."""
yield from cam.update()
async_dispatcher_send(hass, SIGNAL_UPDATE_DATA, host)
async_track_point_in_utc_time(
hass, async_update_data, utcnow() + interval)
yield from async_update_data(None)
# Load platforms
webcams[host] = cam
mjpeg_camera = {
CONF_PLATFORM: 'mjpeg',
CONF_MJPEG_URL: cam.mjpeg_url,
CONF_STILL_IMAGE_URL: cam.image_url,
CONF_NAME: name,
}
if username and password:
mjpeg_camera.update({
CONF_USERNAME: username,
CONF_PASSWORD: password
})
hass.async_add_job(discovery.async_load_platform(
hass, 'camera', 'mjpeg', mjpeg_camera, config))
if sensors:
hass.async_add_job(discovery.async_load_platform(
hass, 'sensor', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
CONF_SENSORS: sensors,
}, config))
if switches:
hass.async_add_job(discovery.async_load_platform(
hass, 'switch', DOMAIN, {
CONF_NAME: name,
CONF_HOST: host,
CONF_SWITCHES: switches,
}, config))
if motion:
hass.async_add_job(discovery.async_load_platform(
hass, 'binary_sensor', DOMAIN, {
CONF_HOST: host,
CONF_NAME: name,
}, config))
tasks = [async_setup_ipcamera(conf) for conf in config[DOMAIN]]
if tasks:
yield from asyncio.wait(tasks, loop=hass.loop)
return True
class AndroidIPCamEntity(Entity):
"""The Android device running IP Webcam."""
def __init__(self, host, ipcam):
"""Initialize the data object."""
self._host = host
self._ipcam = ipcam
@asyncio.coroutine
def async_added_to_hass(self):
"""Register update dispatcher."""
@callback
def async_ipcam_update(host):
"""Update callback."""
if self._host != host:
return
self.async_schedule_update_ha_state(True)
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_DATA, async_ipcam_update)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self._ipcam.available
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attr = {ATTR_HOST: self._host}
if self._ipcam.status_data is None:
return state_attr
state_attr[ATTR_VID_CONNS] = \
self._ipcam.status_data.get('video_connections')
state_attr[ATTR_AUD_CONNS] = \
self._ipcam.status_data.get('audio_connections')
return state_attr
| apache-2.0 | 3,411,025,067,473,729,500 | 32.443686 | 77 | 0.614961 | false |
Huyuwei/tvm | nnvm/python/nnvm/frontend/onnx.py | 1 | 36119 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines
"""ONNX: Open Neural Network Exchange frontend."""
from __future__ import absolute_import as _abs
import numpy as np
import tvm
from .. import symbol as _sym
from .common import get_nnvm_op, Renamer, SymbolTable, AttrConverter as AttrCvt
from .onnx_caffe2_utils import dimension_picker, dimension_constraint, \
infer_channels, revert_caffe2_pad
__all__ = ['from_onnx']
def onnx_storage_order2layout(storage_order):
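    # ONNX storage_order: 0 means row major (NCHW), 1 means column major (NHWC).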
if storage_order not in (0, 1):
raise tvm.error.OpAttributeInvalid('Mode of storage_order must be either 0 or 1')
return 'NCHW' if storage_order == 0 else 'NHWC'
class OnnxOpConverter(object):
""" A helper class for holding onnx op converters.
"""
@classmethod
def get_converter(cls, opset):
""" Get converter matches given opset.
:param opset: opset from model.
:return: converter, which should be `_impl_vx`. Number x is the biggest
number smaller than or equal to opset belongs to all support versions.
"""
versions = [
int(d.replace('_impl_v', '')) for d in dir(cls) if '_impl_v' in d
]
versions = sorted(versions + [opset])
version = versions[
max([i for i, v in enumerate(versions) if v == opset]) - 1]
if hasattr(cls, '_impl_v{}'.format(version)):
return getattr(cls, '_impl_v{}'.format(version))
raise NotImplementedError(
'opset version {} of {} not implemented'.format(
version, cls.__name__))
class Elemwise(OnnxOpConverter):
""" A helper class for elemwise op converters.
"""
name = ''
@classmethod
def _math_name_picker(cls, suffix):
def _impl(attr):
if attr.get('broadcast', 0):
return 'broadcast_' + suffix
return 'elemwise_' + suffix
return _impl
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Math op take 2 inputs, {} given".format(
len(inputs))
op_name = cls._math_name_picker(cls.name)(attr)
axis = int(attr.get('axis', 0))
conv_ops = ["conv2d", "conv2d_transpose"]
if op_name == 'broadcast_add' and inputs[0].attr('op_name') in conv_ops:
# TODO(zhreshold): remove hard coded infershape
inputs[1] = _sym.expand_dims(inputs[1], axis=axis, num_newaxis=2)
return get_nnvm_op(op_name)(*inputs)
class Pool(OnnxOpConverter):
""" A helper class for pool op converters.
"""
name = ''
@classmethod
def _impl_v1(cls, inputs, attr, params):
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
'kernel_shape': 'pool_size',
'pads': ('padding', (0, 0), revert_caffe2_pad)
},
# very weird attributes here in onnx, force check
ignores=['dilations'],
# TODO(zhreshold): make sure ceil_mode in onnx, and layout?
extras={'ceil_mode': False},
custom_check=dimension_constraint())(inputs, attr, params)
class Absolute(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _sym.relu(inputs[0]) + _sym.relu(_sym.negative(inputs[0]))
class Add(Elemwise):
name = 'add'
class AveragePool(Pool):
name = 'avg_pool'
class BatchNorm(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
# TODO(zhreshold): 'spatial' is not properly handled here.
return AttrCvt(
op_name='batch_norm',
disables=['momentum'],
ignores=['spatial', 'is_test', 'consumed_inputs'])(inputs, attr,
params)
class Conv(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
# get number of channels
channels = infer_channels(inputs[1], params)
attr['channels'] = channels
return AttrCvt(
op_name=dimension_picker('conv'),
transforms={
'kernel_shape': 'kernel_size',
'dilations': ('dilation', (0, 0)),
'pads': ('padding', (0, 0), revert_caffe2_pad),
'group': ('groups', 1)
},
extras={'use_bias': len(inputs) == 3},
custom_check=dimension_constraint())(inputs, attr, params)
class ConvTranspose(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
# get number of channels
channels = infer_channels(inputs[1], params, True)
attr['channels'] = channels
groups = attr.pop('group')
attr['groups'] = groups
return AttrCvt(
op_name=dimension_picker('conv', '_transpose'),
transforms={
'kernel_shape': 'kernel_size',
'dilations': ('dilation', (0, 0)),
'pads': ('padding', (0, 0), revert_caffe2_pad)
},
disables=['output_shape'],
extras={'use_bias': len(inputs) == 3},
custom_check=dimension_constraint())(inputs, attr, params)
class Div(Elemwise):
name = 'div'
class Elu(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
return -alpha * _sym.relu(1 - _sym.exp(inputs[0])) + _sym.relu(
inputs[0])
class Gemm(OnnxOpConverter):
""" Operator converter for Gemm.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 3, "Gemm op take 3 inputs, {} given".format(
len(inputs))
# Y = alpha * A * B + beta * C
alpha = float(attr.get('alpha', 1.0))
beta = float(attr.get('beta', 1.0))
transA = int(attr.get('transA', 0))
transB = int(attr.get('transB', 0))
# get number of channels
channels = infer_channels(inputs[1], params, not transB)
if transA:
inputs[0] = _sym.transpose(inputs[0], axes=(1, 0))
if not transB:
inputs[1] = _sym.transpose(inputs[1], axes=(1, 0))
inputs[0] = _sym.flatten(inputs[0])
return _sym.dense(
alpha * inputs[0], inputs[1], beta * inputs[2], units=channels)
class MaxPool(Pool):
""" Operator converter for MaxPool
"""
name = 'max_pool'
@classmethod
def _impl_v8(cls, inputs, attr, params):
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
'kernel_shape': 'pool_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'storage_order': ('layout', 'NCHW', onnx_storage_order2layout),
},
# very weird attributes here in onnx, force check
ignores=['dilations', 'auto_pad'],
# TODO(higumachan): make sure ceil_mode in onnx, and layout?
extras={'ceil_mode': False},
custom_check=dimension_constraint())(inputs, attr, params)
@classmethod
def _impl_v10(cls, inputs, attr, params):
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
'kernel_shape': 'pool_size',
'pads': ('padding', (0, 0), revert_caffe2_pad),
'storage_order': ('layout', 'NCHW', onnx_storage_order2layout),
'ceil_mode': 'ceil_mode'
},
# very weird attributes here in onnx, force check
ignores=['dilations', 'auto_pad'],
custom_check=dimension_constraint())(inputs, attr, params)
class Mul(Elemwise):
name = 'mul'
class Pad(OnnxOpConverter):
""" Operator converter for Pad.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
pad_width = []
pads = attr.pop('paddings')
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i+dims]))
attr['pad_width'] = pad_width
return AttrCvt(
op_name='pad',
transforms={
'value': 'pad_value',
},
ignores=['mode'],
custom_check=(lambda attrs: attrs.get('mode', 'constant').decode("utf-8") == 'constant',
'split mode != constant'))(inputs, attr, params)
@classmethod
def _impl_v2(cls, inputs, attr, params):
pad_width = []
pads = attr.pop('pads')
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i+dims]))
attr['pad_width'] = pad_width
return AttrCvt(
op_name='pad',
transforms={
'value': 'pad_value',
},
ignores=['mode'],
custom_check=(lambda attrs: attrs.get('mode', 'constant').decode("utf-8") == 'constant',
'split mode != constant'))(inputs, attr, params)
class ParametricSoftPlus(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
beta = float(attr.get('beta', 1.0))
return _sym.log(_sym.exp(beta * inputs[0]) + 1) * alpha
class Prelu(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(
len(inputs))
return _sym.prelu(inputs[0], inputs[1])
class Reciprocal(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
return 1.0 / inputs[0]
class Reshape(OnnxOpConverter):
""" Operator converter for Reshape.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _sym.reshape(inputs[0], shape=attr['shape'])
@classmethod
def _impl_v5(cls, inputs, attr, params):
if inputs[1].list_output_names()[0] in params:
shape = tuple(params[inputs[1].list_output_names()[0]].asnumpy())
out = _sym.reshape(inputs[0], shape=shape)
else:
out = _sym.reshape_like(inputs[0], inputs[1])
return out
class Scale(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
scale = float(attr.get('scale', 1.0))
return inputs[0] * scale
class Selu(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.6732))
gamma = float(attr.get('gamma', 1.0507))
return gamma * (
-alpha * _sym.relu(1 - _sym.exp(inputs[0])) + _sym.relu(inputs[0]))
class ScaledTanh(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
beta = float(attr.get('beta', 1.0))
return _sym.tanh(beta * inputs[0]) * alpha
class SoftPlus(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _sym.log(_sym.exp(inputs[0]) + 1)
class Softsign(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
return inputs[0] / (1 + Absolute.get_converter(1)(inputs, attr, params))
class Sub(Elemwise):
name = 'sub'
class Sum(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
# Onnx Sum Operator
for in_index in range(len(inputs) - 1):
inputs[in_index + 1] = _sym.broadcast_add(inputs[in_index],
inputs[in_index + 1])
return inputs[len(inputs) - 1]
class ThresholdedRelu(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
alpha_tensor = _sym.full_like(inputs[0], fill_value=float(alpha))
return _sym.elemwise_mul(inputs[0], _sym.greater(inputs[0], alpha_tensor))
class ImageScaler(OnnxOpConverter):
@classmethod
def _impl_v1(cls, inputs, attr, params):
channelScale = attr['scale']
bias_attr = attr['bias']
bias = SymbolTable().new_const(np.array(bias_attr).reshape([3, 1, 1]))
scaledChannel = _sym.__mul_scalar__(inputs[0], scalar=channelScale)
ret = _sym.broadcast_add(scaledChannel, bias)
return ret
def _broadcast_constraint():
def _broadcast_check(attrs):
if attrs.get('axis', None):
return False
return True
return _broadcast_check, "Specifying broadcast axis not allowed."
def _fully_connected(opset):
def _impl(inputs, attr, params):
# get number of channels
channels = infer_channels(inputs[1], params)
attr['units'] = channels
return AttrCvt('dense', ignores=['axis', 'axis_w'])(inputs, attr)
return _impl
class Upsample(OnnxOpConverter):
""" Operator converter for Upsample (nearest mode).
"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
scales = attr.get('scales')
if not scales:
            # From opset 9 onwards the scales arrive as a second (constant) input instead of an attribute.
            assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs))
input_name = inputs[1].list_input_names()[0]
scales = params[input_name].asnumpy()
inputs = inputs[:1]
assert len(scales) == 4 and scales[0] == 1.0 and scales[1] == 1.0 and scales[2] == scales[3]
mode = attr.get('mode')
if mode == b'nearest':
method = "NEAREST_NEIGHBOR"
elif mode == b'linear':
method = "BILINEAR"
else:
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
return _sym.upsampling(inputs[0], scale=int(scales[-1]), method=method, layout='NCHW')
class Shape(OnnxOpConverter):
""" Operator converter for Shape.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# Result of this operator is prominently used by reshape operator.
# Just pass the input as it is so that reshape_like can be used there.
print("Shape: Differently implemented in NNVM as a bypass (dummy operator)")
return inputs[0]
class Cast(OnnxOpConverter):
""" Operator converter for Cast.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
@classmethod
def _impl_v5(cls, inputs, attr, params):
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
attr['to'] = TENSOR_TYPE_TO_NP_TYPE[attr['to']]
except ImportError as e:
raise ImportError(
"Unable to import onnx.mapping which is required {}".format(e))
return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
class Unsqueeze(OnnxOpConverter):
""" Operator converter for Unsqueeze.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
for axes in attr['axes']:
inputs[0] = _sym.expand_dims(inputs[0], axis=axes, num_newaxis=1)
return inputs[0]
class Split(OnnxOpConverter):
""" Operator converter for Split.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
attr['indices_or_sections'] = []
index = 0
for i in attr['split'][:-1]:
index += i
attr['indices_or_sections'].append(index)
return AttrCvt(
op_name='split',
ignores=['split'])(inputs, attr, params)
class Slice(OnnxOpConverter):
""" Operator converter for Slice.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if isinstance(attr['starts'], int):
attr['starts'] = (attr['starts'],)
attr['ends'] = (attr['ends'],)
try:
# Update the starts and ends according to axes if required.
if isinstance(attr['axes'], int):
attr['axes'] = (attr['axes'],)
if (max(attr['axes']) + 1) != len(attr['axes']):
new_axes = []
new_starts = []
new_ends = []
pop_index = 0
for i in range(max(attr['axes']) + 1):
if i in attr['axes']:
new_axes.append(i)
new_starts.append(attr['starts'][pop_index])
new_ends.append(attr['ends'][pop_index])
pop_index += 1
else:
new_axes.append(i)
new_starts.append(0)
new_ends.append(np.iinfo(np.int32).max)
attr['axes'] = new_axes
attr['starts'] = new_starts
attr['ends'] = new_ends
except KeyError:
pass
return AttrCvt(op_name='strided_slice',
transforms={'starts': 'begin',
'ends': 'end'},
ignores=['axes'])(inputs, attr)
class Gather(OnnxOpConverter):
""" Operator converter for Gather.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 0)
return AttrCvt(op_name='take',
extras={'axis':axis})(inputs, attr)
class LRN(OnnxOpConverter):
""" Operator converter for Local Response Normalization.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
"""LRN support only NCHW format
https://github.com/onnx/onnx/blob/master/docs/Operators.md#LRN
"""
axis = 1
alpha = attr.get('alpha', 0.0001)
beta = attr.get('beta', 0.75)
bias = attr.get('bias', 1.0)
nsize = attr.get('size')
return _sym.lrn(inputs[0], size=nsize, axis=axis,
alpha=alpha, beta=beta, bias=bias)
class Maximum(OnnxOpConverter):
""" Operator converter for Maximum.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not isinstance(inputs, list) or len(inputs) < 2:
raise ValueError("Expect minimum 2 inputs")
_max = inputs[0]
for i in range(1, len(inputs)):
_max = AttrCvt(op_name='broadcast_max')([_max, inputs[i]], {})
return _max
class Minimum(OnnxOpConverter):
""" Operator converter for Minimum.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not isinstance(inputs, list) or len(inputs) < 2:
raise ValueError("Expect minimum 2 inputs")
_min = inputs[0]
for i in range(1, len(inputs)):
_min = AttrCvt(op_name='broadcast_min')([_min, inputs[i]], {})
return _min
class Mean(OnnxOpConverter):
""" Operator converter for Mean.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not isinstance(inputs, list) or len(inputs) < 2:
raise ValueError("Expect minimum 2 inputs")
count = len(inputs)
_sum = inputs[0]
for i in range(1, count):
_sum = AttrCvt(op_name='broadcast_add')([_sum, inputs[i]], {})
return _sum / count
class HardSigmoid(OnnxOpConverter):
""" Operator converter for HardSigmoid.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = attr.get('alpha', 0.2)
beta = attr.get('beta', 0.5)
transformX = (inputs[0] * alpha) + beta
attr = {'a_min':0, 'a_max':1}
return AttrCvt(op_name='clip')([transformX], attr)
class ArgMax(OnnxOpConverter):
""" Operator converter for ArgMax.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 0)
keepdims = attr.get('keepdims', True)
attr = {'axis':axis, 'keepdims':keepdims}
return AttrCvt(op_name='argmax')(inputs, attr)
class ArgMin(OnnxOpConverter):
""" Operator converter for ArgMin.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 0)
keepdims = attr.get('keepdims', True)
attr = {'axis':axis, 'keepdims':keepdims}
return AttrCvt(op_name='argmin')(inputs, attr)
class Softmax(OnnxOpConverter):
""" Operator converter for Softmax.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# set default value when axis is not set in the model
if 'axis' not in attr:
attr['axis'] = 1
return AttrCvt(
op_name='softmax',
transforms={
'axis': ('axis', 1),
})(inputs, attr, params)
class ConstantFill(OnnxOpConverter):
""" Operator converter for ConstantFill.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
is_full = True
num_inputs = len(inputs)
if 'shape' in attr:
if num_inputs > 0:
raise ImportError(
"Can't set shape and input tensor at a time")
shape = attr.pop('shape')
else:
if num_inputs == 0:
raise ImportError(
"Either shape attribute or input should be set")
if 'input_as_shape' in attr and attr['input_as_shape']:
shape = params[inputs[0].list_output_names()[0]].asnumpy()
else:
is_full = False
if not is_full:
if 'extra_shape' in attr:
raise ImportError(
"Extra Shape not supported with fill_like")
out = AttrCvt(
op_name='full_like',
transforms={'value': 'fill_value'},
ignores=['dtype'])(inputs, attr)
return _sym.cast(out, dtype=attr['dtype'].decode("utf-8"))
if 'extra_shape' in attr:
shape = shape + attr.pop('extra_shape')
return AttrCvt(
op_name='full',
transforms={'value': 'fill_value'},
extras={'shape':shape})(inputs, attr)
# compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
def _get_convert_map(opset):
return {
# defs/experimental
'Identity': Renamer('copy'),
# 'Affine'
'ThresholdedRelu': ThresholdedRelu.get_converter(opset),
'ScaledTanh': ScaledTanh.get_converter(opset),
'ParametricSoftplus': ParametricSoftPlus.get_converter(opset),
'ConstantFill': ConstantFill.get_converter(opset),
# 'GivenTensorFill'
'FC': AttrCvt('dense', ignores=['axis', 'axis_w']),
'Scale': Scale.get_converter(opset),
# 'GRUUnit'
# 'ATen'
'ImageScaler': ImageScaler.get_converter(opset),
# 'MeanVarianceNormalization'
# 'Crop'
# 'Embedding'
'Upsample' : Upsample.get_converter(opset),
'SpatialBN': BatchNorm.get_converter(opset),
# defs/generator
# 'Constant' # Implemented
# 'RandomUniform'
# 'RandomNormal'
# 'RandomUniformLike'
# 'RandomNormalLike'
# defs/logical
# defs/math
'Add': Add.get_converter(opset),
'Sub': Sub.get_converter(opset),
'Mul': Mul.get_converter(opset),
'Div': Div.get_converter(opset),
'Neg': Renamer('negative'),
'Abs': Absolute.get_converter(opset),
'Reciprocal': Reciprocal.get_converter(opset),
'Floor': Renamer('floor'),
'Ceil': Renamer('ceil'),
'Sqrt': Renamer('sqrt'),
'Relu': Renamer('relu'),
'LeakyRelu': Renamer('leaky_relu'),
'Selu': Selu.get_converter(opset),
'Elu': Elu.get_converter(opset),
'Exp': Renamer('exp'),
'Log': Renamer('log'),
'Tanh': Renamer('tanh'),
'Pow': Renamer('broadcast_pow'),
'PRelu': Prelu.get_converter(opset),
'Sigmoid': Renamer('sigmoid'),
'HardSigmoid': HardSigmoid.get_converter(opset),
'Max': Maximum.get_converter(opset),
'Min': Minimum.get_converter(opset),
'Sum': Sum.get_converter(opset),
'Mean': Mean.get_converter(opset),
'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
# softmax default axis is different in onnx
'Softmax': Softmax.get_converter(opset),
'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}),
# 'Hardmax'
'Softsign': Softsign.get_converter(opset),
'SoftPlus': SoftPlus.get_converter(opset),
'Gemm': Gemm.get_converter(opset),
'MatMul': Renamer('matmul'),
# defs/nn
'AveragePool': AveragePool.get_converter(opset),
'MaxPool': MaxPool.get_converter(opset),
'Conv': Conv.get_converter(opset),
'ConvTranspose': ConvTranspose.get_converter(opset),
'GlobalAveragePool': Renamer('global_avg_pool2d'),
'GlobalMaxPool': Renamer('global_max_pool2d'),
'BatchNormalization': BatchNorm.get_converter(opset),
# 'InstanceNormalization'
# 'LpNormalization'
'Dropout': AttrCvt('dropout', {'ratio': 'rate'}, ignores=['is_test']),
'Flatten': Renamer('flatten'),
'LRN': LRN.get_converter(opset),
# defs/reduction
'ReduceMax': AttrCvt('max', {'axes': 'axis'}),
'ReduceMin': AttrCvt('min', {'axes': 'axis'}),
'ReduceSum': AttrCvt('sum', {'axes': 'axis'}),
'ReduceMean': AttrCvt('mean', {'axes': 'axis'}),
# 'ReduceProd'
# 'ReduceLogSumExp'
'ArgMax': ArgMax.get_converter(opset),
'ArgMin': ArgMin.get_converter(opset),
# defs/tensor
'Cast': Cast.get_converter(opset),
'Reshape': Reshape.get_converter(opset),
'Concat': Renamer('concatenate'),
'Split': Split.get_converter(opset),
'Slice': Slice.get_converter(opset),
'Transpose': AttrCvt('transpose', {'perm': 'axes'}),
'Gather': Gather.get_converter(opset),
'Squeeze': AttrCvt('squeeze', {'axes': 'axis'}),
'Unsqueeze': Unsqueeze.get_converter(opset),
'Pad': Pad.get_converter(opset),
'Shape': Shape.get_converter(opset),
}
class GraphProto(object):
"""A helper class for handling nnvm graph copying from pb2.GraphProto.
Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
"""
def __init__(self):
self._nodes = {}
self._params = {}
self._renames = {}
self._num_input = 0
self._num_param = 0
def from_onnx(self, graph, opset):
"""Construct nnvm nodes from onnx graph.
        The inputs from the onnx graph are vague, only providing "1", "2"...
        For convenience, we rename the `real` input names to "input_0",
        "input_1"... and rename parameters to "param_0", "param_1"...
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
opset : opset version
Returns
-------
sym : nnvm.sym.Symbol
The returned nnvm symbol
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
# parse network inputs to nnvm, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
for i in graph.input:
# from onnx v0.2, GraphProto.input has type ValueInfoProto,
# and the name is 'i.name'
i_name = self._parse_value_proto(i)
if i_name in self._params:
# i is a param instead of input
self._num_param += 1
self._params[i_name] = self._params.pop(i_name)
self._nodes[i_name] = _sym.Variable(
name=i_name, shape=self._params[i_name].shape)
else:
self._num_input += 1
self._nodes[i_name] = _sym.Variable(name=i_name)
# get list of unsupported ops
convert_map = _get_convert_map(opset)
unsupported_ops = set()
for node in graph.node:
op_name = node.op_type
if op_name not in convert_map and \
op_name != 'Constant' and \
op_name not in _identity_list:
unsupported_ops.add(op_name)
if unsupported_ops:
msg = 'The following operators are not supported for frontend ONNX: '
msg += ', '.join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
# construct nodes, nodes are stored as directed acyclic graph
for node in graph.node:
op_name = node.op_type
attr = self._parse_attr(node.attribute)
inputs = [self._nodes[self._renames.get(i, i)] for i in node.input]
if op_name == "Constant":
t_proto = self._parse_attr(node.attribute)["value"]
self._num_param += 1
self._params[node.output[0]] = self._parse_array(t_proto)
self._nodes[node.output[0]] = _sym.Variable(name=node.output[0],
shape=list(t_proto.dims))
else:
op = self._convert_operator(op_name, inputs, attr, opset)
node_output = self._fix_outputs(op_name, node.output)
assert len(node_output) == len(op.list_output_names()), (
"Number of output mismatch {} vs {} in {}.".format(
len(node_output), len(op.list_output_names()), op_name))
for k, i in zip(list(node_output), range(len(node_output))):
self._nodes[k] = op[i]
# now return the outputs
out = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
if len(out) > 1:
out = _sym.Group(out)
else:
out = out[0]
return out, self._params
def _parse_value_proto(self, value_proto):
"""Parse ValueProto or raw str."""
try:
name = value_proto.name
except AttributeError:
name = value_proto
return name
def _parse_array(self, tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError as e:
raise ImportError(
"Unable to import onnx which is required {}".format(e))
np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims))
return tvm.nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['g']:
if a.HasField(f):
raise NotImplementedError(
"Filed {} is not supported in nnvm.".format(f))
for f in ['graphs']:
if list(getattr(a, f)):
raise NotImplementedError(
"Filed {} is not supported in nnvm.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
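    # Illustrative note (added for clarity; not part of the original code):
    # given ONNX attributes such as
    #     [AttributeProto(name="axis", i=1),
    #      AttributeProto(name="kernel_shape", ints=[3, 3])]
    # _parse_attr would return {"axis": 1, "kernel_shape": (3, 3)}.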
def _convert_operator(self,
op_name,
inputs,
attrs,
opset,
identity_list=None,
convert_map=None):
"""Convert from onnx operator to nnvm operator.
        The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
inputs : list of nnvm.Symbol
List of input symbols.
attrs : dict
Dict of operator attributes
opset : int
Opset version
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to nnvm, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
sym : nnvm.Symbol
Converted nnvm Symbol
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _get_convert_map(opset)
if op_name in identity_list:
sym = get_nnvm_op(op_name)(*inputs, **attrs)
elif op_name in convert_map:
sym = convert_map[op_name](inputs, attrs, self._params)
else:
raise tvm.error.OpNotImplemented(
                'Operator {} is not supported in frontend ONNX.'.format(op_name))
return sym
def _fix_outputs(self, op_name, outputs):
"""A hack to handle dropout or similar operator that have more than one out
in ONNX.
"""
if op_name == 'Dropout':
if len(outputs) == 1:
return outputs
# TODO(zhreshold): support dropout mask?
outputs = outputs[:-1]
return outputs
def from_onnx(model):
"""Load onnx graph which is a python protobuf object into nnvm graph.
The companion parameters will be handled automatically.
    The inputs from the onnx graph are vague, only providing "1", "2"...
    For convenience, we rename the `real` input names to "input_0",
    "input_1"... and rename parameters to "param_0", "param_1"...
Parameters
----------
model : protobuf object
ONNX ModelProto after ONNX v1.1.0
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.ndarray
Dict of converted parameters stored in tvm.ndarray format
"""
g = GraphProto()
graph = model.graph
try:
opset = model.opset_import[0].version if model.opset_import else 1
except AttributeError:
opset = 1
sym, params = g.from_onnx(graph, opset)
return sym, params
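# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# frontend). It assumes the `onnx` package is installed and that
# "model.onnx" is a placeholder path supplied by the caller.
if __name__ == "__main__":
    import onnx

    _model = onnx.load("model.onnx")
    _sym, _params = from_onnx(_model)
    print("Converted graph with {} parameter tensors".format(len(_params)))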
| apache-2.0 | 4,637,215,337,602,989,000 | 33.796724 | 100 | 0.559345 | false |
nealchenzhang/Py4Invst | Backtest_Futures/execution.py | 1 | 2231 | # -*- coding: utf-8 -*-
# execution.py
from abc import ABCMeta, abstractmethod
import datetime
import queue
from Backtest_Futures.event import FillEvent, OrderEvent
class ExecutionHandler(object):
"""
The ExecutionHandler abstract class handles the interaction
between a set of order objects generated by a FoF and
the ultimate set of Fill objects that actually occur in the
market.
The handlers can be used to subclass simulated brokerages
or live brokerages, with identical interfaces. This allows
strategies to be backtested in a very similar manner to the
live trading engine.
"""
__metaclass__ = ABCMeta
@abstractmethod
def execute_order(self, event):
"""
Takes an Order event and executes it, providing
a Fill event that gets placed onto the Events queue.
:param event: Contains an Event object with order information.
:return:
"""
raise NotImplementedError("Should implement execute_order()")
class SimulatedExecutionHandler(ExecutionHandler):
"""
The simulated execution handler simply converts all order
objects into their equivalent fill objects automatically
without latency, slippage or fill-ratio issues.
This allows a straightforward "first go" test of any strategy,
before implementation with a more sophisticated execution
handler.
"""
def __init__(self, events):
"""
Initializes the handler, setting the event queues
up internally.
:param events: The Queue of Event objects.
"""
self.events = events
def execute_order(self, event):
"""
Simply converts Order objects into Fill objects naively,
        i.e., without any latency, slippage or fill-ratio problems.
:param event: Contains an Event object with order information.
:return:
"""
if event.type == 'ORDER':
fill_event = FillEvent(datetime.datetime.utcnow(),
event.symbol, 'CHINA',
event.quantity, event.direction,
event.position_type, None)
self.events.put(fill_event) | mit | -8,232,657,005,454,368,000 | 30 | 70 | 0.647692 | false |
dsheets4/Trajic | tmt/MakeSmallTestFile.py | 1 | 2422 | # TaxiVis product for analyzing trends in taxi trajectories.
# Copyright (C) 2012 David Sheets ([email protected])
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from optparse import OptionParser
# -----------------------------------------------------------------------------
# Main() start here
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Parse out the command line options for the script.
parser = OptionParser()
parser.add_option("-i", "--input", dest="inputFile",
help="CSV format taxi input file", metavar="FILE")
parser.add_option("-o", "--output", dest="outputFile",
help="CSV format taxi output file", metavar="FILE")
parser.add_option("-n", "--num-lines", dest="nLines",
help="Numer of lines to write to the test file", metavar="DIR")
(options, args) = parser.parse_args()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Process some of the command line inputs
nLinesToProcess = int(options.nLines)
inFile = open(options.inputFile, 'r')
outFile = open(options.outputFile, 'w')
print( "Processing test file to contain %d lines." % (nLinesToProcess) )
print( "Output file is: %s" % (options.outputFile) )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
nRecProcessed = 0 # Total number of records in the input file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Read the input file line by line
for line in inFile:
# Determine if we can stop processing the file.
if nRecProcessed >= nLinesToProcess:
break
# Track the number of records that were processed from the file
nRecProcessed += 1
# Write the line to the output file
outFile.write("%s" % line)
| gpl-3.0 | 4,561,901,100,396,659,700 | 37.444444 | 79 | 0.578035 | false |
Clemson-DPA/dpa-pipe | setup.py | 1 | 2409 | """Python setuptools installer module"""
# -----------------------------------------------------------------------------
from codecs import open
from os import pardir, path
from setuptools import setup, find_packages
# -----------------------------------------------------------------------------
AUTHOR = "Clemson Digital Production Arts Program",
AUTHOR_EMAIL = "[email protected]"
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.7",
"Topic :: Education",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
]
DESCRIPTION = "DPA pipeline front end API"
INSTALL_REQUIRES = [
"colorama",
"ordereddict",
"parsedatetime",
"python-dateutil",
"PyYAML",
"requests",
"rpyc",
"Sphinx",
"sphinx-rtd-theme",
]
KEYWORDS = "production pipeline framework",
LICENSE = 'MIT'
NAME = 'dpa-pipe'
PACKAGE_EXCLUDES = [
'dpa_site',
]
SCRIPTS = [
'bin/dpa',
'bin/dpa_houdini',
'bin/dpa_uncompress',
'bin/dpa_ribrender',
]
URL = "" # XXX once uploaded to git or bitbucket, set this
# -----------------------------------------------------------------------------
# path to this file's directory
PROJECT_ROOT = path.normpath(path.join(path.abspath(__file__), pardir))
# get a list of python packages to install
PACKAGES = find_packages(exclude=PACKAGE_EXCLUDES)
# get the long description
with open(path.join(PROJECT_ROOT, 'README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
# fetch __version__ from the python package
exec(open(path.join(PROJECT_ROOT, 'dpa', '__init__.py')).read())
VERSION = __version__
# -----------------------------------------------------------------------------
setup(
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
install_requires=INSTALL_REQUIRES,
include_package_data=True,
keywords=KEYWORDS,
license=LICENSE,
long_description=LONG_DESCRIPTION,
name=NAME,
packages=PACKAGES,
scripts=SCRIPTS,
url=URL,
version=VERSION,
)
| mit | -4,348,415,758,490,599,400 | 24.09375 | 79 | 0.580739 | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/sfntly/src/cpp/tools/font_data_generator_xml.py | 7 | 2406 | # Copyright 2011 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate XML font test data using ttLib."""
import hashlib
import struct
import xml.dom.minidom as minidom
from fontTools import ttLib
class FontDataGeneratorXML(object):
"""Generates the XML description for a font."""
def __init__(self, table_data_generators, font_path):
self.table_data_generators = table_data_generators
self.font_path = font_path
def Generate(self):
"""Creates font a DOM with data for every table.
Uses |table_data_generators| to plug in XML generators.
Returns:
A DOM ready for serialization.
"""
doc = minidom.getDOMImplementation().createDocument(None,
'font_test_data', None)
root_element = doc.documentElement
# We need to set the path of the font as if in the root source directory
# The assumption is that we have a '../' prefix
root_element.setAttribute('path', self.font_path[3:])
h = hashlib.new('sha1')
h.update(open(self.font_path, 'r').read())
root_element.setAttribute('sha1', h.hexdigest())
font = ttLib.TTFont(self.font_path)
# There is always a postscript name for Windows_BMP
name_record = font['name'].getName(6, 3, 1)
root_element.setAttribute('post_name',
self.Unpack(name_record.string))
for (name, table_data_generator) in self.table_data_generators:
name += '_table'
table_element = doc.createElement(name)
root_element.appendChild(table_element)
table_data_generator.Generate(font, doc, table_element)
return doc
def Unpack(self, name):
"""Returns every other byte from name to comprensate for padding."""
unpack_format = 'xc' * (len(name)/2)
# This is string.join, which is deprecated :(
return reduce(lambda a, h: a + h, struct.unpack(unpack_format, name), '')
| gpl-3.0 | 3,879,662,110,680,641,500 | 37.190476 | 79 | 0.682461 | false |
MacHu-GWU/sqlite4dummy-project | sqlite4dummy/tests/sqlite3_in_python/syntax/test_UPDATE.py | 1 | 1070 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from pprint import pprint as ppt
import unittest
import sqlite3
import pickle
class Unittest(unittest.TestCase):
def test_UPDATE(self):
connect = sqlite3.connect(":memory:")
cursor = connect.cursor()
# http://www.w3schools.com/sql/sql_create_table.asp
create_table_sql = \
"""
CREATE TABLE employee
(
_id INTEGER PRIMARY KEY NOT NULL,
role TEXT,
name TEXT,
profile BLOB
)
"""
cursor.execute(create_table_sql)
data = [(1, "coder", "John", None), (2, "sales", "Mike", None)]
cursor.executemany("INSERT INTO employee VALUES (?,?,?,?)", data)
cursor.execute(
"UPDATE employee SET role = ?, profile = ? WHERE _id = ?",
("manager", pickle.dumps({"age": 32}), 2))
ppt(cursor.execute("SELECT * FROM employee").fetchall())
if __name__ == "__main__":
unittest.main() | mit | -4,189,850,799,309,553,700 | 27.945946 | 73 | 0.539252 | false |
WhisperSystems/Signal-iOS | Scripts/sds_codegen/sds_common.py | 1 | 1652 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
SDS_JSON_FILE_EXTENSION = '.sdsjson'
def fail(*args):
error = ' '.join(str(arg) for arg in args)
raise Exception(error)
git_repo_path = os.path.abspath(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).strip())
print 'git_repo_path:', git_repo_path
def sds_to_relative_path(path):
path = os.path.abspath(path)
if not path.startswith(git_repo_path):
fail('Unexpected path:', path)
path = path[len(git_repo_path):]
if path.startswith(os.sep):
path = path[len(os.sep):]
return path
def sds_from_relative_path(path):
return os.path.join(git_repo_path, path)
def clean_up_generated_code(text):
# Remove trailing whitespace.
lines = text.split('\n')
lines = [line.rstrip() for line in lines]
text = '\n'.join(lines)
# Compact newlines.
while '\n\n\n' in text:
text = text.replace('\n\n\n', '\n\n')
# Ensure there's a trailing newline.
return text.strip() + '\n'
def clean_up_generated_swift(text):
return clean_up_generated_code(text)
def clean_up_generated_objc(text):
return clean_up_generated_code(text)
def pretty_module_path(path):
path = os.path.abspath(path)
if path.startswith(git_repo_path):
path = path[len(git_repo_path):]
return path
def write_text_file_if_changed(file_path, text):
if os.path.exists(file_path):
with open(file_path, 'rt') as f:
oldText = f.read()
if oldText == text:
return
with open(file_path, 'wt') as f:
f.write(text)
| gpl-3.0 | 8,872,971,926,036,813,000 | 24.415385 | 105 | 0.616223 | false |
tushargit/aakashlab | ac/forms.py | 1 | 10612 | from django import forms
from django.contrib.auth.models import User
# import from ac models
from ac.models import Contact
from ac.models import Coordinator, AakashCentre, User
from ac.models import Project, TeamMember, Mentor ,Manager
from captcha.fields import ReCaptchaField
class ContactForm(forms.ModelForm):
name = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder': 'Your name*.'}),
help_text="Enter your name.", required=True)
email = forms.EmailField(
widget=forms.TextInput(attrs={'class':'form-control',
'placeholder': 'Enter valid email*.'}),
help_text="Enter Email.", required=True)
message = forms.CharField(
widget=forms.Textarea(attrs={'class':'form-control',
'placeholder': 'Please write your message*.',
'rows': 4}),
help_text="Please write your message.", required=True)
captcha = ReCaptchaField(attrs={'theme': 'clean'})
class Meta:
model = Contact
fields = ['name', 'email', 'message', 'captcha']
class AakashCentreForm(forms.ModelForm):
"""Register Aakash Centre."""
ac_id = forms.IntegerField(label="Aakash Centre ID",
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Aakash Centre ID or RC ID*.'}),
help_text="", required=True)
quantity = forms.IntegerField(
        label = 'Number of tablets received at your Centre (0 if you don\'t know).',
widget= forms.TextInput(
attrs={'class': 'form-control',
'value': '0',
'placeholder': 'Number of tablets received at your centre(Optional).'}),
help_text="", required=False)
name = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder': 'Centre name*.'}),
help_text="", required=True)
city = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder': 'City*.'}),
help_text="", required=True)
state = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder': 'State*.'}),
help_text="", required=True)
class Meta:
model = AakashCentre
fields = ['ac_id', 'quantity', 'name', 'city', 'state']
class CoordinatorForm(forms.ModelForm):
"""Register Coordinator Form."""
contact = forms.CharField(
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Coordinator contact number*.'}),
help_text="", required=True)
picture = forms.ImageField(label='Profile picture',
widget = forms.FileInput(
attrs={'placeholder': 'Coordinator picture.'}),
required=False)
class Meta:
model = Coordinator
fields = ['contact', 'picture']
class UserForm(forms.ModelForm):
username = forms.CharField(label='Username',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Username to login*.'}),
help_text="", required=True,
error_messages={'required':'Username is required.'})
first_name = forms.CharField(
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Coordinator first name*.'}),
help_text="", required=True,
error_messages={'required':'First name is required.'})
last_name = forms.CharField(
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Coordinator last name*.'}),
help_text="", required=True,
error_messages={'required':'Last name is required.'})
email = forms.CharField(
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Coordinator valid email*.'}),
help_text="", required=True,
error_messages={'required':'Valid Email address is required.'})
password = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control',
'placeholder': 'Coordinator password*.'}),
help_text="", required=True,
error_messages={'required':'Password is missing.'})
class Meta:
model = User
fields = ['username', 'first_name', 'last_name', 'email', 'password']
class ProjectForm(forms.ModelForm):
"""Form to add new project.
"""
name = forms.CharField(label='Project name',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Project name*.'}),
help_text="", required=True,
error_messages={'required':'Project name is required.'})
summary = forms.CharField(label='Summary',
widget= forms.Textarea(
attrs={'class': 'form-control', 'rows': '3',
'placeholder': 'Summary of the project*.'}),
help_text="", required=True,
error_messages={'required':'Summary is required.'})
ac = forms.ModelChoiceField(
label='Centre',
cache_choices=True,
widget = None,
queryset = AakashCentre.objects.all().order_by('name'),
empty_label = None,
help_text="", required=True,
error_messages={'required':'Aakash centre is required.'})
src_url = forms.URLField(
label='Source code URL',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Valid URL of source code.'}),
error_messages={'invalid': 'Enter valid URL.'},
required=False)
doc_url = forms.URLField(
label='Documentation URL',
widget = forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Valid URL where docs are hosted.'}),
error_messages={'invalid': 'Enter valid URL.'},
required=False)
doc_file = forms.FileField(
label = 'Documentation file.',
widget = forms.FileInput(),
help_text = 'Upload documentation.',
required=False)
additional_url = forms.URLField(
label='Additional URL',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Additional URL where project related files are hosted.'}),
required=False)
apk = forms.FileField(
label='APK',
help_text = 'Upload APK.',
error_messages={'required': 'APK is required.'},
required=True)
logo = forms.ImageField(
label = 'Logo',
help_text = 'Upload project logo.',
required=False)
class Meta:
model = Project
fields = ['name', 'summary', 'ac', 'src_url', 'doc_url',
'doc_file', 'additional_url', 'apk', 'logo']
def clean_doc_file(self):
"""Limit doc_file upload size."""
if self.cleaned_data['doc_file']:
doc_file = self.cleaned_data['doc_file']
if doc_file._size/(1024*1024) <= 5: # < 5MB
return doc_file
else:
raise forms.ValidationError("Filesize should be less than 5MB.")
def clean_apk(self):
"""Limit APK upload size."""
if self.cleaned_data['apk']:
apk = self.cleaned_data['apk']
if apk:
            if apk._size/(1024*1024) <= 12: # <= 12MB
return apk
else:
raise forms.ValidationError("APK file max. size is 12MB.")
else:
raise forms.ValidationError("Not a valid APK!")
class MemberForm(forms.ModelForm):
"""Project member form.
"""
member_name = forms.CharField(
label = 'Member name',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Team member name*.'}),
help_text="", required=False,
error_messages={'required':'Member name is required.'})
member_email = forms.EmailField(
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Enter valid email.'}),
help_text="", required=False,
error_messages={'required': 'Valid Email address is required.'})
class Meta:
model = TeamMember
fields = ['member_name', 'member_email']
class MentorForm(forms.ModelForm):
"""Mentor form.
"""
mentor_name = forms.CharField(
label = 'Mentor\'s name',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Mentor name*.'}),
help_text="", required=False,
error_messages={'required':'Mentor name is required.'})
mentor_email = forms.EmailField(
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Enter valid email.'}),
help_text="", required=False,
error_messages={'required': 'Valid Email address is required.'})
class Meta:
model = Mentor
fields = ['mentor_name', 'mentor_email']
#created manager form
class ManagerForm(forms.ModelForm):
"""Project manager form.
"""
manager_name = forms.CharField(
label = 'Project Manager name',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Project manager name*.'}),
help_text="", required=False,
error_messages={'required':'Manager name is required.'})
manager_email = forms.EmailField(
label = 'Project Manager Email',
widget= forms.TextInput(
attrs={'class': 'form-control',
'placeholder': 'Enter valid email.'}),
help_text="", required=False,
error_messages={'required': 'Valid Email address is required.'})
class Meta:
model = Manager
fields = ['manager_name', 'manager_email']
class Agreement(forms.Form):
"""Terms & Conditions.
"""
agree = forms.BooleanField(
widget=forms.CheckboxInput(),
label="This Project will be always be licensed \
under GNU GPL v3 or later",
required=True,
error_messages={'required': 'You must agree to terms and conditions.'},)
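# --- Illustrative usage sketch (added for clarity; not part of the original
# app). A hypothetical Django view could drive ProjectForm roughly as below;
# the helper name and failure behaviour are assumptions, and Django must be
# fully configured with this app installed for it to run.
def _example_submit_project(request):
    """Hypothetical helper showing the typical validate-and-save flow."""
    form = ProjectForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        project = form.save(commit=False)
        # Any Project model fields not covered by the form would be set
        # here before calling project.save().
        return project
    return None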
| gpl-3.0 | 7,588,071,567,527,484,000 | 34.972881 | 93 | 0.558707 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/token/selected_force_assignment.py | 1 | 1364 |
from vsg import parser
class with_keyword(parser.keyword):
'''
unique_id = selected_force_assignment : with_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class select_keyword(parser.keyword):
'''
unique_id = selected_force_assignment : select_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class target(parser.target):
'''
unique_id = selected_force_assignment : target
'''
def __init__(self, sString):
parser.target.__init__(self, sString)
class assignment(parser.assignment):
'''
unique_id = selected_force_assignment : assignment
'''
def __init__(self, sString):
parser.assignment.__init__(self, sString)
class force_keyword(parser.keyword):
'''
unique_id = selected_force_assignment : force_keyword
'''
def __init__(self, sString):
parser.keyword.__init__(self, sString)
class semicolon(parser.semicolon):
'''
unique_id = selected_force_assignment : semicolon
'''
def __init__(self, sString=';'):
parser.semicolon.__init__(self)
class question_mark(parser.question_mark):
'''
unique_id = selected_force_assignment : question_mark
'''
def __init__(self, sString='?'):
parser.question_mark.__init__(self)
| gpl-3.0 | -1,488,912,512,271,898,400 | 19.984615 | 58 | 0.611437 | false |
danielhers/ucca | uccaapp/export_units_by_filter.py | 1 | 7955 | import argparse
import os
import sys
import urllib.request
from itertools import product
from ucca import layer1, convert
from uccaapp.download_task import TaskDownloader
desc = "Get all units according to a specified filter. Units that meet any of the filters are output."
CONSECUTIVE = "CONSECUTIVE"
SUBSEQUENCE = "SUBSEQUENCE"
SUBSET = "SUBSET"
def read_amr_roles(role_type):
file_name = "have-" + role_type + "-role-91-roles-v1.06.txt"
if not os.path.exists(file_name):
url = r"http://amr.isi.edu/download/lists/" + file_name
try:
urllib.request.urlretrieve(url, file_name)
except OSError as e:
raise IOError("Must download %s and have it in the current directory when running the script" % url) from e
with open(file_name) as f:
return [line.split()[1] for line in map(str.strip, f) if line and not line.startswith(("#", "MAYBE"))]
AMR_ROLE = {role for role_type in ("org", "rel") for role in read_amr_roles(role_type)}
TOKEN_CLASSES = {
"[ROLE]": AMR_ROLE
}
def get_top_level_ancestor(node):
"""
Traverses the passage upwards until a unit which is immediately below the root is reached
:param node:
:return:
"""
# if node is already the root, return it
if not node.fparent:
return node
parent = node
while parent.fparent.fparent:
parent = parent.fparent
return parent
def tokens_match(unit_tokens, query_tokens, mode):
"""
:param unit_tokens: candidate unit tokens, as a list of strings
:param query_tokens: list of lists of tokens to look for, each list representing alternatives for a position
:param mode: CONSECUTIVE, SUBSET, SUBSEQUENCE
:return whether query_tokens is contained in unit_tokens
"""
if mode == SUBSET:
return any(set(query).issubset(unit_tokens) for query in product(*query_tokens))
indices = []
for alternatives in query_tokens:
index = None
for alternative in alternatives:
try:
index = unit_tokens.index(alternative)
except ValueError:
pass
if index is None:
return False
indices.append(index)
if mode == CONSECUTIVE:
return indices == list(range(indices[0], indices[-1] + 1))
elif mode == SUBSEQUENCE:
return indices == sorted(indices)
raise ValueError("Invalid option for token mode: " + mode)
## def main(output = None, comment = False, sentence_level = False, categories = (), tokens = (), tokens_mode = CONSECUTIVE,
## case_insensitive = False, tokens_by_file = False, remotes = False, write = False, **kwargs):
## if tokens_by_file:
## with open(tokens[0]) as f:
## token_lists = [line.strip().split() for line in f]
## elif tokens != ():
## token_lists = [tokens]
## else:
## token_lists = ()
## filtered_nodes = []
## for passage, task_id, user_id in TaskDownloader(**kwargs).download_tasks(write=False, **kwargs):
## if sentence_level:
## cur_passages = convert.split2sentences(passage)
## all_nodes = [p.layer(layer1.LAYER_ID).heads[0] for p in cur_passages]
## else:
## all_nodes = list(passage.layer(layer1.LAYER_ID).all)
## for node in all_nodes:
## if comment and node.extra.get("remarks"):
## filtered_nodes.append(("comment",node,task_id,user_id))
## if remotes and len([n for n in node.outgoing if n.attrib.get("remote")]) > 0:
## filtered_nodes.append(("remotes", node, task_id, user_id))
## if token_lists and not node.attrib.get("implicit"):
## for token_list in token_lists:
## unit_tokens = [t.text for t in node.get_terminals(punct=True)]
## if case_insensitive:
## unit_tokens = [x.lower() for x in unit_tokens]
## token_list = [x.lower() for x in token_list]
## if tokens_match(unit_tokens, token_list, tokens_mode):
## filtered_nodes.append(('TOKENS', node, task_id, user_id))
## else:
## all_tags = [c.tag for edge in node for c in edge.categories]
## intersection = set(categories).intersection(all_tags)
def filter_nodes(categories=(), tokens=(), tokens_mode=CONSECUTIVE, case_insensitive=False, comment=False,
sentence_level=False, remotes=False, **kwargs):
for passage, task_id, user_id in TaskDownloader(**kwargs).download_tasks(**kwargs):
for node in [p.layer(layer1.LAYER_ID).heads[0] for p in convert.split2sentences(passage)] if sentence_level \
else passage.layer(layer1.LAYER_ID).all:
if comment and node.extra.get("remarks"):
yield "comment", node, task_id, user_id
if remotes:
if node.attrib.get("implicit"):
yield 'IMPLICIT', node, task_id, user_id
for e in node.incoming:
if e.attrib.get("remote"):
yield 'REMOTE', e.parent, task_id, user_id
if tokens and not node.attrib.get("implicit"):
unit_tokens = [t.text for t in node.get_terminals(punct=True)]
if case_insensitive:
unit_tokens = [x.lower() for x in unit_tokens]
tokens = [x.lower() for x in tokens]
if tokens_match(unit_tokens, tokens, tokens_mode):
yield 'TOKENS', node, task_id, user_id
elif categories:
intersection = set(categories).intersection(c.tag for e in node for c in e.categories)
if intersection:
yield str(intersection), node, task_id, user_id
def main(output=None, tokens=(), **kwargs):
kwargs["write"] = False
f = open(output, 'w', encoding="utf-8") if output else sys.stdout
expanded_tokens = [TOKEN_CLASSES.get(token, [token]) for token in tokens]
for filter_type, node, task_id, user_id in filter_nodes(tokens=expanded_tokens, **kwargs):
ancestor = get_top_level_ancestor(node)
print(filter_type, task_id, user_id, node.extra.get("tree_id"), node.to_text(),
ancestor, str(node.extra.get("remarks")).replace("\n", "|"), file=f, sep="\t", flush=True)
if output:
f.close()
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser(description=desc)
TaskDownloader.add_arguments(argument_parser)
argument_parser.add_argument("--output", help="output file name")
argument_parser.add_argument("--categories", nargs="+", default=(),
help="Abbreviations of the names of the categories to filter by")
argument_parser.add_argument("--tokens", nargs="+", default=(),
help="Tokens to filter by")
argument_parser.add_argument("--tokens-by-file", action="store_true",
help="tokens will be specified in a file instead of in the command line. Each line consists of space delimited list of tokens.")
argument_parser.add_argument("--tokens-mode", default=CONSECUTIVE,
help="mode of search for the tokens: CONSECUTIVE,SUBSEQUENCE,SUBSET")
argument_parser.add_argument("--sentence-level", action="store_true",
help="output sentences rather than units")
argument_parser.add_argument("--case-insensitive", action="store_true",
help="make tokens search case insensitive")
argument_parser.add_argument("--comment", action="store_true", help="Output all the units that have comments")
argument_parser.add_argument("--remotes", action="store_true", help="Output all the units that have remote children")
main(**vars(argument_parser.parse_args()))
| gpl-3.0 | -7,023,550,245,908,607,000 | 46.071006 | 161 | 0.603017 | false |
zenodo/invenio | invenio/modules/pages/models.py | 1 | 3345 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Pages module models."""
from datetime import datetime
from invenio.ext.sqlalchemy import db
class Page(db.Model):
"""Represents a page."""
__tablename__ = 'pages'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True, autoincrement=True)
"""Page identifier."""
url = db.Column(db.String(100), unique=True, nullable=False)
"""Page url."""
title = db.Column(db.String(200), nullable=True)
"""Page title."""
content = db.Column(
db.Text().with_variant(db.Text(length=2**32-2), 'mysql'),
nullable=True)
"""Page content. Default is pages/templates/default.html"""
description = db.Column(db.String(200), nullable=True)
"""Page description."""
template_name = db.Column(db.String(70), nullable=True)
"""Page template name. Default is cfg["PAGES_DEFAULT_TEMPLATE"]."""
created = db.Column(db.DateTime(), nullable=False, default=datetime.now)
"""Page creation date."""
last_modified = db.Column(db.DateTime(), nullable=False,
default=datetime.now, onupdate=datetime.now)
"""Page last modification date."""
def __repr__(self):
"""Page representation.
Used on Page admin view in inline model.
:returns: unambiguous page representation.
"""
return "URL: %s, title: %s" % (self.url, self.title)
class PageList(db.Model):
"""Represent association between page and list."""
__tablename__ = 'pagesLIST'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True, autoincrement=True)
"""PageList identifier."""
list_id = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(Page.id), nullable=False)
"""Id of a list."""
page_id = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(Page.id), nullable=False)
"""Id of a page."""
order = db.Column(db.Integer(15, unsigned=True), nullable=False)
list = db.relationship(Page,
backref=db.backref("pages",
cascade="all, delete-orphan"),
foreign_keys=[list_id])
"""Relation to the list."""
page = db.relationship(Page,
backref=db.backref("lists",
cascade="all, delete-orphan"),
foreign_keys=[page_id])
"""Relation to the page."""
__all__ = ['Page', 'PageList']
| gpl-2.0 | 185,426,025,631,271,140 | 32.45 | 76 | 0.609268 | false |
craighiller/serendipity | main.py | 1 | 12699 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import jinja2
import os
import logging
import texter
import re
from webapp2_extras import sessions
import imgur
from wish_model import Wish
from user_model import User
from google.appengine.ext import db
import twilio.twiml
import random
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
config = {}
config['webapp2_extras.sessions'] = {
'secret_key': 'my-super-secret-key',
}
class BaseHandler(webapp2.RequestHandler):
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
# Returns a session using the default cookie key.
return self.session_store.get_session()
class MainHandler(BaseHandler):
def get(self):
template_values = {'session':self.session}
template_values['recent_wishes'] = Wish.gql("WHERE status = 'fulfilled' ORDER BY updated").fetch(8)
template = jinja_environment.get_template("views/home.html")
self.response.out.write(template.render(template_values))
class WishHandler(BaseHandler):
def get(self):
template_values = {'session':self.session}
template_values['wish'] = Wish.get(self.request.get("key"))
template_values['flash'] = self.request.get('flash')
template = jinja_environment.get_template("views/wish.html")
self.response.out.write(template.render(template_values))
class MakeAWishHandler(BaseHandler):
def get(self):
if 'authenticated' not in self.session.keys() or not self.session['authenticated']:
return self.redirect('/login?redirect=true')
template_values = {'session':self.session}
template = jinja_environment.get_template("views/make_a_wish.html")
self.response.out.write(template.render(template_values))
def post(self):
money = 0
if self.request.get("cache_money"):
money = re.sub('[,$ ]', '', self.request.get("cache_money"))
wish = Wish(
tagline=self.request.get("tagline"),
details=self.request.get("details"),
type_of_request=self.request.get("type_of_request"),
location=self.request.get("location"),
status="requested",
user_key=self.session['user_name'],
cache_money=float(money)
)
wish.put()
self.redirect('/user?id=' + str(wish.user_key) + '&flash=You made a wish!')
class WishIndexHandler(BaseHandler):
def get(self):
template_values = {'session':self.session}
search = self.request.get("status")
types = self.request.get_all('type_of_request')
if not types:
types = ['food', 'animal', 'chores', 'material things', 'other']
template_values['types'] = types
else:
template_values['types'] = types
if not search:
search = 'requested'
template_values['search'] = search
if search == 'all':
template_values['wishes'] = Wish.gql("WHERE type_of_request IN :1", types)
else:
template_values['wishes'] = Wish.gql("WHERE status = :1 and type_of_request IN :2", search, types)
template = jinja_environment.get_template("views/fulfill_a_wish.html")
self.response.out.write(template.render(template_values))
def post(self):
if not self.session['authenticated']:
return self.redirect('/login?redirect=true')
template_values = {'session':self.session}
wish = Wish.get(self.request.get("key"))
if self.request.get('delete'):
wish.status = 'requested'
wish.user_fulfiller_key = None
flash = 'You are no longer fulfilling ' + wish.tagline
elif self.request.get('confirm'):
wish.status = 'fulfilled'
fulfiller = User.get_by_key_name(wish.user_fulfiller_key)
wisher = User.get_by_key_name(wish.user_key)
fulfiller.money_raised += wish.cache_money
wisher.money_donated += wish.cache_money
fulfiller.put()
wisher.put()
flash = 'Your wish of ' + wish.tagline + ' has been fulfilled!'
else:
wish.status = 'in progress'
wish.user_fulfiller_key = self.session['user_name']
flash = 'Fulfilling ' + wish.tagline
wish.put()
return self.redirect('/wish?key=' + str(wish.key()) + '&flash=' + flash)
class UserHandler(BaseHandler):
def get(self):
template_values = {'session':self.session}
template_values['user'] = User.gql("WHERE name = :1", self.request.get('id')).fetch(1)[0] # shady, get the user w/ username
template_values['unfulfilled'] = Wish.gql("WHERE user_key = :1 AND status != 'fulfilled'", self.request.get('id'))
template_values['fulfilled'] = Wish.gql("WHERE user_key = :1 AND status = 'fulfilled'", self.request.get('id'))
template_values['to_complete'] = Wish.gql("WHERE user_fulfiller_key = :1 AND status != 'fulfilled'", self.request.get('id'))
template_values['completed'] = Wish.gql("WHERE user_fulfiller_key = :1 AND status = 'fulfilled'", self.request.get('id'))
template_values['flash'] = self.request.get('flash')
template = jinja_environment.get_template("views/user.html")
self.response.out.write(template.render(template_values))
class UserIndexHandler(BaseHandler):
def get(self):
template_values = {'session':self.session}
template_values['users'] = User.gql("ORDER BY money_donated DESC")
template = jinja_environment.get_template("views/users.html")
self.response.out.write(template.render(template_values))
class LoginHandler(BaseHandler):
def get(self):
template = jinja_environment.get_template("views/login.html")
template_values = {"denied": False, 'session':self.session}
if self.request.get('redirect'):
template_values["denied"] = True
self.response.out.write(template.render(template_values))
def post(self):
username = self.request.get("username")
password = self.request.get("password")
cur_user = User.get_by_key_name(username)
template = jinja_environment.get_template("views/login.html")
if cur_user == None:
template_values = {"denied": True, 'session':self.session}
self.response.out.write(template.render(template_values))
return
if cur_user.password == password:
# terrible authentication hacks, sorry Wagner
self.session['user_name'] = username
self.session['num'] = cur_user.phone_number
self.session['authenticated'] = True
self.redirect('/')
else:
self.session['authenticated'] = False
template_values = {"denied": True, 'session':self.session}
self.response.out.write(template.render(template_values))
class SignupHandler(BaseHandler):
def get(self):
template = jinja_environment.get_template("views/signup.html")
template_values = {"denied": False, 'session':self.session}
self.response.out.write(template.render(template_values))
def post(self):
username = self.request.get("username")
password = self.request.get("password")
opt_in = True if self.request.get("receive_text") else False
num = texter.num_parse(self.request.get("phonenumber"))
cur_user = User.get_by_key_name(username)
template = jinja_environment.get_template("views/signup.html")
if cur_user:
template_values = {'session':self.session}
template_values['flash'] = 'Oops that username is taken!'
self.response.out.write(template.render(template_values))
return
cur_user = User.get_or_insert(username, name=username, phone_number = num, password=password, text_opt_in = opt_in, money_donated=0.0, money_raised=0.0)
# no authentication hacks, sorry Wagner
self.session['user_name'] = username
self.session['num'] = num
self.session['authenticated'] = True
self.redirect('/')
class LogoutHandler(BaseHandler):
def get(self):
self.session['user_name'] = ""
self.session['num'] = ""
self.session['authenticated'] = False
self.redirect('/')
class ProfileHandler(BaseHandler):
def get(self):
template_values = {'session':self.session}
template = jinja_environment.get_template("views/profile.html")
self.response.out.write(template.render(template_values))
class goodbyeHandler(BaseHandler):
def get(self):
for wish in Wish.all():
wish.delete()
for wish in User.all():
wish.delete()
self.redirect("/")
class twimlHandler(BaseHandler):
# Will work when called in production, sample request is:
"""
/twiml?ToCountry=US&ToState=NJ&SmsMessageSid=SM3fec99a49092c1f42acc022222e0d288&NumMedia=0&ToCity=RED+BANK&FromZip=07748&SmsSid=SM3fec99a49092c1f42acc022222e0d288&FromState=NJ&SmsStatus=received&FromCity=MIDDLETOWN&Body=Hi&FromCountry=US&To=%2B17329454001&ToZip=08830&MessageSid=SM3fec99a49092c1f42acc022222e0d288&AccountSid=AC16b8cb7d55a29a0425c18637b3398b71&From=%2B17325333935&ApiVersion=2010-04-01
"""
def get(self):
body = self.request.get("Body")
from_num = texter.num_parse(self.request.get("From"))
# If you want to insta send back a message, but I think this is useless
#resp = twilio.twiml.Response()
#resp.message("thank you come again")
#self.response.out.write(str(resp))
class goodmorningHandler(BaseHandler):
def get(self):
for user in User.all():
if user.text_opt_in:
self.response.out.write("<b>"+user.name+"</b></br>")
# Take three random wishes that are not from the user
potential_wishes = [wish for wish in Wish.all() if wish.user_key != user.name]
user_wishes = random.sample(potential_wishes, min(3, len(potential_wishes)))
for wish in user_wishes:
self.response.out.write(wish.tagline+"<br>")
texter.send_message(user.phone_number, "Consider fulfilling " + wish.tagline + " on http://hackserendipity.appspot.com/")
class picHandler(BaseHandler):
def get(self):
self.response.out.write("""<html>
<body>
<form action="pics"
enctype="multipart/form-data" method="post">
<p>
Type some text (if you like):<br>
<input type="text" name="textline" size="30">
</p>
<p>
Please specify a file, or a set of files:<br>
<input type="file" name="datafile" size="40">
</p>
<div>
<input type="submit" value="Send">
</div>
</form>
</body>
</html>""")
def post(self):
name = self.request.get("textline")
img = self.request.get("datafile")
self.response.out.write(imgur.upload(img, name))
app = webapp2.WSGIApplication([
('/', MainHandler),
('/wish', WishHandler),
('/make_a_wish', MakeAWishHandler),
('/fulfill_a_wish', WishIndexHandler),
('/login', LoginHandler),
('/signup', SignupHandler),
('/logout', LogoutHandler),
('/users', UserIndexHandler),
('/user', UserHandler),
('/profile', ProfileHandler),
('/twiml', twimlHandler),
('/goodmorning', goodmorningHandler),
('/goodbyeFriends', goodbyeHandler),
('/pics', picHandler )
], debug=True, config=config)
| mit | -6,922,116,196,989,017,000 | 38.933962 | 405 | 0.624065 | false |
antchain/antchain.org | web/tx.py | 1 | 17060 | # -*- coding:utf-8 -*-
import math
import web
def GetMinerTransactionResult(result) :
html = ''
html = html + '<div class="container">\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("MinerTransaction") +'</b></div>\n'
html = html + '<div class="column"><a href="/tx/'+ result['txid'] + '">' + result['txid'] + '</a></div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Time") +'</div>\n'
html = html + '<div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '<div class="column column-offset-50">'+ _("System Fee") + ' ' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
return html
def GetCoinByTxVin(vin) :
result = web.collection_coins.find_one({"txid":vin['txid'],"n":vin['vout']})
return result
def GetCoinByTxVout(txid,vout) :
results = web.collection_coins.find({"txid":txid,"n":vout['n']}).sort("height",-1)
if results[0]['state'] & web.CoinState.Spent == web.CoinState.Spent :
return results[0]
else :
return None
def GetCoinsByTransactionResult(result,address,lens) :
html = ''
for i in range(0,lens) :
html = html + '<div class="row">\n'
if len(result['vin']) > i :
coinResult = GetCoinByTxVin(result['vin'][i])
html = html + '<div class="column column-10"><a href="/tx/' + coinResult['txid'] + '"><-</a></div>\n'
if coinResult['address'] == address :
html = html + '<div class="column column-25"><a href="/address/' + coinResult['address'] + '"><b>' + coinResult['address'] + '</b></a></div><div class="column column-15">-' + str(coinResult['value']) + ' ' + web.GetAssetName(coinResult['asset']) + '</div>'
else :
html = html + '<div class="column column-25"><a href="/address/' + coinResult['address'] + '">' + coinResult['address'] + '</a></div><div class="column column-15">-' + str(coinResult['value']) + ' ' + web.GetAssetName(coinResult['asset']) + '</div>'
else :
html = html + '<div class="column column-10"></div>\n'
html = html + '<div class="column column-25"></div><div class="column column-15"></div>\n'
if len(result['vout']) > i :
if result['vout'][i]['address'] == address :
html = html + '<div class="column column-25"><a href="/address/' + result['vout'][i]['address'] + '"><b>' + result['vout'][i]['address'] + '</b></a></div><div class="column column-15">+'+ str(result['vout'][i]['value']) + ' ' + web.GetAssetName(result['vout'][i]['asset']) + '</div>\n'
else :
html = html + '<div class="column column-25"><a href="/address/' + result['vout'][i]['address'] + '">' + result['vout'][i]['address'] + '</a></div><div class="column column-15">+'+ str(result['vout'][i]['value']) + ' ' + web.GetAssetName(result['vout'][i]['asset']) + '</div>\n'
coinResult = GetCoinByTxVout(result['txid'],result['vout'][i])
if coinResult != None :
html = html + '<div class="column column-10"><a href="/tx/'+ coinResult['spent_txid'] + '">-></a></div>\n'
else :
html = html + '<div class="column column-10"></div>\n'
else :
html = html + '<div class="column column-25"></div><div class="column column-15"></div>\n'
html = html + '<div class="column column-10"></div>\n'
html = html + '</div>\n'
return html
def GetContractTransactionResult(result,address) :
html = ''
html = html + '<br/>\n'
lens = 0
len_in = len(result['vin'])
len_out = len(result['vout'])
if len_in > len_out :
lens = len_in
else :
lens = len_out
html = html + '<div class="container">\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("ContractTransaction") + '</b></div>\n'
html = html + '<div class="column"><a href="/tx/'+ result['txid'] + '">' + result['txid'] + '</a></div>\n'
html = html + '</div>\n'
#html = html + '<div class="row">\n'
#html = html + '<table style="padding-left:1em;padding-right:1em;" width="80%" border="0" cellpadding="3" cellspacing="0">\n'
html = html + GetCoinsByTransactionResult(result,address,lens)
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Time") + '</div>\n'
html = html + '<div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '<div class="column column-offset-50">'+ _("System Fee") + ' ' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
#html = html + '</table>\n'
#html = html + '</div>\n'
html = html + '</div>\n'
return html
def GetClaimTransactionResult(result,address) :
html = ''
html = html + '<div class="container">\n'
lens = len(result['vout'])
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("ClaimTransaction") +'</b></div>\n'
html = html + '<div class="column"><a href="/tx/'+ result['txid'] + '">' + result['txid'] + '</a></div>\n'
html = html + '</div>\n'
html = html + GetCoinsByTransactionResult(result,address,lens)
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Time") +'</div>\n'
html = html + '<div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '<div class="column column-offset-50">'+ _("System Fee") + ' ' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
return html
def GetRegisterTransactionResult(result,address) :
html = ''
html = html + '<div class="container">\n'
lens = 0
len_in = len(result['vin'])
len_out = len(result['vout'])
if len_in > len_out :
lens = len_in
else :
lens = len_out
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("RegisterTransaction") +'</b></div>\n'
html = html + '<div class="column"><a href="/tx/'+ result['txid'] + '">' + result['txid'] + '</a></div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Asset Name") +'</div>\n'
html = html + '<div class="column"><a href="/asset/'+ result['txid'] + '">' + web.GetAssetNameByAsset(result['asset']) + '</a></div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Asset Type") +'</div>\n'
html = html + '<div class="column">' + result['asset']['type'] + '</div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Asset Amount") +'</div>\n'
html = html + '<div class="column">' + web.GetAssetAmount(result['asset']['amount']) + '</div>\n'
html = html + '</div>\n'
html = html + GetCoinsByTransactionResult(result,address,lens)
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Time") +'</div>\n'
html = html + '<div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '<div class="column column-offset-50">'+ _("System Fee") + ' ' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
return html
def GetIssueTransactionResult(result,address) :
html = ''
html = html + '<div class="container">\n'
lens = len(result['vout'])
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("IssueTransaction") +'</b></div>\n'
html = html + '<div class="column"><a href="/tx/'+ result['txid'] + '">' + result['txid'] + '</a></div>\n'
html = html + '</div>\n'
asset = web.GetAssetByTxid(result['vout'][0]['asset'])
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Asset Name") +'</div>\n'
html = html + '<div class="column"><a href="/asset/'+ result['txid'] + '">' + web.GetAssetNameByAsset(asset) + '</a></div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Asset Type") +'</div>\n'
html = html + '<div class="column">' + asset['type'] + '</div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Asset Amount") +'</div>\n'
html = html + '<div class="column">' + web.GetAssetAmount(asset['amount']) + '</div>\n'
html = html + '</div>\n'
html = html + GetCoinsByTransactionResult(result,address,lens)
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Time") +'</div>\n'
html = html + '<div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '<div class="column column-offset-50">'+ _("System Fee") + ' ' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
return html
def GetEnrollmentTransactionResult(result,address) :
html = ''
html = html + '<div class="container">\n'
lens = 0
len_in = len(result['vin'])
len_out = len(result['vout'])
if len_in > len_out :
lens = len_in
else :
lens = len_out
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("EnrollmentTransaction") +'</b></div>\n'
html = html + '<div class="column"><a href="/tx/'+ result['txid'] + '">' + result['txid'] + '</a></div>\n'
html = html + '</div>\n'
html = html + GetCoinsByTransactionResult(result,address,lens)
html = html + '<div class="row">\n'
html = html + '<div class="column column-10">'+ _("Time") +'</div>\n'
html = html + '<div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '<div class="column column-offset-50">'+ _("System Fee") + ' ' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
return html
def GetTxResultInternal(result,address) :
html = ''
if result['type'] == 'MinerTransaction' :
html = html + GetMinerTransactionResult(result)
elif result['type'] == 'ContractTransaction' :
html = html + GetContractTransactionResult(result,address)
elif result['type'] == 'ClaimTransaction' :
html = html + GetClaimTransactionResult(result,address)
elif result['type'] == 'RegisterTransaction' :
html = html + GetRegisterTransactionResult(result,address)
elif result['type'] == 'IssueTransaction' :
html = html + GetIssueTransactionResult(result,address)
elif result['type'] == 'EnrollmentTransaction' :
html = html + GetEnrollmentTransactionResult(result,address)
return html
def GetTxResult(result) :
html = ''
html = html + '<div class="container">\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("Txid") +'</b></div><div class="column"><b>' + result['txid'] + '</b></div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("Type") +'</b></div><div class="column"><a href="/tx/' + result['type'] + '">' + _(result['type']) + '</a></div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("Time") +'</b></div><div class="column">' + web.GetLocalTime(result['time']) + '</div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("Fee") +'</b></div><div class="column">' + result['sys_fee'] + ' ' + _("ANC") + '</div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("TxSize") +'</b></div><div class="column">' + str(result['size']) + ' Bytes</div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("TxVersion") +'</b></div><div class="column">' + str(result['version']) + '</div>\n'
html = html + '</div>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-10"><b>'+ _("From Height") +'</b></div><div class="column"><a href="/block/' + str(result['height']) + '">' + str(result['height']) + '</a></div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
html = html + '<br/>\n'
html = html + GetTxResultInternal(result,None)
return html
def GetTxByHashInternal(txid) :
html = web.GetHeader("tx")
result = web.collection_txs.find_one({"txid":txid})
if result :
html = html + GetTxResult(result)
else :
html = html + _("Transaction Not Found!")
html = html + web.GetFooter()
return html
def GetTxInternal(txtype,page,listnum) :
if page <= 0 :
return 'page index begin: 1'
start = (page-1)*listnum
html = ''
html = html + '<div class="container">\n'
html = html + '<table width="80%" border="0" cellpadding="3" cellspacing="0">\n'
html = html + '<tr align="left">\n'
html = html + '<th>'+ _("Type") +'</th><th>'+ _("Txid") +'</th><th>'+ _("Height") +'</th><th>'+ _("In/Out") +'</th><th>'+ _("System Fee") +'</th><th>'+ _("Size") +'</th><th>'+ _("Time") +'</th>' + '<br/>\n'
html = html + '</tr>\n'
if txtype == None :
results = web.collection_txs.find({"type":{"$ne":"MinerTransaction"}}).sort("height",-1).limit(listnum).skip(start)
else :
results = web.collection_txs.find({"type":txtype}).sort("height",-1).limit(listnum).skip(start)
for tx in results :
html = html + '<tr>\n'
html = html + '<td>' + '<a href="/tx/' + tx['type'] + '">' + _(tx['type']) + '</a> </td>\n'
html = html + '<td>' + '<a href="/tx/' + tx['txid'] + '">' + tx['txid'] + '</a> </td>\n'
html = html + '<td>' + '<a href="/block/' + str(tx['height']) + '">' + str(tx['height']) + '</a> ' + '</td>\n'
html = html + '<td>' + str(len(tx['vin'])) + '/' + str(len(tx['vout'])) + ' </td>\n'
        html = html + '<td>' + str(tx['sys_fee']) + ' ' + _("ANC") + ' </td>\n'
html = html + '<td>' + str(tx['size']) + ' Bytes </td>\n'
html = html + '<td>' + web.GetLocalTime(tx['time']) + ' </td>\n'
html = html + '</tr>\n'
html = html + '</table>\n'
html = html + '</div>\n'
return html
def GetTxPagination(txtype,page) :
html = ''
html = html + '<div name="pages" align="center">\n'
if txtype == None :
count = web.collection_txs.find({"type":{"$ne":"MinerTransaction"}}).count()
else :
count = web.collection_txs.find({"type":txtype}).count()
if count == 0 :
return ''
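    # Page count: floor division, then round up when the last page is only
    # partially filled (assumes Python 2 style integer division for '/').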
pages = count / web.TX_PER_PAGE
if count % web.TX_PER_PAGE != 0 :
pages = pages + 1
if page <= 4 :
displaystart = 1
else :
if page - 4 > 1 :
displaystart = page - 4
else :
displaystart = 1
if page >= pages - 4 and pages > 9 :
displaystart = pages - 9
displayend = pages
else :
if pages <= 9 :
displayend = pages
else :
displayend = displaystart + 9
if txtype == None :
html = html + '<a href="/tx/page/' + str(1) + '"><<</a> '
for i in range(displaystart,displayend+1) :
if i != page :
html = html + '<a href="/tx/page/' + str(i) + '">' + str(i) + '</a> '
else :
html = html + str(i) + ' '
html = html + '<a href="/tx/page/' + str(pages) + '">>></a> '
else :
html = html + '<a href="/tx/' + txtype + '/page/' + str(1) + '"><<</a> '
for i in range(displaystart,displayend+1) :
if i != page :
html = html + '<a href="/tx/' + txtype + '/page/' + str(i) + '">' + str(i) + '</a> '
else :
html = html + str(i) + ' '
html = html + '<a href="/tx/' + txtype + '/page/' + str(pages) + '">>></a> '
html = html + '<br/>\n'
html = html + '</div>\n'
return html
def GetTxPage(txtype,page) :
html = web.GetHeader("tx")
html = html + '<div name="tx" align="center">\n'
html = html + '<br/><br/>\n'
html = html + '<h2>'+ _("Transaction Information") +'</h2>\n'
html = html + '<div class="container">\n'
results = web.collection_txs.find({"type":{"$ne":"MinerTransaction"}}).distinct("type")
count = len(results)
row = int(math.ceil(count / 4))
r = 0
for i in range(0, row+1) :
html = html + '<div class="row">\n'
html = html + '<div class="column column-20"></div>\n'
for j in range(0,4) :
if i==0 and j==0 :
if txtype == None :
html = html + '<div class="column column-15"><a href="/tx"><b>[' + _('All Transaction') + ']</b></a></div>\n'
else :
html = html + '<div class="column column-15"><a href="/tx">[' + _('All Transaction') + ']</a></div>\n'
continue
if r >= count or results[r] == "MinerTransaction":
html = html + '<div class="column column-15"></div>\n'
elif txtype == results[r] :
html = html + '<div class="column column-15"><a href="/tx/' + results[r] + '"><b>[' + _(results[r]) + ']</b></a></div>\n'
else :
html = html + '<div class="column column-15"><a href="/tx/' + results[r] + '">[' + _(results[r]) + ']</a></div>\n'
r = r + 1
html = html + '<div class="column column-20"></div>\n'
html = html + '</div>\n'
html = html + '</div>\n'
html = html + '<br/>\n'
html = html + '<br/>\n'
if txtype != None :
html = html + '<h4>- '+ _(txtype) +' -</h4>\n'
Pagination = GetTxPagination(txtype,page)
html = html + Pagination
html = html + GetTxInternal(txtype,page,web.BLOCK_PER_PAGE)
html = html + '<br/>\n'
html = html + Pagination
html = html + '</div>\n'
html = html + web.GetFooter()
return html
| mit | -3,990,035,989,149,739,500 | 35.375267 | 290 | 0.56524 | false |
Arzaroth/CelestiaSunrise | celestia/utility/ponies.py | 1 | 10899 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: ponies.py
# by Arzaroth Lekva
# [email protected]
#
from collections import OrderedDict
PONY_LIST = OrderedDict([
('Pony_Ace', 'Ace'),
('Pony_Action_Shot', 'Action Shot'),
('Pony_AKYearling', 'AKYearling'),
('Pony_Aloe', 'Aloe'),
('Pony_Apple_Bloom', 'Apple Bloom'),
('Pony_Apple_Bottoms', 'Apple Bottoms'),
('Pony_Apple_Bumpkin', 'Apple Bumpkin'),
('Pony_Apple_Cider', 'Apple Cider'),
('Pony_Apple_Cinnamon', 'Apple Cinnamon'),
('Pony_Apple_Cobbler', 'Apple Cobbler'),
('Pony_Apple_Dumpling', 'Apple Dumpling'),
('Pony_Apple_Honey', 'Apple Honey'),
('Pony_Apple_Leaves', 'Apple Leaves'),
('Pony_Apple_Pie', 'Apple Pie'),
('Pony_Apple_Rose', 'Apple Rose'),
('Pony_Apple_Stars', 'Apple Stars'),
('Pony_Apple_Strudel', 'Apple Strudel'),
('Pony_Applefritter', 'Apple Fritter'),
('Pony_Applejack', 'Applejack'),
('Pony_Architecture_Unicorn', 'Architecture Unicorn'),
('Pony_Aunt_Applesauce', 'Aunt Applesauce'),
('Pony_Aunt_Orange', 'Aunt Orange'),
('Pony_Babs_Seed', 'Babs Seed'),
('Pony_Banana_Bliss', 'Banana Bliss'),
('Pony_Beauty_Brass', 'Beauty Brass'),
('Pony_Berry_Punch', 'Berry Punch'),
('Pony_Big_Daddy', 'Big Daddy McColt'),
('Pony_Big_Mac', 'Big Macintosh'),
('Pony_Big_Shot', 'Big Shot'),
('Pony_Blue_moon', 'Blue Moon'),
('Pony_Bon_Bon', 'Bon Bon'),
('Pony_Braeburn', 'Braeburn'),
('Pony_Bright_Unicorn', 'Bright Unicorn'),
('Pony_Bulk_Biceps', 'Bulk Biceps'),
('Pony_Candy_Apples', 'Candy Apples'),
('Pony_Caramel', 'Caramel'),
('Pony_Caramel_Apple', 'Caramel Apple'),
('Pony_Charity_Kindheart', 'Charity Kindheart'),
('Pony_Cheerilee', 'Cheerilee'),
('Pony_Cheese_Sandwich', 'Cheese Sandwich'),
('Pony_Cherry_Fizzy', 'Cherry Fizzy'),
('Pony_Cherry_Jubilee', 'Cherry Jubilee'),
('Pony_CherryBerry', 'Cherry Berry'),
('Pony_Claude_the_Puppeteer', 'Claude the Puppeteer'),
('Pony_Clear_Skies', 'Clear Skies'),
('Pony_Clumsy_Clownspony', 'Clumsy Clownspony'),
('Pony_Coco_Crusoe', 'Coco Crusoe'),
('Pony_Coco_Pommel', 'Coco Pommel'),
('Pony_Comet_Tail', 'Comet Tail'),
('Pony_Compass_Star', 'Compass Star'),
('Pony_Conductor', 'Conductor'),
('Pony_Countess_Coloratura', 'Countess Coloratura'),
('Pony_Crescent_Moon', 'Crescent Moon'),
('Pony_Curio_Shopkeeper', 'Curio Shopkeeper'),
('Pony_Daisy', 'Daisy'),
('Pony_Dancer_Pony_1', 'Coloratura\'s Rocker'),
('Pony_Dancer_Pony_2', 'Coloratura\'s Stylist'),
('Pony_Dancer_Pony_3', 'Coloratura\'s Choreographer'),
('Pony_Dancer_Pony_4', 'Coloratura\'s Hype Pony'),
('Pony_Dancer_Pony_5', 'Coloratura\'s Lyricist'),
('Pony_Dancer_Pony_6', 'Coloratura\'s Breakdancer'),
('Pony_Dancing_Clownspony', 'Dancing Clownspony'),
('Pony_Daring', 'Daring Do'),
('Pony_Diamond_Tiara', 'Diamond Tiara'),
('Pony_Discord', 'Discord'),
('Pony_Dj_Pon3', 'Dj Pon3 (Ponyville)'),
('Pony_Dj_Pon3_Canterlot', 'Dj Pon3 (Canterlot)'),
('Pony_Double_Diamond', 'Double Diamond'),
('Pony_DrWhooves', 'Dr. Hooves'),
('Pony_Eclaire_Creme', 'Eclaire Creme'),
('Pony_Eff_Stop', 'Eff Stop'),
('Pony_Elite_Male', 'Elite Pony'),
('Pony_Emerald_Gem', 'Emerald Gem'),
('Pony_Emerald_Green', 'Emerald Green'),
('Pony_Fancypants', 'Fancypants'),
('Pony_Fast_Clip', 'Fast Clip'),
('Pony_Fashion_Plate', 'Fashion Plate'),
('Pony_Fashionable_Unicorn', 'Fashionable Unicorn'),
('Pony_Featherweight', 'Featherweight'),
('Pony_Filthy_Rich', 'Filthy Rich'),
('Pony_Fine_Line', 'Fine Line'),
('Pony_FireChief', 'Dinky Doo (Fire Chief)'),
('Pony_Flam', 'Flam'),
('Pony_Flash_Sentry', 'Flash Sentry'),
('Pony_Flashy_Pony', 'Flashy Pony'),
('Pony_Fleetfoot', 'Fleetfoot'),
('Pony_Fleur_Dis_Lee', 'Fleur Dis Lee'),
('Pony_Flim', 'Flim'),
('Pony_Fluttershy', 'Fluttershy'),
('Pony_Forsythia', 'Forsythia'),
('Pony_Four_Step', 'Four Step'),
('Pony_Frederic', 'Frederic'),
('Pony_Gala_Appleby', 'Gala Appleby'),
('Pony_Gilda', 'Gilda'),
('Pony_Gleeful_Clownspony', 'Gleeful Clownspony'),
('Pony_Golden_Delicious', 'Golden Delicious'),
('Pony_Golden_Harvest', 'Golden Harvest'),
('Pony_Goldie_Delicious', 'Goldie Delicious'),
('Pony_Goth_Unicorn', 'Goth Unicorn'),
('Pony_Grampa_Gruff', 'Grampa Gruff'),
('Pony_Granny_Smith', 'Granny Smith'),
('Pony_Green_Jewel', 'Green Jewel'),
('Pony_Greta', 'Greta'),
('Pony_Griffon_Shopkeeper', 'Griffon Shopkeeper'),
('Pony_Gustave_le_Grand', 'Gustave le Grand'),
('Pony_Half_Baked_Apple', 'Half Baked Apple'),
('Pony_Hayseed_Turnip_Truck', 'Hayseed Turnip Truck'),
('Pony_Hoity_Toity', 'Hoity Toity'),
('Pony_Horticultural_Pegasus', 'Horticultural Pegasus'),
('Pony_Jeff_Letrotski', 'Jeff Letrotski'),
('Pony_Jet_Set', 'Jet Set'),
('Pony_Jigging_Clownspony', 'Jigging Clownspony'),
('Pony_Joe', 'Joe'),
('Pony_Jokester_Clownspony', 'Jokester Clownspony'),
('Pony_Junebug', 'Junebug'),
('Pony_Junior_Deputy', 'Junior Deputy'),
('Pony_King_Sombra', 'King Sombra'),
('Pony_Lassoing_Clownspony', 'Lassoing Clownspony'),
('Pony_Lemon_Hearts', 'Lemon Hearts'),
('Pony_Lemony_Gem', 'Lemony Gem'),
('Pony_Li_I_Griffon', 'Li\'l Griffon'),
('Pony_Lightning_Dust', 'Lightning Dust'),
('Pony_Lily_Valley', 'Lily Valley'),
('Pony_Limestone_Pie', 'Limestone Pie'),
('Pony_Lotus_Blossom', 'Lotus Blossom'),
('Pony_Lovestruck', 'Lovestruck'),
('Pony_Lucky_Clover', 'Lucky Clover'),
('Pony_Lucky_Dreams', 'Lucky Dreams'),
('Pony_Luna_Guard', 'Luna Guard'),
('Pony_Lyra', 'Lyra'),
('Pony_Lyrica', 'Lyrica'),
('Pony_Ma_Hooffield', 'Ma Hooffield'),
('Pony_Magnum', 'Hondo Flanks (Magnum)'),
('Pony_Mane_Goodall', 'Mane Goodall'),
('Pony_Mane_iac', 'Mane-iac'),
('Pony_Manehattan_Delegate', 'Manehattan Delegate'),
('Pony_Marble_Pie', 'Marble Pie'),
('Pony_Maud_Pie', 'Maud Pie'),
('Pony_Mayor', 'Mayor'),
('Pony_Minuette', 'Minuette'),
('Pony_Misty_Fly', 'Misty Fly'),
('Pony_Moondancer', 'Moondancer'),
('Pony_Mr_Breezy', 'Mr. Breezy'),
('Pony_Mr_Cake', 'Mr. Cake'),
('Pony_Mr_Greenhooves', 'Mr. Greenhooves'),
('Pony_Mr_Waddle', 'Mr. Waddle'),
('Pony_Mrs_Cake', 'Mrs. Cake'),
('Pony_MsHarshwhinny', 'Ms. Harshwhinny'),
('Pony_Musical_Clownspony', 'Musical Clownspony'),
('Pony_Neon_Lights', 'Neon Lights'),
('Pony_Nerdpony', 'Nerdpony'),
('Pony_Night_Glider', 'Night Glider'),
('Pony_Noteworthy', 'Noteworthy'),
('Pony_Nurse_Redheart', 'Nurse Redheart'),
('Pony_Octavia', 'Octavia'),
('Pony_Open_Skies', 'Open Skies'),
('Pony_Parish', 'Parish'),
('Pony_Party_Favor', 'Party Favor'),
('Pony_Peachy_Pie', 'Peachy Pie'),
('Pony_Peachy_Sweet', 'Peachy Sweet'),
('Pony_Pearl', 'Cookie Crumbles (Betty Bouffant)'),
('Pony_Perfect_Pace', 'Perfect Pace'),
('Pony_Pest_Control_Pony', 'Pest Control Pony'),
('Pony_Photofinish', 'Photo Finish'),
('Pony_Pinkie_Pie', 'Pinkie Pie'),
('Pony_Pinkiepies_Dad', 'Igneous Rock'),
('Pony_Pinkiepies_Mom', 'Cloudy Quartz'),
('Pony_Pipsqueak', 'Pipsqueak'),
('Pony_Posh_Unicorn', 'Posh Unicorn'),
('Pony_Press_Pass', 'Press Pass'),
('Pony_Prim_Hemline', 'Prim Hemline'),
('Pony_Prince_Blueblood', 'Prince Blueblood'),
('Pony_Princess_Cadence', 'Princess Cadence'),
('Pony_Princess_Celestia', 'Princess Celestia'),
('Pony_Princess_Luna', 'Princess Luna'),
('Pony_Professor', 'Bill Neigh (Professor)'),
('Pony_Public_Works_Pony', 'Public Works Pony'),
('Pony_Purple_Wave', 'Purple Wave'),
('Pony_Quake', 'Quake'),
('Pony_Rainbow_Dash', 'Rainbow Dash'),
('Pony_Randolph', 'Randolph the Butler'),
('Pony_Rara', 'Rara'),
('Pony_Rare_Find', 'Rare Find'),
('Pony_Rarity', 'Rarity'),
('Pony_Red_Delicious', 'Red Delicious'),
('Pony_Red_Gala', 'Red Gala'),
('Pony_Renfairpony', 'Richard (the) Hoovenheart'),
('Pony_Royal_Guard', 'Royal Guard'),
('Pony_Royal_Pin', 'Royal Pin'),
('Pony_Royal_Ribbon', 'Royal Ribbon'),
('Pony_Royal_Riff', 'Royal Riff'),
('Pony_Rumble', 'Rumble'),
('Pony_Sapphire_Shores', 'Sapphire Shores'),
('Pony_Sassy_Saddles', 'Sassy Saddles'),
('Pony_Savoir_Fare', 'Savoir Fare'),
('Pony_Scootaloo', 'Scootaloo'),
('Pony_Sea_Swirl', 'Sea Swirl'),
('Pony_Senior_Deputy', 'Senior Deputy'),
('Pony_Shadow_Surprise', 'The Shadowbolts'),
('Pony_Sheriff_Silverstar', 'Sheriff Silverstar'),
('Pony_Shining_Armour', 'Shining Armour'),
('Pony_Shooting_Star', 'Shooting Star'),
('Pony_Silver_Shill', 'Silver Shill'),
('Pony_Silver_Spanner', 'Silver Spanner'),
('Pony_Silver_Spoon', 'Silver Spoon'),
('Pony_Snails', 'Snails'),
('Pony_Snappy_Scoop', 'Reporter Pony (Snappy Scoop)'),
('Pony_Snips', 'Snips'),
('Pony_Soarin', 'Soarin'),
('Pony_Spike', 'Spike'),
('Pony_Spitfire', 'Spitfire'),
('Pony_Spoiled_Rich', 'Spoiled Rich'),
('Pony_Sprinkle_Stripe', 'Sprinkle Stripe'),
('Pony_Starlight_Glimmer', 'Starlight Glimmer'),
('Pony_Studious_Delegate', 'Studious Delegate'),
('Pony_Sugar_Belle', 'Sugar Belle'),
('Pony_Sunny_Daze', 'Sunny Daze'),
('Pony_Sunsetshimmer', 'Sunset Shimmer'),
('Pony_Sunshower', 'Sunshower'),
('Pony_Suri_Polomare', 'Suri Polomare'),
('Pony_Svengallop', 'Svengallop'),
('Pony_Swan_Song', 'Swan Song'),
('Pony_Sweetiebelle', 'Sweetie Belle'),
('Pony_Thunderlane', 'Thunderlane'),
('Pony_Toe_Tapper', 'Toe Tapper'),
('Pony_Torch_Song', 'Torch Song'),
('Pony_Tracy_Flash', 'Photographer Pony (Tracy Flash)'),
('Pony_Traveling_Gentlecolt', 'Traveling Gentlecolt'),
('Pony_Traveling_Mare', 'Traveling Mare'),
('Pony_Traveling_Pony', 'Traveling Pony'),
('Pony_Tree_Hugger', 'Tree Hugger'),
('Pony_Trenderhoof', 'Trenderhoof'),
('Pony_Trixie', 'Trixie'),
('Pony_Trouble_Shoes', 'Trouble Shoes'),
('Pony_Truffle', "Teacher's Pet"),
('Pony_Twilight_Sparkle', 'Twilight Sparkle'),
('Pony_Twilight_Velvet', 'Twilight Velvet'),
('Pony_Twilights_Dad', "Night Light (Twilight's Dad)"),
('Pony_Twinkleshine', 'Twinkleshine'),
('Pony_Twist', 'Twist'),
('Pony_Uncle_Orange', 'Uncle Orange'),
('Pony_Unicorn_Guard', 'Unicorn Guard'),
('Pony_Unicorn_Painter', 'Unicorn Painter'),
('Pony_Uppercrust', 'Upper Crust'),
('Pony_Walter', 'Walter (Bowling Pony)'),
('Pony_Wensley', 'Wensley'),
('Pony_Whinnyapolis_Delegate', 'Whinnyapolis Delegate'),
('Pony_Wild_Fire', 'Wild Fire'),
('Pony_Wind_Rider', 'Wind Rider'),
('Pony_Zecora', 'Zecora'),
('Pony_Zipporwhill', 'Zipporwhill')
])
| bsd-2-clause | -2,655,956,969,015,951,400 | 40.441065 | 60 | 0.606753 | false |
msincenselee/vnpy | vnpy/app/risk_manager/ui/widget.py | 1 | 5448 | from vnpy.event import EventEngine
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtWidgets
from ..engine import APP_NAME
class RiskManager(QtWidgets.QDialog):
""""""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.rm_engine = main_engine.get_engine(APP_NAME)
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("交易风控")
# Create widgets
self.active_combo = QtWidgets.QComboBox()
self.active_combo.addItems(["停止", "启动"])
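        # Item order matters: index 0 = "停止" (stopped), index 1 = "启动" (running);
        # save_setting() and update_setting() below rely on this ordering.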
self.flow_limit_spin = RiskManagerSpinBox()
self.flow_clear_spin = RiskManagerSpinBox()
self.size_limit_spin = RiskManagerSpinBox()
self.trade_limit_spin = RiskManagerSpinBox()
self.active_limit_spin = RiskManagerSpinBox()
self.cancel_limit_spin = RiskManagerSpinBox()
self.percent_limit_spin = RiskManagerSpinBox()
self.ratio_active_limit_spin = RiskManagerSpinBox()
self.reject_limit_percent_spin = RiskManagerSpinBox()
self.cancel_limit_percent_spin = RiskManagerSpinBox()
self.trade_hold_active_limit_spin = RiskManagerSpinBox()
self.trade_hold_percent_limit_spin = RiskManagerSpinBox()
save_button = QtWidgets.QPushButton("保存")
save_button.clicked.connect(self.save_setting)
# Form layout
form = QtWidgets.QFormLayout()
form.addRow("风控运行状态", self.active_combo)
form.addRow("委托流控上限(笔)", self.flow_limit_spin)
form.addRow("委托流控清空(秒)", self.flow_clear_spin)
form.addRow("单笔委托上限(数量)", self.size_limit_spin)
form.addRow("总成交上限(笔)", self.trade_limit_spin)
form.addRow("活动委托上限(笔)", self.active_limit_spin)
form.addRow("合约撤单上限(笔)", self.cancel_limit_spin)
form.addRow("资金仓位上限(%)", self.percent_limit_spin)
form.addRow("激活废单/撤单(笔)", self.ratio_active_limit_spin)
form.addRow("废单比上限(%)", self.reject_limit_percent_spin)
form.addRow("撤单比上限(%)", self.cancel_limit_percent_spin)
form.addRow("激活成交/持仓比阈值(笔)", self.trade_hold_active_limit_spin)
form.addRow("成交/持仓比上限(%)", self.trade_hold_percent_limit_spin)
form.addRow(save_button)
self.setLayout(form)
# Set Fix Size
hint = self.sizeHint()
self.setFixedSize(hint.width() * 1.2, hint.height())
def save_setting(self):
""""""
active_text = self.active_combo.currentText()
if active_text == "启动":
active = True
else:
active = False
setting = {
"active": active,
"order_flow_limit": self.flow_limit_spin.value(),
"order_flow_clear": self.flow_clear_spin.value(),
"order_size_limit": self.size_limit_spin.value(),
"trade_limit": self.trade_limit_spin.value(),
"active_order_limit": self.active_limit_spin.value(),
"order_cancel_limit": self.cancel_limit_spin.value(),
"percent_limit": self.percent_limit_spin.value(),
"ratio_active_order_limit": self.ratio_active_limit_spin.value(),
"cancel_ratio_percent_limit": self.cancel_limit_percent_spin.value(),
"reject_ratio_percent_limit": self.reject_limit_percent_spin.value(),
"trade_hold_active_limit": self.trade_hold_active_limit_spin.value(),
"trade_hold_percent_limit": self.trade_hold_percent_limit_spin.value()
}
self.rm_engine.update_setting(setting)
self.rm_engine.save_setting()
self.close()
def update_setting(self):
""""""
setting = self.rm_engine.get_setting()
if setting["active"]:
self.active_combo.setCurrentIndex(1)
else:
self.active_combo.setCurrentIndex(0)
self.flow_limit_spin.setValue(setting["order_flow_limit"])
self.flow_clear_spin.setValue(setting["order_flow_clear"])
self.size_limit_spin.setValue(setting["order_size_limit"])
self.trade_limit_spin.setValue(setting["trade_limit"])
self.active_limit_spin.setValue(setting["active_order_limit"])
self.cancel_limit_spin.setValue(setting["order_cancel_limit"])
self.percent_limit_spin.setValue(setting.get('percent_limit', 100))
self.ratio_active_limit_spin.setValue(setting.get('ratio_active_order_limit', 500))
self.cancel_limit_percent_spin.setValue(setting.get('cancel_ratio_percent_limit', 90))
self.reject_limit_percent_spin.setValue(setting.get('reject_ratio_percent_limit', 90))
self.trade_hold_active_limit_spin.setValue(setting.get('trade_hold_active_limit', 1000))
self.trade_hold_percent_limit_spin.setValue(setting.get("trade_hold_percent_limit", 300))
def exec_(self):
""""""
self.update_setting()
super().exec_()
class RiskManagerSpinBox(QtWidgets.QSpinBox):
""""""
def __init__(self, value: int = 0):
""""""
super().__init__()
self.setMinimum(0)
self.setMaximum(1000000)
self.setValue(value)
| mit | 756,416,297,767,593,700 | 37.955224 | 97 | 0.624904 | false |
bt3gl/Numerical-Methods-for-Physics | others/ODES/verlet-converge.py | 1 | 3194 | # integrate the equations of motion of a pendulum, w/o the small angle
# approximation
import numpy
import pylab
import math
# global parameters
g = 9.81   # gravitational acceleration [m/s^2]
L = 9.81 # length of pendulum [m]
class pendulumHistory:
""" simple container to store the pendulum history """
def __init__(self):
self.t = None
self.theta = None
self.omega = None
def energy(self):
""" return the energy (per unit mass) """
return 0.5*L**2*self.omega**2 - g*L*numpy.cos(self.theta)
def rhs(theta, omega):
""" equations of motion for a pendulum
dtheta/dt = omega
domega/dt = - (g/L) sin theta """
return omega, -(g/L)*numpy.sin(theta)
def intVVerlet(theta0, dt, tmax, rhs):
""" integrate the equations of motion using Euler-Cromer """
# initial conditions
t = 0.0
theta = theta0
omega = 0.0 # at the maximum angle, the angular velocity is 0
# store the history for plotting
tPoints = [t]
thetaPoints = [theta]
omegaPoints = [omega]
while (t < tmax):
# get the RHS at time-level n
thetadot, omegadot = rhs(theta, omega)
thetanew = theta + dt*thetadot + 0.5*dt**2*omegadot
# get the RHS with the updated theta -- omega doesn't matter
# here, since we only need thetadot and omega doesn't affect
# that.
thetadot_np1, omegadot_np1 = rhs(thetanew, omega)
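        # velocity-Verlet velocity update: average the accelerations at t and t+dt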
omeganew = omega + 0.5*dt*(omegadot + omegadot_np1)
t += dt
# store
tPoints.append(t)
thetaPoints.append(thetanew)
omegaPoints.append(omeganew)
# set for the next step
theta = thetanew; omega = omeganew
# return a pendulumHistory object with the trajectory
H = pendulumHistory()
H.t = numpy.array(tPoints)
H.theta = numpy.array(thetaPoints)
H.omega = numpy.array(omegaPoints)
return H
# 10 degree pendulum
theta0 = numpy.radians(10.0)
# period estimate
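# small-angle period with the first anharmonic correction: T ~ 2*pi*sqrt(L/g)*(1 + theta0**2/16)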
T = 2.0*math.pi*math.sqrt(L/g)*(1.0 + theta0**2/16.0)
print "period = ", T
tmax = 10.0*T
#dts = [0.5, 0.25, 0.125, 0.06125, 0.030625]
dts = [1, 0.1, 0.01, 0.001, 0.0001]
err = []
for dt in dts:
HVVerlet = intVVerlet(theta0, dt, tmax, rhs)
E = HVVerlet.energy()
err.append(abs(E[-1]-E[0])/abs(E[0]))
print dt, abs(E[-1]-E[0])/abs(E[-1])
pylab.plot(HVVerlet.t, HVVerlet.theta, label="dt = %f" % (dt))
leg = pylab.legend(frameon=False)
ltext = leg.get_texts()
pylab.setp(ltext, fontsize='small')
pylab.savefig("Verlet-theta-dt.png")
pylab.clf()
pylab.scatter(dts, err)
pylab.plot(numpy.array(dts), err[0]*(dts[0]/numpy.array(dts))**-2, label="2nd order scaling")
pylab.plot(numpy.array(dts), err[0]*(dts[0]/numpy.array(dts))**-4, ls=":", label="4th order scaling")
pylab.plot(numpy.array(dts), err[0]*(dts[0]/numpy.array(dts))**-6, ls="--", label="6th order scaling")
leg = pylab.legend(loc=2, frameon=False)
ltext = leg.get_texts()
pylab.setp(ltext, fontsize='small')
ax = pylab.gca()
ax.set_xscale('log')
ax.set_yscale('log')
pylab.xlim(0.00001, 1.0)
pylab.ylim(1.e-12, 1.e-1)
pylab.savefig("Verlet-E-converge.png")
| apache-2.0 | 320,792,559,437,235,260 | 21.814286 | 102 | 0.617095 | false |
realitix/vulk | vulk/graphic/d2/batch.py | 1 | 29169 | '''BaseBatch module
BaseBatch is used by SpriteBatch and BlockBatch.
'''
from abc import ABC, abstractmethod
from os import path
import math
from vulk import PATH_VULK_SHADER
from vulk import vulkanconstant as vc
from vulk import vulkanobject as vo
from vulk import vulkanutil as vu
from vulk.graphic import mesh as me
from vulk.graphic import uniform
from vulk.math.matrix import ProjectionMatrix, TransformationMatrix, Matrix4
class BaseBatch(ABC):
def __init__(self, context, size=1000, shaderprogram=None,
out_view=None):
"""Initialize BaseBatch
Args:
context (VulkContext)
size (int): Max number of blocks in one batch
shaderprogram (ShaderProgram): Custom shader program
clear (list[float]): 4 `float` (r,g,b,a) or `None`
out_view (ImageView): Out image view to render into
**Note: By default, `BaseBatch` doesn't clear `out_image`, you have
to fill `clear` to clear `out_image`**
**Note: By default, out image is the context `final_image`, you can
override this behavior with the `out_view` parameter**
"""
# ShaderProgram
if not shaderprogram:
shaderprogram = self.get_default_shaderprogram(context)
self.shaderprogram = shaderprogram
# Stored parameters
self.custom_out_view = out_view is not None
self.out_view = out_view if out_view else context.final_image_view
# Init rendering attributes
self.mesh = self.init_mesh(context, size)
self.init_indices(size)
self.uniformblock = self.init_uniform(context)
self.cbpool = self.init_commandpool(context)
self.descriptorpool = self.init_descriptorpool(context)
self.descriptorlayout = self.init_descriptorlayout(context)
self.pipelinelayout = self.init_pipelinelayout(context)
self.renderpass = self.init_renderpass(context)
self.pipeline = self.init_pipeline(context)
self.framebuffer = self.init_framebuffer(context)
# Others attributes
self.drawing = False
self.context = None
self.projection_matrix = ProjectionMatrix()
self.projection_matrix.to_orthographic_2d(
0, 0, context.width, context.height)
self.transform_matrix = TransformationMatrix()
self.combined_matrix = Matrix4()
self.idx = 0
self.matrices_dirty = True
self.reload_count = context.reload_count
@abstractmethod
def init_descriptorlayout(self, context):
"""Initialize and return descriptor layout
Args:
context (VulkContext)
Returns:
DescriptorSetLayout
"""
pass
def reload(self, context):
"""Reload the spritebatch
Args:
context (VulkContext)
"""
# Reload projection matrix
self.projection_matrix.to_orthographic_2d(
0, 0, context.width, context.height)
self.matrices_dirty = True
# Reload out view
if not self.custom_out_view:
self.out_view = context.final_image_view
# Reload renderpass, pipeline and framebuffer
self.renderpass.destroy(context)
self.renderpass = self.init_renderpass(context)
self.pipeline.destroy(context)
self.pipeline = self.init_pipeline(context)
self.framebuffer.destroy(context)
self.framebuffer = self.init_framebuffer(context)
# Update reload count
self.reload_count = context.reload_count
def init_indices(self, size):
'''Initialize mesh's indices.
It's done only at initialization for better performance.
*Parameters:*
- `size`: Number of blocks to handle
'''
j = 0
indices = self.mesh.indices_array
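        # Each quad is four vertices and six indices describing two triangles,
        # (j, j+1, j+2) and (j+2, j+3, j), so the index buffer never changes afterwards.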
for i in range(0, size * 6, 6):
indices[i] = j
indices[i + 1] = j + 1
indices[i + 2] = j + 2
indices[i + 3] = j + 2
indices[i + 4] = j + 3
indices[i + 5] = j
j += 4
def init_uniform(self, context):
'''Initialize `BlockBatch` uniforms.
It contains only the `combined_matrix` but you can extend it to add
uniforms.
*Parameters:*
- `context`: `VulkContext`
'''
matrix_attribute = uniform.UniformAttribute(
uniform.UniformShapeType.MATRIX4,
vc.DataType.SFLOAT32)
uniform_attributes = uniform.UniformAttributes([matrix_attribute])
return uniform.UniformBlock(context, uniform_attributes)
def init_commandpool(self, context):
return vu.CommandBufferSynchronizedPool(context)
def init_renderpass(self, context):
'''Initialize `BlockBatch` renderpass
*Parameters:*
- `context`: `VulkContext`
'''
attachment = vo.AttachmentDescription(
self.out_view.image.format, vc.SampleCount.COUNT_1,
vc.AttachmentLoadOp.LOAD, vc.AttachmentStoreOp.STORE,
vc.AttachmentLoadOp.DONT_CARE,
vc.AttachmentStoreOp.DONT_CARE,
vc.ImageLayout.COLOR_ATTACHMENT_OPTIMAL,
vc.ImageLayout.COLOR_ATTACHMENT_OPTIMAL)
subpass = vo.SubpassDescription([vo.AttachmentReference(
0, vc.ImageLayout.COLOR_ATTACHMENT_OPTIMAL)],
[], [], [], [])
dependency = vo.SubpassDependency(
vc.SUBPASS_EXTERNAL,
vc.PipelineStage.COLOR_ATTACHMENT_OUTPUT, vc.Access.NONE, 0,
vc.PipelineStage.COLOR_ATTACHMENT_OUTPUT,
vc.Access.COLOR_ATTACHMENT_READ | vc.Access.COLOR_ATTACHMENT_WRITE
)
return vo.Renderpass(context, [attachment], [subpass], [dependency])
def init_pipelinelayout(self, context):
'''Initialize pipeline layout
*Parameters:*
- `context`: `VulkContext`
'''
return vo.PipelineLayout(context, [self.descriptorlayout])
def init_pipeline(self, context):
'''Initialize pipeline
Here we are to set the Vulkan pipeline.
*Parameters:*
- `context`: `VulkContext`
'''
# Vertex attribute
vertex_description = vo.VertexInputBindingDescription(
0, self.mesh.attributes.size, vc.VertexInputRate.VERTEX)
vk_attrs = []
for attr in self.mesh.attributes:
vk_attrs.append(vo.VertexInputAttributeDescription(
attr.location, 0, attr.format, attr.offset))
vertex_input = vo.PipelineVertexInputState(
[vertex_description], vk_attrs)
input_assembly = vo.PipelineInputAssemblyState(
vc.PrimitiveTopology.TRIANGLE_LIST)
# Viewport and Scissor
viewport = vo.Viewport(0, 0, context.width, context.height, 0, 1)
scissor = vo.Rect2D(vo.Offset2D(0, 0),
vo.Extent2D(context.width, context.height))
viewport_state = vo.PipelineViewportState([viewport], [scissor])
# Rasterization
rasterization = vo.PipelineRasterizationState(
False, vc.PolygonMode.FILL, 1, vc.CullMode.BACK,
vc.FrontFace.COUNTER_CLOCKWISE, 0, 0, 0)
# Disable multisampling
multisample = vo.PipelineMultisampleState(
False, vc.SampleCount.COUNT_1, 0)
# Disable depth
depth = None
# Enable blending
blend_attachment = vo.PipelineColorBlendAttachmentState(
True, vc.BlendFactor.SRC_ALPHA,
vc.BlendFactor.ONE_MINUS_SRC_ALPHA, vc.BlendOp.ADD,
vc.BlendFactor.SRC_ALPHA, vc.BlendFactor.ONE_MINUS_SRC_ALPHA,
vc.BlendOp.ADD, vc.ColorComponent.R | vc.ColorComponent.G | vc.ColorComponent.B | vc.ColorComponent.A # noqa
)
blend = vo.PipelineColorBlendState(
False, vc.LogicOp.COPY, [blend_attachment], [0, 0, 0, 0])
dynamic = None
return vo.Pipeline(
context, self.shaderprogram.stages, vertex_input, input_assembly,
viewport_state, rasterization, multisample, depth,
blend, dynamic, self.pipelinelayout, self.renderpass)
def init_framebuffer(self, context):
'''Create the framebuffer with the final_image (from context)
*Parameters:*
- `context`: `VulkContext`
'''
return vo.Framebuffer(
context, self.renderpass, [self.out_view],
context.width, context.height, 1)
def begin(self, context, semaphores=None):
'''Begin drawing sprites
*Parameters:*
- `context`: `VulkContext`
- `semaphore`: `list` of `Semaphore` to wait on before
starting all drawing operations
**Note: `context` is borrowed until `end` call**
'''
if self.drawing:
raise Exception("Currently drawing")
if self.reload_count != context.reload_count:
raise Exception("Batch not reloaded, can't draw")
if self.matrices_dirty:
self.upload_matrices(context)
self.drawing = True
# Keep the context only during rendering and release it at `end` call
self.context = context
self.cbpool.begin(context, semaphores)
def end(self):
'''End drawing of sprite
*Parameters:*
- `context`: `VulkContext`
*Returns:*
`Semaphore` signaled when all drawing operations in
`SpriteBatch` are finished
'''
if not self.drawing:
raise Exception("Not currently drawing")
self.flush()
self.drawing = False
self.context = None
return self.cbpool.end()
def upload_matrices(self, context):
'''
Compute combined matrix from transform and projection matrix.
Then upload combined matrix.
*Parameters:*
- `context`: `VulkContext`
'''
self.combined_matrix.set(self.projection_matrix)
self.combined_matrix.mul(self.transform_matrix)
self.uniformblock.set_uniform(0, self.combined_matrix.values)
self.uniformblock.upload(context)
self.matrices_dirty = False
def update_transform(self, matrix):
'''Update the transfrom matrix with `matrix`
*Parameters:*
- `matrix`: `Matrix4`
**Note: This function doesn't keep a reference to the matrix,
it only copies data**
'''
self.transform_matrix.set(matrix)
self.matrices_dirty = True
def update_projection(self, matrix):
'''Update the projection matrix with `matrix`
*Parameters:*
- `matrix`: `Matrix4`
**Note: This function doesn't keep a reference to the matrix,
it only copies data**
'''
self.projection_matrix.set(matrix)
self.matrices_dirty = True
class BlockProperty():
"""Allow to set properties for a draw call"""
def __init__(self):
"""
x, y: position
width, height: size
colors: color of each point (top-left, top-right, bot-right, bot-left)
scale: x and y scale
rotation: rotation in clockwise
"""
self.x = 0
self.y = 0
self.width = 0
self.height = 0
self.colors = [[1] * 4] * 4
self.scale = [1] * 2
self.rotation = 0
self.border_widths = [0] * 4
self.border_radius = [0] * 4
self.border_colors = [[1] * 4] * 4
class BlockBatch(BaseBatch):
"""
    BlockBatch batches lots of blocks (small, stylized quads) into a
    minimum number of draw calls.
"""
def __init__(self, context, size=1000, shaderprogram=None,
out_view=None):
super().__init__(context, size, shaderprogram, out_view)
# Init rendering attributes
self.descriptorsets = self.init_descriptorsets(context)
def init_mesh(self, context, size):
'''Initialize the Mesh handling blocks
*Parameters:*
- `context`: `VulkContext`
- `size`: Number of blocks to handle
'''
vertex_attributes = me.VertexAttributes([
# Position
me.VertexAttribute(0, vc.Format.R32G32_SFLOAT),
# Texture UV
me.VertexAttribute(1, vc.Format.R32G32_SFLOAT),
# Color
me.VertexAttribute(2, vc.Format.R32G32B32A32_SFLOAT),
# Border widths
me.VertexAttribute(3, vc.Format.R32G32B32A32_SFLOAT),
# Border color (top)
me.VertexAttribute(4, vc.Format.R32G32B32A32_SFLOAT),
# Border color (right)
me.VertexAttribute(5, vc.Format.R32G32B32A32_SFLOAT),
# Border color (bottom)
me.VertexAttribute(6, vc.Format.R32G32B32A32_SFLOAT),
# Border color (left)
me.VertexAttribute(7, vc.Format.R32G32B32A32_SFLOAT),
# Border radius
me.VertexAttribute(8, vc.Format.R32G32B32A32_SFLOAT)
])
return me.Mesh(context, size * 4, size * 6, vertex_attributes)
def init_descriptorpool(self, context):
# Only 1 uniform buffer
size = 1
pool_sizes = [vo.DescriptorPoolSize(
vc.DescriptorType.UNIFORM_BUFFER, size)]
return vo.DescriptorPool(context, pool_sizes, size)
def init_descriptorlayout(self, context):
ubo_descriptor = vo.DescriptorSetLayoutBinding(
0, vc.DescriptorType.UNIFORM_BUFFER, 1,
vc.ShaderStage.VERTEX, None)
bindings = [ubo_descriptor]
return vo.DescriptorSetLayout(context, bindings)
def init_descriptorsets(self, context):
"""Create the descriptor set (for mat4)"""
descriptorsets = self.descriptorpool.allocate_descriptorsets(
context, 1, [self.descriptorlayout])
descriptorub_info = vo.DescriptorBufferInfo(
self.uniformblock.uniform_buffer.final_buffer, 0,
self.uniformblock.size)
descriptorub_write = vo.WriteDescriptorSet(
descriptorsets[0], 0, 0, vc.DescriptorType.UNIFORM_BUFFER,
[descriptorub_info])
vo.update_descriptorsets(context, [descriptorub_write], [])
return descriptorsets
def get_default_shaderprogram(self, context):
'''Generate a basic shader program if none given
*Parameters:*
- `context`: `VulkContext`
'''
vs = path.join(PATH_VULK_SHADER, "block.vs.glsl")
fs = path.join(PATH_VULK_SHADER, "block.fs.glsl")
shaders_mapping = {
vc.ShaderStage.VERTEX: vs,
vc.ShaderStage.FRAGMENT: fs
}
return vo.ShaderProgramGlslFile(context, shaders_mapping)
def flush(self):
'''Flush all draws to graphic card.
Currently, `flush` register and submit command.
*Parameters:*
- `context`: `VulkContext`
'''
if not self.idx:
return
if not self.drawing:
raise Exception("Not currently drawing")
# Upload mesh data
self.mesh.upload(self.context)
# Compute indices count
        blocks_in_batch = self.idx / 4  # 4 vertices per block
indices_count = int(blocks_in_batch) * 6
# Register commands
with self.cbpool.pull() as cmd:
width = self.context.width
height = self.context.height
cmd.begin_renderpass(
self.renderpass,
self.framebuffer,
vo.Rect2D(vo.Offset2D(0, 0),
vo.Extent2D(width, height)),
[]
)
cmd.bind_pipeline(self.pipeline)
self.mesh.bind(cmd)
cmd.bind_descriptor_sets(self.pipelinelayout, 0,
self.descriptorsets, [])
self.mesh.draw(cmd, 0, indices_count)
cmd.end_renderpass()
self.idx = 0
def draw(self, properties):
'''
Draw a block with `properties`
*Parameters:*
- `properties`: `BlockProperty`
'''
if not self.drawing:
raise Exception("Not currently drawing")
width = properties.width * properties.scale[0]
height = properties.height * properties.scale[1]
x = properties.x
y = properties.y
x2 = x + width
y2 = y + height
p1x, p2x, p3x, p4x = x, x, x2, x2
p1y, p2y, p3y, p4y = y, y2, y2, y
rotation = properties.rotation
if rotation:
cos = math.cos(rotation)
sin = math.sin(rotation)
# Set coordinates at origin to do a proper rotation
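            # Note: unlike SpriteBatch.draw, the rotated corners below are translated
            # back by (x, y) -- the block's origin corner -- rather than by its centre.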
w1 = -width / 2
w2 = width / 2
h1 = -height / 2
h2 = height / 2
x1 = cos * w1 - sin * h1
y1 = sin * w1 + cos * h1
x2 = cos * w1 - sin * h2
y2 = sin * w1 + cos * h2
x3 = cos * w2 - sin * h2
y3 = sin * w2 + cos * h2
x4 = x1 + (x3 - x2)
y4 = y3 - (y2 - y1)
x1 += p1x
x2 += p1x
x3 += p1x
x4 += p1x
y1 += p1y
y2 += p1y
y3 += p1y
y4 += p1y
else:
x1, x2, x3, x4 = p1x, p2x, p3x, p4x
y1, y2, y3, y4 = p1y, p2y, p3y, p4y
c = properties.colors
bw = properties.border_widths
bct = properties.border_colors[0]
bcr = properties.border_colors[1]
bcb = properties.border_colors[2]
bcl = properties.border_colors[3]
br = properties.border_radius
for val in [([x1, y1], [0, 0], c[0], bw, bct, bcr, bcb, bcl, br),
([x2, y2], [0, 1], c[3], bw, bct, bcr, bcb, bcl, br),
([x3, y3], [1, 1], c[2], bw, bct, bcr, bcb, bcl, br),
([x4, y4], [1, 0], c[1], bw, bct, bcr, bcb, bcl, br)]:
self.mesh.set_vertex(self.idx, val)
self.idx += 1
class SpriteBatchDescriptorPool():
'''
Manage pool of descriptor sets dedicated to spritebatch textures.
Theses sets contain uniform buffer and texture.
'''
def __init__(self, descriptorpool, descriptorlayout):
self.descriptorsets = []
self.descriptorset_id = -1
self.descriptorpool = descriptorpool
self.descriptorlayout = descriptorlayout
def pull(self, context):
self.descriptorset_id += 1
try:
descriptorset = self.descriptorsets[self.descriptorset_id]
except IndexError:
descriptorset = self.descriptorpool.allocate_descriptorsets(
context, 1, [self.descriptorlayout])[0]
self.descriptorsets.append(descriptorset)
return descriptorset
def reset(self):
self.descriptorset_id = -1
class SpriteBatch(BaseBatch):
'''
    SpriteBatch batches lots of sprites (small quads) into a minimum
    number of draw calls.
'''
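    # Typical usage (sketch; names taken from this module):
    #   batch = SpriteBatch(context)
    #   batch.begin(context)
    #   batch.draw(texture, x=10, y=10)
    #   semaphore = batch.end()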
def __init__(self, context, size=1000, shaderprogram=None,
out_view=None):
super().__init__(context, size, shaderprogram, out_view)
self.dspool = self.init_dspool()
self.last_texture = None
def init_mesh(self, context, size):
'''Initialize the Mesh handling sprites
*Parameters:*
- `context`: `VulkContext`
- `size`: Number of sprites to handle
'''
vertex_attributes = me.VertexAttributes([
# Position
me.VertexAttribute(0, vc.Format.R32G32_SFLOAT),
# Texture UV
me.VertexAttribute(1, vc.Format.R32G32_SFLOAT),
# Color
me.VertexAttribute(2, vc.Format.R32G32B32A32_SFLOAT)
])
return me.Mesh(context, size * 4, size * 6, vertex_attributes)
def init_descriptorpool(self, context):
'''Create the descriptor pool
*Parameters:*
- `context`: `VulkContext`
'''
size = 8
type_uniform = vc.DescriptorType.UNIFORM_BUFFER
type_sampler = vc.DescriptorType.COMBINED_IMAGE_SAMPLER
pool_sizes = [
vo.DescriptorPoolSize(type_uniform, size),
vo.DescriptorPoolSize(type_sampler, size)
]
return vo.DescriptorPool(context, pool_sizes, size)
def init_descriptorlayout(self, context):
'''Initialize descriptor layout for one uniform and one texture
*Parameters:*
- `context`: `VulkContext`
'''
ubo_descriptor = vo.DescriptorSetLayoutBinding(
0, vc.DescriptorType.UNIFORM_BUFFER, 1,
vc.ShaderStage.VERTEX, None)
texture_descriptor = vo.DescriptorSetLayoutBinding(
1, vc.DescriptorType.COMBINED_IMAGE_SAMPLER, 1,
vc.ShaderStage.FRAGMENT, None)
layout_bindings = [ubo_descriptor, texture_descriptor]
return vo.DescriptorSetLayout(context, layout_bindings)
def init_dspool(self):
return SpriteBatchDescriptorPool(self.descriptorpool,
self.descriptorlayout)
def get_default_shaderprogram(self, context):
        '''Generate a basic shader program if none given
*Parameters:*
- `context`: `VulkContext`
'''
vs = path.join(PATH_VULK_SHADER, "spritebatch.vs.glsl")
fs = path.join(PATH_VULK_SHADER, "spritebatch.fs.glsl")
shaders_mapping = {
vc.ShaderStage.VERTEX: vs,
vc.ShaderStage.FRAGMENT: fs
}
return vo.ShaderProgramGlslFile(context, shaders_mapping)
def get_descriptor(self, context, texture):
'''Update descriptor set containing texture
*Parameters:*
- `context`: `VulkContext`
- `texture`: `RawTexture` to update
'''
descriptorset = self.dspool.pull(context)
descriptorub_info = vo.DescriptorBufferInfo(
self.uniformblock.uniform_buffer.final_buffer, 0,
self.uniformblock.size)
descriptorub_write = vo.WriteDescriptorSet(
descriptorset, 0, 0, vc.DescriptorType.UNIFORM_BUFFER,
[descriptorub_info])
descriptorimage_info = vo.DescriptorImageInfo(
texture.sampler, texture.view,
vc.ImageLayout.SHADER_READ_ONLY_OPTIMAL)
descriptorimage_write = vo.WriteDescriptorSet(
descriptorset, 1, 0, vc.DescriptorType.COMBINED_IMAGE_SAMPLER,
[descriptorimage_info])
vo.update_descriptorsets(
context, [descriptorub_write, descriptorimage_write], [])
return descriptorset
def end(self):
semaphore = super().end()
self.dspool.reset()
return semaphore
def flush(self):
"""Flush all draws to graphic card
Currently, flush register and submit command.
Args:
context (VulkContext)
"""
if not self.idx:
return
if not self.drawing:
raise Exception("Not currently drawing")
# Upload mesh data
self.mesh.upload(self.context)
# Bind texture
descriptorset = self.get_descriptor(self.context, self.last_texture)
# Compute indices count
        sprites_in_batch = self.idx / 4  # 4 vertices per sprite
indices_count = int(sprites_in_batch) * 6
# Register commands
with self.cbpool.pull() as cmd:
width = self.context.width
height = self.context.height
cmd.begin_renderpass(
self.renderpass,
self.framebuffer,
vo.Rect2D(vo.Offset2D(0, 0),
vo.Extent2D(width, height)),
[]
)
cmd.bind_pipeline(self.pipeline)
self.mesh.bind(cmd)
cmd.bind_descriptor_sets(self.pipelinelayout, 0,
[descriptorset], [])
self.mesh.draw(cmd, 0, indices_count)
cmd.end_renderpass()
self.idx = 0
def draw(self, texture, x, y, width=0, height=0, u=0, v=0, u2=1, v2=1,
r=1, g=1, b=1, a=1, scale_x=1, scale_y=1, rotation=0):
'''
Draw `texture` at position x, y of size `width`, `height`
*Parameters:*
- `texture`: `RawTexture`
- `x`: X position
- `y`: Y position
- `width`: Width
- `heigth`: Height
- `u`: U texture coordinate
- `v`: V texture coordinate
- `r`: Red channel
- `g`: Green channel
- `b`: Blue channel
- `a`: Alpha channel
- `scale_x`: Scaling on x axis
- `scale_y`: Scaling on y axis
- `rotation`: Rotation in radian (clockwise)
**Note: if width and height are set to 0, we take the image size**
'''
if not self.drawing:
raise Exception("Not currently drawing")
if self.last_texture is not texture:
self.flush()
if not width and not height:
width = texture.width
height = texture.height
self.last_texture = texture
width *= scale_x
height *= scale_y
x2 = x + width
y2 = y + height
p1x, p2x, p3x, p4x = x, x, x2, x2
p1y, p2y, p3y, p4y = y, y2, y2, y
if rotation:
cos = math.cos(rotation)
sin = math.sin(rotation)
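            # Rotate each corner offset about the quad centre with the 2D rotation
            # matrix: (dx, dy) -> (cos*dx - sin*dy, sin*dx + cos*dy), then translate back.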
# Set coordinates at origin to do a proper rotation
w1 = -width / 2
w2 = width / 2
h1 = -height / 2
h2 = height / 2
x1 = cos * w1 - sin * h1
y1 = sin * w1 + cos * h1
x2 = cos * w1 - sin * h2
y2 = sin * w1 + cos * h2
x3 = cos * w2 - sin * h2
y3 = sin * w2 + cos * h2
x4 = cos * w2 - sin * h1
y4 = sin * w2 + cos * h1
x_abs = p1x + width / 2
y_abs = p1y + height / 2
x1 += x_abs
x2 += x_abs
x3 += x_abs
x4 += x_abs
y1 += y_abs
y2 += y_abs
y3 += y_abs
y4 += y_abs
else:
x1, x2, x3, x4 = p1x, p2x, p3x, p4x
y1, y2, y3, y4 = p1y, p2y, p3y, p4y
for val in [([x1, y1], [u, v], [r, g, b, a]),
([x2, y2], [u, v2], [r, g, b, a]),
([x3, y3], [u2, v2], [r, g, b, a]),
([x4, y4], [u2, v], [r, g, b, a])]:
self.mesh.set_vertex(self.idx, val)
self.idx += 1
def draw_region(self, region, x, y, width, height, r=1, g=1, b=1, a=1,
scale_x=1, scale_y=1, rotation=0):
'''
Draw `region` at position x, y of size `width`, `height`
*Parameters:*
- `region`: `TextureRegion`
- `x`: X position
- `y`: Y position
- `width`: Width
- `heigth`: Height
- `r`: Red channel
- `g`: Green channel
- `b`: Blue channel
- `a`: Alpha channel
- `scale_x`: Scaling on x axis
- `scale_y`: Scaling on y axis
- `rotation`: Rotation in radian (clockwise)
'''
u = region.u
v = region.v
u2 = region.u2
v2 = region.v2
self.draw(region.texture, x, y, width, height, u, v, u2, v2,
r, g, b, a, scale_x, scale_y, rotation)
class CharBatch(SpriteBatch):
"""CharBatch allows to batch chars into minimum of draw calls."""
def __init__(self, context, size=1000, shaderprogram=None,
out_view=None):
super().__init__(context, size, shaderprogram, out_view)
self.dspool = self.init_dspool()
self.last_texture = None
def get_default_shaderprogram(self, context):
        '''Generate a basic shader program if none given
*Parameters:*
- `context`: `VulkContext`
'''
vs = path.join(PATH_VULK_SHADER, "distancefieldfont.vs.glsl")
fs = path.join(PATH_VULK_SHADER, "distancefieldfont.fs.glsl")
shaders_mapping = {
vc.ShaderStage.VERTEX: vs,
vc.ShaderStage.FRAGMENT: fs
}
return vo.ShaderProgramGlslFile(context, shaders_mapping)
def draw_char(self, fontdata, char, x, y, r=1., g=1., b=1., a=1.,
scale_x=1., scale_y=1., rotation=0.):
"""Draw a char
Args:
fontdata (FontData): Data on font
char (str): One character to draw
size (float): Size
x (int): X position
y (int): Y position
r (float): Red channel
g (float): Green channel
b (float): Blue channel
a (float): Alpha channel
scale_x (float): Scaling on x axis
scale_y (float): Scaling on y axis
rotation (float): Rotation in radian (clockwise)
"""
region = fontdata.get_region(char)
width, height = fontdata.get_sizes(char)
super().draw_region(region, x, y, width, height, r, g,
b, a, scale_x, scale_y, rotation)
| apache-2.0 | -6,750,403,882,061,250,000 | 30.602384 | 120 | 0.56536 | false |
openstax/openstax-cms | salesforce/tests.py | 1 | 8541 | import vcr
import unittest
from django.conf import settings
from django.core.management import call_command
from django.test import LiveServerTestCase, TestCase
from six import StringIO
from django.core.exceptions import ValidationError
from salesforce.models import Adopter, SalesforceSettings, MapBoxDataset, Partner, AdoptionOpportunityRecord, PartnerReview
from salesforce.views import Salesforce
from salesforce.salesforce import Salesforce as SF
from salesforce.serializers import PartnerSerializer, AdoptionOpportunityRecordSerializer
from rest_framework import status
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
from wagtail.tests.utils import WagtailPageTests
class AdopterTest(TestCase):
def create_adopter(self, sales_id="123", name="test", description="test", website="https://rice.edu"):
return Adopter.objects.create(sales_id=sales_id, name=name, description=description, website=website)
def test_adopter_creation(self):
adopter = self.create_adopter()
self.assertTrue(isinstance(adopter, Adopter))
self.assertEqual(adopter.__str__(), adopter.name)
class PartnerTest(APITestCase, TestCase):
def setUp(self):
with vcr.use_cassette('fixtures/vcr_cassettes/partners.yaml'):
call_command('update_partners')
for partner in Partner.objects.all():
partner.visible_on_website = True
partner.save()
def test_did_update_partners(self):
self.assertGreater(Partner.objects.all().count(), 0)
def test_partners_api_get_all_partners(self):
response = self.client.get('/apps/cms/api/salesforce/partners/', format='json')
partners = Partner.objects.all()
serializer = PartnerSerializer(partners, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_partners_api_get_one_partner(self):
random_partner = Partner.objects.order_by("?").first()
response = self.client.get('/apps/cms/api/salesforce/partners/{}/'.format(random_partner.pk), format='json')
serializer = PartnerSerializer(random_partner)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_partners_invalid_partner(self):
invalid_partner_id = Partner.objects.order_by("id").last().id + 1
response = self.client.get('/apps/cms/api/salesforce/partners/{}/'.format(invalid_partner_id), format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_can_add_review(self):
review = PartnerReview.objects.create(partner=Partner.objects.first(),
rating=5,
review="This is a great resource.",
submitted_by_name="Test McTester",
submitted_by_account_id=2)
self.assertEqual(review.review, "This is a great resource.")
def test_partners_include_review_data(self):
random_partner = Partner.objects.order_by("?").first()
response = self.client.get('/apps/cms/api/salesforce/partners/{}/'.format(random_partner.pk), format='json')
self.assertIn('reviews', response.data)
self.assertIn('average_rating', response.data)
self.assertIn('rating_count', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_all_partners_no_reviews(self):
response = self.client.get('/apps/cms/api/salesforce/partners/', format='json')
self.assertNotIn('reviews', response.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_can_only_submit_one_review_per_user(self):
random_partner = Partner.objects.order_by("?").first()
data = {"partner": random_partner.id, "rating": 4, "submitted_by_name": "Some User", "submitted_by_account_id": 2}
response = self.client.post('/apps/cms/api/salesforce/reviews/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
data = {"partner": random_partner.id, "rating": 4, "submitted_by_name": "Some User",
"submitted_by_account_id": 2}
response = self.client.post('/apps/cms/api/salesforce/reviews/', data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_can_delete_review(self):
review = PartnerReview.objects.create(
partner=Partner.objects.order_by("?").first(),
rating=5,
submitted_by_name="O. Staxly",
submitted_by_account_id=2
)
data = { "id": review.id }
response = self.client.delete('/apps/cms/api/salesforce/reviews/', data, format='json')
self.assertEqual(response.data['status'], 'Deleted')
class AdoptionOpportunityTest(APITestCase, TestCase):
def setUp(self):
with vcr.use_cassette('fixtures/vcr_cassettes/opportunities.yaml'):
call_command('update_opportunities')
def test_did_update_opportunities(self):
self.assertGreater(AdoptionOpportunityRecord.objects.all().count(), 0)
def test_get_opportunity(self):
random_adoption = AdoptionOpportunityRecord.objects.order_by("?").first()
response = self.client.get('/apps/cms/api/salesforce/renewal/{}/'.format(random_adoption.account_id), format='json')
self.assertEqual(response.status_code, 200)
def test_update_opportunity(self):
factory = APIRequestFactory()
random_adoption = AdoptionOpportunityRecord.objects.order_by("?").filter(verified=False).first()
initial_confirm_count = random_adoption.confirmed_yearly_students
data = {'confirmed_yearly_students': 1000, 'id': random_adoption.id}
response = self.client.post('/apps/cms/api/salesforce/renewal/{}/'.format(random_adoption.account_id), data, format='json')
self.assertEqual(response.status_code, 200)
updated_adoption = AdoptionOpportunityRecord.objects.get(id=random_adoption.id)
self.assertTrue(updated_adoption.verified)
self.assertNotEqual(updated_adoption.confirmed_yearly_students, initial_confirm_count)
self.assertEqual(updated_adoption.confirmed_yearly_students, 1000)
class SalesforceTest(LiveServerTestCase, WagtailPageTests):
def setUp(self):
super(WagtailPageTests, self).setUp()
super(LiveServerTestCase, self).setUp()
def create_salesforce_setting(self, username="test", password="test", security_token="test",
sandbox=True):
return SalesforceSettings.objects.create(username=username, password=password, security_token=security_token,
sandbox=sandbox)
def test_salesforce_setting_creation(self):
setting = self.create_salesforce_setting()
self.assertTrue(isinstance(setting, SalesforceSettings))
self.assertEqual(setting.__str__(), setting.username)
def test_can_only_create_one_instance(self):
setting1 = self.create_salesforce_setting()
with self.assertRaises(ValidationError):
self.create_salesforce_setting(username="test2", password="test2", security_token="test2", sandbox=False)
def test_database_query(self):
with vcr.use_cassette('fixtures/vcr_cassettes/contact.yaml'):
sf = SF()
contact_info = sf.query(
"SELECT Id FROM Contact")
self.assertIsNot(
contact_info, None)
def test_update_adopters_command(self):
out = StringIO()
with vcr.use_cassette('fixtures/vcr_cassettes/adopter.yaml'):
call_command('update_adopters', stdout=out)
self.assertIn("Success", out.getvalue())
def tearDown(self):
super(WagtailPageTests, self).tearDown()
super(LiveServerTestCase, self).tearDown()
class MapboxTest(TestCase):
def create_mapbox_setting(self, name="test", tileset_id="test", style_url="test"):
return MapBoxDataset.objects.create(name=name, tileset_id=tileset_id, style_url=style_url)
def test_mapbox_setting_creation(self):
setting = self.create_mapbox_setting()
self.assertTrue(isinstance(setting, MapBoxDataset))
self.assertEqual(setting.__str__(), setting.name)
| agpl-3.0 | -827,217,586,751,664,900 | 45.928571 | 131 | 0.671116 | false |
buck06191/BayesCMD | bayescmd/abc/dtaidistance/dtw_visualisation.py | 1 | 6156 | """
dtaidistance.dtw - Dynamic Time Warping
__author__ = "Wannes Meert"
__copyright__ = "Copyright 2016 KU Leuven, DTAI Research Group"
__license__ = "APL"
..
Part of the DTAI distance code.
Copyright 2016 KU Leuven, DTAI Research Group
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import logging
import math
import numpy as np
logger = logging.getLogger("be.kuleuven.dtai.distance")
dtaidistance_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir)
try:
from . import dtw_c
except ImportError:
# logger.info('C library not available')
dtw_c = None
try:
from tqdm import tqdm
except ImportError:
logger.info('tqdm library not available')
tqdm = None
def plot_warp(from_s, to_s, new_s, path, filename=None):
import matplotlib.pyplot as plt
import matplotlib as mpl
fig, ax = plt.subplots(nrows=3, ncols=1, sharex=True, sharey=True)
ax[0].plot(from_s, label="From")
ax[0].legend()
ax[1].plot(to_s, label="To")
ax[1].legend()
transFigure = fig.transFigure.inverted()
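    # Work in inverted figure coordinates so points from different axes can be
    # joined by Line2D objects attached to the figure itself.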
lines = []
line_options = {'linewidth': 0.5, 'color': 'orange', 'alpha': 0.8}
for r_c, c_c in path:
if r_c < 0 or c_c < 0:
continue
coord1 = transFigure.transform(ax[0].transData.transform([r_c, from_s[r_c]]))
coord2 = transFigure.transform(ax[1].transData.transform([c_c, to_s[c_c]]))
lines.append(mpl.lines.Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, **line_options))
ax[2].plot(new_s, label="From-warped")
ax[2].legend()
for i in range(len(to_s)):
coord1 = transFigure.transform(ax[1].transData.transform([i, to_s[i]]))
coord2 = transFigure.transform(ax[2].transData.transform([i, new_s[i]]))
lines.append(mpl.lines.Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, **line_options))
fig.lines = lines
if filename:
plt.savefig(filename)
return fig, ax
def plot_warping(s1, s2, path, filename=None):
import matplotlib.pyplot as plt
import matplotlib as mpl
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, sharey=True)
ax[0].plot(s1)
ax[1].plot(s2)
transFigure = fig.transFigure.inverted()
lines = []
line_options = {'linewidth': 0.5, 'color': 'orange', 'alpha': 0.8}
for r_c, c_c in path:
if r_c < 0 or c_c < 0:
continue
coord1 = transFigure.transform(ax[0].transData.transform([r_c, s1[r_c]]))
coord2 = transFigure.transform(ax[1].transData.transform([c_c, s2[c_c]]))
lines.append(mpl.lines.Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, **line_options))
fig.lines = lines
if filename:
plt.savefig(filename)
return fig, ax
def plot_warpingpaths(s1, s2, paths, best_path, filename=None, shownumbers=False):
"""Plot the series and the optimal path.
:param s1: Series 1
:param s2: Series 2
:param paths: Warping paths matrix
:param filename: Filename to write the image to
"""
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import FuncFormatter
ratio = max(len(s1), len(s2))
min_y = min(np.min(s1), np.min(s2))
max_y = max(np.max(s1), np.max(s2))
fig = plt.figure(figsize=(7.5, 10), frameon=True)
gs = gridspec.GridSpec(2, 2, wspace=1, hspace=1,
left=0, right=1.0, bottom=0, top=1.0,
height_ratios=[1, 6],
width_ratios=[1, 6])
max_s2_x = np.max(s2)
max_s2_y = len(s2)
max_s1_x = np.max(s1)
min_s1_x = np.min(s1)
max_s1_y = len(s1)
p = best_path
def format_fn2_x(tick_val, tick_pos):
return max_s2_x - tick_val
def format_fn2_y(tick_val, tick_pos):
return int(max_s2_y - tick_val)
ax0 = fig.add_subplot(gs[0, 0])
ax0.set_axis_off()
ax0.text(0, 0, "Dist = {:.4f}".format(paths[p[-1][0], p[-1][1]]))
ax0.xaxis.set_major_locator(plt.NullLocator())
ax0.yaxis.set_major_locator(plt.NullLocator())
ax1 = fig.add_subplot(gs[0, 1:])
ax1.set_ylim([min_y, max_y])
ax1.set_axis_off()
ax1.xaxis.tick_top()
# ax1.set_aspect(0.454)
ax1.plot(range(len(s2)), s2, ".-")
ax1.xaxis.set_major_locator(plt.NullLocator())
ax1.yaxis.set_major_locator(plt.NullLocator())
ax2 = fig.add_subplot(gs[1:, 0])
ax2.set_xlim([-max_y, -min_y])
ax2.set_axis_off()
# ax2.set_aspect(0.8)
# ax2.xaxis.set_major_formatter(FuncFormatter(format_fn2_x))
# ax2.yaxis.set_major_formatter(FuncFormatter(format_fn2_y))
ax2.xaxis.set_major_locator(plt.NullLocator())
ax2.yaxis.set_major_locator(plt.NullLocator())
ax2.plot(-s1, range(max_s1_y, 0, -1), ".-")
ax3 = fig.add_subplot(gs[1:, 1:])
# ax3.set_aspect(1)
ax3.matshow(paths[1:, 1:])
# ax3.grid(which='major', color='w', linestyle='-', linewidth=0)
# ax3.set_axis_off()
py, px = zip(*p)
ax3.plot(px, py, ".-", color="red")
# ax3.xaxis.set_major_locator(plt.NullLocator())
# ax3.yaxis.set_major_locator(plt.NullLocator())
    if shownumbers:
        for r in range(1, paths.shape[0]):
            for c in range(1, paths.shape[1]):
                ax3.text(c - 1, r - 1, "{:.2f}".format(paths[r, c]))
gs.tight_layout(fig, pad=1.0, h_pad=1.0, w_pad=1.0)
# fig.subplots_adjust(hspace=0, wspace=0)
ax = fig.axes
if filename:
plt.savefig(filename)
return fig, ax
| gpl-2.0 | -4,601,631,592,461,294,000 | 33.391061 | 86 | 0.613385 | false |
armab/st2 | st2actions/tests/unit/test_paramiko_remote_script_runner.py | 1 | 5238 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bson
from mock import patch, Mock, MagicMock
import unittest2
# XXX: There is an import dependency. Config needs to be set up
# before importing remote_script_runner classes.
import st2tests.config as tests_config
tests_config.parse_args()
from st2common.util import jsonify
from st2actions.runners.remote_script_runner import ParamikoRemoteScriptRunner
from st2actions.runners.ssh.parallel_ssh import ParallelSSHClient
from st2common.exceptions.ssh import InvalidCredentialsException
from st2common.exceptions.ssh import NoHostsConnectedToException
from st2common.models.system.paramiko_script_action import ParamikoRemoteScriptAction
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2tests.fixturesloader import FixturesLoader
__all__ = [
'ParamikoScriptRunnerTestCase'
]
FIXTURES_PACK = 'generic'
TEST_MODELS = {
'actions': ['a1.yaml']
}
MODELS = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
ACTION_1 = MODELS['actions']['a1.yaml']
class ParamikoScriptRunnerTestCase(unittest2.TestCase):
@patch('st2actions.runners.ssh.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(jsonify, 'json_loads', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_cwd_used_correctly(self):
remote_action = ParamikoRemoteScriptAction(
'foo-script', bson.ObjectId(),
script_local_path_abs='/home/stanley/shiz_storm.py',
script_local_libs_path_abs=None,
named_args={}, positional_args=['blank space'], env_vars={},
on_behalf_user='svetlana', user='stanley',
private_key='---SOME RSA KEY---',
remote_dir='/tmp', hosts=['localhost'], cwd='/test/cwd/'
)
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner._parallel_ssh_client = ParallelSSHClient(['localhost'], 'stanley')
paramiko_runner._run_script_on_remote_host(remote_action)
exp_cmd = "cd /test/cwd/ && /tmp/shiz_storm.py 'blank space'"
ParallelSSHClient.run.assert_called_with(exp_cmd,
timeout=None)
@patch('st2actions.runners.ssh.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_username_only_ssh(self):
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {'username': 'test_user', 'hosts': 'localhost'}
self.assertRaises(InvalidCredentialsException, paramiko_runner.pre_run)
def test_username_invalid_private_key(self):
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {
'username': 'test_user',
'hosts': 'localhost',
'private_key': 'invalid private key',
}
paramiko_runner.context = {}
self.assertRaises(NoHostsConnectedToException, paramiko_runner.pre_run)
@patch('st2actions.runners.ssh.parallel_ssh.ParallelSSHClient', Mock)
@patch.object(ParallelSSHClient, 'run', MagicMock(return_value={}))
@patch.object(ParallelSSHClient, 'connect', MagicMock(return_value={}))
def test_top_level_error_is_correctly_reported(self):
# Verify that a top-level error doesn't cause an exception to be thrown.
# In a top-level error case, result dict doesn't contain entry per host
paramiko_runner = ParamikoRemoteScriptRunner('runner_1')
paramiko_runner.runner_parameters = {
'username': 'test_user',
'hosts': 'localhost'
}
paramiko_runner.action = ACTION_1
paramiko_runner.liveaction_id = 'foo'
paramiko_runner.entry_point = 'foo'
paramiko_runner.context = {}
paramiko_runner._cwd = '/tmp'
paramiko_runner._copy_artifacts = Mock(side_effect=Exception('fail!'))
status, result, _ = paramiko_runner.run(action_parameters={})
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
self.assertEqual(result['failed'], True)
self.assertEqual(result['succeeded'], False)
self.assertTrue('Failed copying content to remote boxes' in result['error'])
| apache-2.0 | 2,243,674,468,910,821,400 | 45.353982 | 91 | 0.697213 | false |
kbase/catalog | lib/biokbase/catalog/registrar.py | 1 | 39288 | import codecs
import datetime
import json
import os
import pprint
import re
import shutil
import subprocess
import time
import traceback
from urllib.parse import urlparse
import semantic_version
import yaml
from docker import APIClient as DockerClient
from docker.tls import TLSConfig as DockerTLSConfig
from biokbase.catalog.local_function_reader import LocalFunctionReader
from biokbase.narrative_method_store.client import NarrativeMethodStore
class Registrar:
# params is passed in from the controller, should be the same as passed into the spec
# db is a reference to the Catalog DB interface (usually a MongoCatalogDBI instance)
def __init__(self, params, registration_id, timestamp, username, is_admin, token, db, temp_dir,
docker_base_url, docker_registry_host, nms_url, nms_admin_token, module_details,
ref_data_base, kbase_endpoint, prev_dev_version):
self.db = db
self.params = params
# at this point, we assume git_url has been checked
self.git_url = params['git_url']
self.registration_id = registration_id
self.timestamp = timestamp
self.username = username
self.is_admin = is_admin
self.token = token
self.db = db
self.temp_dir = temp_dir
self.docker_base_url = docker_base_url
self.docker_registry_host = docker_registry_host
self.nms_url = nms_url
self.nms = NarrativeMethodStore(self.nms_url, token=nms_admin_token)
self.local_function_reader = LocalFunctionReader()
# (most) of the mongo document for this module snapshot before this registration
self.module_details = module_details
self.log_buffer = []
self.last_log_time = time.time() # in seconds
self.log_interval = 1.0 # save log to mongo every second
self.ref_data_base = ref_data_base
self.kbase_endpoint = kbase_endpoint
self.prev_dev_version = prev_dev_version
def start_registration(self):
try:
self.logfile = codecs.open(f'{self.temp_dir}/registration.log.{self.registration_id}',
'w', 'utf-8')
self.log(f'Registration started on {datetime.datetime.now()} by {self.username}')
self.log('Registration ID: ' + str(self.registration_id))
self.log('Registration Parameters: ' + str(self.params))
##############################
# 1 - clone the repo into the temp directory that should already be reserved for us
self.set_build_step('cloning git repo')
if not os.path.isdir(os.path.join(self.temp_dir, self.registration_id)):
                raise ValueError('Directory for the git clone was not allocated! '
                                 'This is an internal catalog server error, please report this problem.')
basedir = os.path.join(self.temp_dir, self.registration_id, 'module_repo')
parsed_url = urlparse(self.git_url)
self.log('Attempting to clone into: ' + basedir)
self.log('git clone ' + self.git_url)
subprocess.check_call(['git', 'clone', self.git_url, basedir])
# try to get hash from repo
git_commit_hash = subprocess.check_output(
['git', 'log', '--pretty=%H', '-n', '1'], cwd=basedir).decode().strip()
self.log('current commit hash at HEAD:' + git_commit_hash)
if 'git_commit_hash' in self.params:
if self.params['git_commit_hash']:
git_commit_hash = self.params['git_commit_hash'].strip()
self.log('git checkout ' + git_commit_hash)
subprocess.check_call(['git', 'checkout', '--quiet', git_commit_hash],
cwd=basedir)
            # check if this was a git_commit_hash that was already released - if so, we abort
            # for now (we could just update the dev tag in the future)
for r in self.module_details['release_version_list']:
if r['git_commit_hash'] == git_commit_hash:
raise ValueError('The specified commit is already released. '
'You cannot reregister that commit version or image.')
# do the same for beta versions for now
if self.module_details['current_versions'].get('beta') is not None:
if self.module_details['current_versions']['beta']['git_commit_hash'] == git_commit_hash:
raise ValueError('The specified commit is already registered and in beta. '
'You cannot reregister that commit version or image.')
##############################
# 2 - sanity check (things parse, files exist, module_name matches, etc)
self.set_build_step('reading files and performing basic checks')
self.sanity_checks_and_parse(basedir, git_commit_hash)
##############################
# 2.5 - dealing with git releases .git/config.lock, if it still exists after 5s then kill it
###### should no longer need this after switching to subprocess
# git_config_lock_file = os.path.join(basedir, ".git", "config.lock")
# if os.path.exists(git_config_lock_file):
# self.log('.git/config.lock exists, waiting 5s for it to release')
# time.sleep(5)
# if os.path.exists(git_config_lock_file):
# self.log('.git/config.lock file still there, we are just going to delete it....')
# os.remove(git_config_lock_file)
##############################
# 3 docker build - in progress
# perhaps make this a self attr?
module_name_lc = self.get_required_field_as_string(self.kb_yaml,
'module-name').strip().lower()
self.image_name = self.docker_registry_host + '/kbase:' + module_name_lc + '.' + str(
git_commit_hash)
ref_data_folder = None
ref_data_ver = None
compilation_report = None
if not Registrar._TEST_WITHOUT_DOCKER:
# timeout set to 24 hours because we often get timeouts if multiple people try to
# push at the same time
docker_timeout = 86400
if len(str(self.docker_base_url)) > 0:
dockerclient = DockerClient(base_url=str(self.docker_base_url),
timeout=docker_timeout)
else:
# docker base URL is not set in config, let's use Docker-related env-vars in this case
docker_host = os.environ['DOCKER_HOST']
if docker_host is None or len(docker_host) == 0:
raise ValueError('Docker host should be defined either in configuration '
'(docker-base-url property) or in DOCKER_HOST environment variable')
docker_tls_verify = os.environ['DOCKER_TLS_VERIFY']
if docker_host.startswith('tcp://'):
docker_protocol = "http"
if (docker_tls_verify is not None) and docker_tls_verify == '1':
docker_protocol = "https"
docker_host = docker_host.replace('tcp://', docker_protocol + '://')
docker_cert_path = os.environ['DOCKER_CERT_PATH']
docker_tls = False
if (docker_cert_path is not None) and len(docker_cert_path) > 0:
docker_tls = DockerTLSConfig(verify=False,
client_cert=(docker_cert_path + '/cert.pem',
docker_cert_path + '/key.pem'))
self.log(
"Docker settings from environment variables are used: docker-host = " + docker_host +
", docker_cert_path = " + str(docker_cert_path))
dockerclient = DockerClient(base_url=docker_host, timeout=docker_timeout,
version='auto', tls=docker_tls)
# look for docker image
# this tosses cookies if image doesn't exist, so wrap in try, and build if try reports "not found"
# self.log(str(dockerclient.inspect_image(repo_name)))
# if image does not exist, build and set state
self.set_build_step('building the docker image')
# imageId is not yet populated properly
imageId = self.build_docker_image(dockerclient, self.image_name, basedir)
# check if reference data version is defined in kbase.yml
if 'data-version' in self.kb_yaml:
ref_data_ver = str(self.kb_yaml['data-version']).strip()
if ref_data_ver:
ref_data_folder = module_name_lc
target_ref_data_dir = os.path.join(self.ref_data_base, ref_data_folder,
ref_data_ver)
if os.path.exists(target_ref_data_dir):
self.log(f"Reference data for {ref_data_folder}/{ref_data_ver} was "
f"already prepared, initialization step is skipped")
else:
self.set_build_step(
'preparing reference data (running init entry-point), ' +
'ref-data version: ' + ref_data_ver)
self.prepare_ref_data(dockerclient, self.image_name,
self.ref_data_base, ref_data_folder,
ref_data_ver, basedir, self.temp_dir,
self.registration_id,
self.token, self.kbase_endpoint)
self.set_build_step('preparing compilation report')
self.log('Preparing compilation report.')
# Trying to extract compilation report with line numbers of funcdefs from docker image.
# There is "report" entry-point command responsible for that. In case there are any
# errors we just skip it.
compilation_report = self.prepare_compilation_report(dockerclient, self.image_name,
basedir,
self.temp_dir,
self.registration_id,
self.token,
self.kbase_endpoint)
if compilation_report is None:
raise ValueError(
'Unable to generate a compilation report, which is now required, so your '
'registration cannot continue. If you have been successfully registering '
'this module already, this means that you may need to update to the latest'
' version of the KBase SDK and rebuild your makefile.')
self.local_function_reader.finish_validation(compilation_report)
self.log('Report complete')
self.set_build_step('pushing docker image to registry')
self.push_docker_image(dockerclient, self.image_name)
else:
self.log('IN TEST MODE!! SKIPPING DOCKER BUILD AND DOCKER REGISTRY UPDATE!!')
# 4 - Update the DB
self.set_build_step('updating the catalog')
self.update_the_catalog(basedir, ref_data_folder, ref_data_ver, compilation_report)
self.build_is_complete()
except Exception as e:
# set the build state to error and log it
self.set_build_error(str(e))
self.log(traceback.format_exc(), is_error=True)
self.log('BUILD_ERROR: ' + str(e), is_error=True)
if self.prev_dev_version:
self.log(f"Reverting dev version to git_commit_hash="
f"{self.prev_dev_version['git_commit_hash']}, version="
f"{self.prev_dev_version['version']}, git_commit_message="
f"{self.prev_dev_version['git_commit_message']}")
self.db.update_dev_version(self.prev_dev_version, git_url=self.git_url)
finally:
self.flush_log_to_db()
self.logfile.close()
self.cleanup()
def sanity_checks_and_parse(self, basedir, git_commit_hash):
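        """Read and validate kbase.yaml plus the module's specs.

        Parses kbase.yaml (or kbase.yml), checks the required fields
        (module-name, module-version as a semantic version, service-language,
        owners), validates narrative method specs and local function specs,
        and returns the parsed kbase.yaml dict.
        """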
# check that files exist
yaml_filename = 'kbase.yaml'
if not os.path.isfile(os.path.join(basedir, 'kbase.yaml')):
if not os.path.isfile(os.path.join(basedir, 'kbase.yml')):
raise ValueError('kbase.yaml file does not exist in repo, but is required!')
else:
yaml_filename = 'kbase.yml'
# parse some stuff, and check for things
with codecs.open(os.path.join(basedir, yaml_filename), 'r', "utf-8",
errors='ignore') as kb_yaml_file:
kb_yaml_string = kb_yaml_file.read()
        self.kb_yaml = yaml.safe_load(kb_yaml_string)
self.log('=====kbase.yaml parse:')
self.log(pprint.pformat(self.kb_yaml))
self.log('=====end kbase.yaml')
module_name = self.get_required_field_as_string(self.kb_yaml, 'module-name').strip()
module_description = self.get_required_field_as_string(self.kb_yaml,
'module-description').strip()
version = self.get_required_field_as_string(self.kb_yaml, 'module-version').strip()
# must be a semantic version
if not semantic_version.validate(version):
raise ValueError('Invalid version string in kbase.yaml - must be in semantic version '
'format. See http://semver.org')
service_language = self.get_required_field_as_string(self.kb_yaml,
'service-language').strip()
owners = self.get_required_field_as_list(self.kb_yaml, 'owners')
service_config = self.get_optional_field_as_dict(self.kb_yaml, 'service-config')
if service_config:
# validate service_config parameters
if 'dynamic-service' in service_config:
if not type(service_config['dynamic-service']) == type(True):
raise ValueError(
'Invalid service-config in kbase.yaml - "dynamic-service" property must be'
' a boolean "true" or "false".')
# module_name must match what exists (unless it is not yet defined)
if 'module_name' in self.module_details:
if self.module_details['module_name'] != module_name:
raise ValueError(
'kbase.yaml file module_name field has changed since last version! ' +
'Module names are permanent- if this is a problem, contact a kbase admin.')
else:
# This must be the first registration, so the module must not exist yet
self.check_that_module_name_is_valid(module_name)
# associate the module_name with the log file for easier searching (if we fail sooner, then the module name
# cannot be used to lookup this log)
self.db.set_build_log_module_name(self.registration_id, module_name)
# you can't remove yourself from the owners list, or register something that you are not an owner of
if self.username not in owners and self.is_admin is False:
raise ValueError(f'Your kbase username ({self.username}) must be in the owners list in'
f' the kbase.yaml file.')
# OPTIONAL TODO: check if all the users are on the owners list? not necessarily required, because we
# do a check during registration of the person who started the registration...
# TODO: check for directory structure, method spec format, documentation, version
self.validate_method_specs(basedir)
# initial validation of the local function specifications
lf_report = self.local_function_reader.parse_and_basic_validation(basedir,
self.module_details,
module_name, version,
git_commit_hash)
self.log(self.local_function_reader.report_to_string_for_log(lf_report))
if len(lf_report['functions_errored']) > 0:
raise ValueError('Errors exist in local function specifications.')
# return the parse so we can figure things out later
return self.kb_yaml
def check_that_module_name_is_valid(self, module_name):
if self.db.is_registered(module_name=module_name):
raise ValueError('Module name (in kbase.yaml) is already registered. '
'Please specify a different name and try again.')
if self.db.module_name_lc_exists(module_name_lc=module_name.lower()):
raise ValueError('The case-insensitive module name (in kbase.yaml) is not unique. '
'Please specify a different name.')
# only allow alphanumeric and underscore
if not re.match(r'^[A-Za-z0-9_]+$', module_name):
raise ValueError('Module names must be alphanumeric characters (including underscores)'
' only, with no spaces.')
def update_the_catalog(self, basedir, ref_data_folder, ref_data_ver, compilation_report):
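        """Persist the new dev version of this module in the catalog.

        Sets the permanent module name (first registration only), updates the
        module info and owners, saves any local function specs, writes the new
        dev version record, and finally registers the commit with the
        Narrative Method Store.
        """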
# get the basic info that we need
commit_hash = subprocess.check_output(
['git', 'log', '--pretty=%H', '-n', '1'], cwd=basedir).decode().strip()
commit_message = subprocess.check_output(
['git', 'log', '--pretty=%B', '-n', '1'], cwd=basedir).decode().strip()
module_name = self.get_required_field_as_string(self.kb_yaml, 'module-name')
module_description = self.get_required_field_as_string(self.kb_yaml, 'module-description')
version = self.get_required_field_as_string(self.kb_yaml, 'module-version')
service_language = self.get_required_field_as_string(self.kb_yaml, 'service-language')
owners = self.get_required_field_as_list(self.kb_yaml, 'owners')
service_config = self.get_optional_field_as_dict(self.kb_yaml, 'service-config')
# first update the module name, which is now permanent, if we haven't already
if ('module_name' not in self.module_details) or (
'module_name_lc' not in self.module_details):
error = self.db.set_module_name(self.git_url, module_name)
if error is not None:
raise ValueError(
'Unable to set module_name - there was an internal database error.' + error)
# TODO: Could optimize by combining all these things into one mongo call, but for now this is easier.
# Combining it into one call would just mean that this update happens as a single transaction, but a partial
# update for now that fails midstream is probably not a huge issue- we can always reregister.
# next update the basic information
info = {
'description': module_description,
'language': service_language
}
if service_config and service_config['dynamic-service']:
info['dynamic_service'] = 1
else:
info['dynamic_service'] = 0
local_functions = self.local_function_reader.extract_lf_records()
if len(local_functions) > 0:
info['local_functions'] = 1
else:
info['local_functions'] = 0
self.log('new info: ' + pprint.pformat(info))
error = self.db.set_module_info(info, git_url=self.git_url)
if error is not None:
raise ValueError(
'Unable to set module info - there was an internal database error: ' + str(error))
# next update the owners
ownersListForUpdate = []
for o in owners:
# TODO: add some validation that the username is a valid kbase user
ownersListForUpdate.append({'kb_username': o})
self.log('new owners list: ' + pprint.pformat(ownersListForUpdate))
error = self.db.set_module_owners(ownersListForUpdate, git_url=self.git_url)
if error is not None:
raise ValueError(f'Unable to set module owners - '
f'there was an internal database error: {error}')
# finally update the actual dev version info
narrative_methods = []
if os.path.isdir(os.path.join(basedir, 'ui', 'narrative', 'methods')):
for m in os.listdir(os.path.join(basedir, 'ui', 'narrative', 'methods')):
if os.path.isdir(os.path.join(basedir, 'ui', 'narrative', 'methods', m)):
narrative_methods.append(m)
if len(local_functions) > 0:
self.log('Saving local function specs:')
self.log(pprint.pformat(local_functions))
error = self.db.save_local_function_specs(local_functions)
if error is not None:
raise ValueError(
'There was an error saving local function specs, DB says: ' + str(error))
new_version = {
'module_name': module_name.strip(),
'module_name_lc': module_name.strip().lower(),
'module_description': module_description,
'released': 0,
'released_timestamp': None,
'notes': '',
'timestamp': self.timestamp,
'registration_id': self.registration_id,
'version': version,
'git_commit_hash': commit_hash,
'git_commit_message': commit_message,
'narrative_methods': narrative_methods,
'local_functions': self.local_function_reader.extract_lf_names(),
'docker_img_name': self.image_name,
'compilation_report': compilation_report
}
if ref_data_ver:
new_version['data_folder'] = ref_data_folder
new_version['data_version'] = ref_data_ver
if service_config and service_config['dynamic-service']:
new_version['dynamic_service'] = 1
else:
new_version['dynamic_service'] = 0
self.log('new dev version object: ' + pprint.pformat(new_version))
error = self.db.update_dev_version(new_version)
if error is not None:
raise ValueError(f'Unable to update dev version - '
f'there was an internal database error: {error}')
# push to NMS
self.log('registering specs with NMS')
self.log("NMS: " + commit_hash)
self.nms.register_repo({'git_url': self.git_url, 'git_commit_hash': commit_hash})
self.log('\ndone')
# done!!!
def validate_method_specs(self, basedir):
self.log('validating narrative method specifications')
if os.path.isdir(os.path.join(basedir, 'ui', 'narrative', 'methods')):
for m in os.listdir(os.path.join(basedir, 'ui', 'narrative', 'methods')):
if os.path.isdir(os.path.join(basedir, 'ui', 'narrative', 'methods', m)):
self.log(' - validating method: ' + m)
# first grab the spec and display files, which are required
method_path = os.path.join(basedir, 'ui', 'narrative', 'methods', m)
if not os.path.isfile(os.path.join(method_path, 'spec.json')):
raise ValueError(f'Invalid narrative method specification ({m}): '
f'No spec.json file defined.')
if not os.path.isfile(os.path.join(method_path, 'display.yaml')):
raise ValueError(f'Invalid narrative method specification ({m}): '
                                         f'No display.yaml file defined.')
with codecs.open(os.path.join(method_path, 'spec.json'), 'r', "utf-8",
errors='ignore') as spec_json_file:
spec_json = spec_json_file.read()
with codecs.open(os.path.join(method_path, 'display.yaml'), 'r', "utf-8",
errors='ignore') as display_yaml_file:
display_yaml = display_yaml_file.read()
# gather any extra html files
extra_files = {}
for extra_file_name in os.listdir(os.path.join(method_path)):
if not os.path.isfile(os.path.join(method_path, extra_file_name)):
continue
if not extra_file_name.endswith('.html'):
continue
with codecs.open(os.path.join(method_path, extra_file_name), 'r', "utf-8",
errors='ignore') as extra_file:
extra_files[extra_file_name] = extra_file.read()
# validate against the NMS target endpoint
result = self.nms.validate_method(
{'id': m, 'spec_json': spec_json, 'display_yaml': display_yaml,
'extra_files': extra_files})
# inspect results
if result['is_valid'] > 0:
self.log(' - valid!')
if 'warnings' in result:
if result['warnings']:
for w in result['warnings']:
self.log(' - warning: ' + w)
else:
self.log(' - not valid!', is_error=True)
if 'errors' in result:
if result['errors']:
for e in result['errors']:
self.log(' - error: ' + e, is_error=True)
else:
self.log(' - error is undefined!', is_error=True)
raise ValueError('Invalid narrative method specification (' + m + ')')
else:
self.log(' - no ui/narrative/methods directory found, '
'so no narrative methods will be deployed')
def get_required_field_as_string(self, kb_yaml, field_name):
if field_name not in kb_yaml:
raise ValueError('kbase.yaml file missing "' + field_name + '" required field')
value = kb_yaml[field_name].strip()
if not value:
raise ValueError(
'kbase.yaml file missing value for "' + field_name + '" required field')
return value
def get_required_field_as_list(self, kb_yaml, field_name):
if field_name not in kb_yaml:
raise ValueError('kbase.yaml file missing "' + field_name + '" required field')
value = kb_yaml[field_name]
if not type(value) is list:
raise ValueError('kbase.yaml file "' + field_name + '" required field must be a list')
return value
def get_optional_field_as_dict(self, kb_yaml, field_name):
if field_name not in kb_yaml:
return None
value = kb_yaml[field_name]
if not type(value) is dict:
raise ValueError('kbase.yaml file "' + field_name + '" optional field must be a dict')
return value
def log(self, message, no_end_line=False, is_error=False):
if no_end_line:
content = message
else:
content = message + '\n'
self.logfile.write(content)
self.logfile.flush()
lines = content.splitlines()
for l in lines:
# add each line to the buffer
if len(l) > 1000:
l = l[0:1000] + ' ... truncated to 1k characters of ' + str(len(l))
self.log_buffer.append({'content': l + '\n', 'error': is_error})
# save the buffer to mongo if enough time has elapsed, or the buffer is more than 1000 lines
if (time.time() - self.last_log_time > self.log_interval) or (len(self.log_buffer) > 1000):
self.flush_log_to_db()
def flush_log_to_db(self):
# todo: if we lose log lines, that's ok. Make sure we handle case if log is larger than mongo doc size
self.db.append_to_build_log(self.registration_id, self.log_buffer)
self.log_buffer = [] # clear the buffer
self.last_log_time = time.time() # reset the log timer
def set_build_step(self, step):
self.db.set_module_registration_state(git_url=self.git_url, new_state='building: ' + step)
self.db.set_build_log_state(self.registration_id, 'building: ' + step)
def set_build_error(self, error_message):
self.db.set_module_registration_state(git_url=self.git_url, new_state='error',
error_message=error_message)
self.db.set_build_log_state(self.registration_id, 'error', error_message=error_message)
def build_is_complete(self):
self.db.set_module_registration_state(git_url=self.git_url, new_state='complete')
self.db.set_build_log_state(self.registration_id, 'complete')
def cleanup(self):
if os.path.isdir(os.path.join(self.temp_dir, self.registration_id)):
shutil.rmtree(os.path.join(self.temp_dir, self.registration_id))
def build_docker_image(self, docker_client, image_name, basedir):
self.log('\nBuilding the docker image for ' + image_name)
# examine stream to determine success/failure of build
imageId = None
for lines in docker_client.build(path=basedir, rm=True, tag=image_name, pull=True):
for line in lines.strip().splitlines():
line_parse = json.loads(line.strip())
if 'stream' in line_parse:
self.log(line_parse['stream'], no_end_line=True)
if 'errorDetail' in line_parse:
self.log(str(line_parse), no_end_line=True)
raise ValueError('Docker build failed: ' + str(line_parse['errorDetail']))
imageId = docker_client.inspect_image(image_name)['Id']
self.log('Docker build successful.')
self.log(' Image Id: ' + str(imageId))
self.log(' Image Name: ' + str(image_name) + '\n\n')
return imageId
def push_docker_image(self, docker_client, image_name):
self.log('\nPushing docker image to registry for ' + image_name)
colon_pos = image_name.rfind(
':') # This logic supports images with "host:port/" prefix for private registry
image = image_name[:colon_pos]
tag = image_name[colon_pos + 1:]
# response = [ line for line in docker_client.push(image, tag=tag, stream=True) ]
# response_stream = response
# self.log(str(response_stream))
# to do: examine stream to determine success/failure of build
for lines in docker_client.push(image, tag=tag, stream=True):
for line in lines.strip().splitlines():
# example line:
# '{"status":"Pushing","progressDetail":{"current":32,"total":32},"progress":"[==================================================\\u003e] 32 B/32 B","id":"da200da4256c"}'
line_parse = json.loads(line)
log_line = ''
if 'id' in line_parse:
log_line += line_parse['id'] + ' - '
if 'status' in line_parse:
log_line += line_parse['status']
if 'progress' in line_parse:
log_line += ' - ' + line_parse['progress']
# if 'progressDetail' in line_parse:
# self.log(' - ' + str(line_parse['progressDetail']),no_end_line=True)
# catch anything unexpected, we should probably throw an error here
for key in line_parse:
if key not in ['id', 'status', 'progress', 'progressDetail']:
log_line += '[' + key + '=' + str(line_parse[key]) + '] '
self.log(log_line)
if 'error' in line_parse:
self.log(str(line_parse), no_end_line=True)
raise ValueError('Docker push failed: ' + str(line_parse['error']))
self.log('done pushing docker image to registry for ' + image_name + '\n')
def run_docker_container(self, dockerclient, image_name, token,
kbase_endpoint, binds, work_dir, command, print_details=False):
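        """Run 'command' as the image entry-point in a throw-away container.

        Writes the auth token and a minimal config.properties into 'work_dir',
        bind-mounts it at /kb/module/work (plus any extra 'binds'), streams the
        container output into the registration log, and always removes the
        container when done.
        """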
cnt_id = None
try:
token_file = os.path.join(work_dir, "token")
with open(token_file, "w") as file:
file.write(token)
config_file = os.path.join(work_dir, "config.properties")
with open(config_file, "w") as file:
file.write("[global]\n" +
"job_service_url = " + kbase_endpoint + "/userandjobstate\n" +
"workspace_url = " + kbase_endpoint + "/ws\n" +
"shock_url = " + kbase_endpoint + "/shock-api\n" +
"kbase_endpoint = " + kbase_endpoint + "\n")
if not binds:
binds = {}
binds[work_dir] = {"bind": "/kb/module/work", "mode": "rw"}
container = dockerclient.create_container(image=image_name, command=command, tty=True,
host_config=dockerclient.create_host_config(
binds=binds))
cnt_id = container.get('Id')
self.log('Running "' + command + '" entry-point command, container Id=' + cnt_id)
if print_details:
self.log("Command details:")
self.log(" Image name: " + image_name)
self.log(" Binds: " + str(binds))
self.log(" KBase-endpoint: " + kbase_endpoint)
self.log(" Necessary files in '" + work_dir + "': 'token', 'config.properties'")
self.log(" Tty: True")
self.log(" Docker command: " + command)
dockerclient.start(container=cnt_id)
stream = dockerclient.logs(container=cnt_id, stdout=True, stderr=True, stream=True)
line = []
for char in stream:
try:
char = char.decode('utf-8')
except AttributeError:
pass
if char == '\r':
continue
if char == '\n':
self.log(''.join(line))
line = []
else:
line.append(char)
if len(line) > 0:
self.log(''.join(line))
finally:
# cleaning up the container
try:
if cnt_id:
dockerclient.remove_container(container=cnt_id, v=True, force=True)
self.log("Docker container (Id=" + cnt_id + ") was cleaned up")
except:
pass
def prepare_ref_data(self, dockerclient, image_name, ref_data_base, ref_data_folder,
ref_data_ver, basedir, temp_dir, registration_id, token, kbase_endpoint):
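        """Run the module's 'init' entry-point to build its reference data.

        The data is produced in a temporary folder under
        <ref_data_base>/<ref_data_folder> with the repo's data dir mounted at
        /kb/module/data; it is only promoted to the versioned folder if the
        container writes a __READY__ marker, otherwise it is discarded.
        """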
self.log('\nReference data: creating docker container for initialization')
if not os.path.exists(ref_data_base):
raise ValueError("Reference data network folder doesn't exist: " + ref_data_base)
upper_target_dir = os.path.join(ref_data_base, ref_data_folder)
if not os.path.exists(upper_target_dir):
os.mkdir(upper_target_dir)
temp_ref_data_dir = os.path.join(upper_target_dir, "temp_" + registration_id)
try:
repo_data_dir = os.path.join(basedir, "data")
os.mkdir(temp_ref_data_dir)
binds = {temp_ref_data_dir: {"bind": "/data", "mode": "rw"},
repo_data_dir: {"bind": "/kb/module/data", "mode": "rw"}}
temp_work_dir = os.path.join(temp_dir, registration_id, 'ref_data_workdir')
os.mkdir(temp_work_dir)
self.run_docker_container(dockerclient, image_name, token, kbase_endpoint,
binds, temp_work_dir, 'init', print_details=True)
ready_file = os.path.join(temp_ref_data_dir, "__READY__")
if os.path.exists(ready_file):
target_dir = os.path.join(upper_target_dir, ref_data_ver)
os.rename(temp_ref_data_dir, target_dir)
self.log("Reference data was successfully deployed into " + target_dir)
else:
raise ValueError(
"__READY__ file is not detected in reference data folder, produced data will be discarded")
finally:
# cleaning up temporary ref-data (if not renamed into permanent after success)
try:
if os.path.exists(temp_ref_data_dir):
shutil.rmtree(temp_ref_data_dir)
except:
pass
def prepare_compilation_report(self, dockerclient, image_name, basedir, temp_dir,
registration_id, token, kbase_endpoint):
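        """Run the module's 'report' entry-point and load compile_report.json.

        Returns the parsed report dict, or None if the report file is missing
        or anything goes wrong (errors are logged rather than raised).
        """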
self.log('\nCompilation report: creating docker container')
try:
temp_work_dir = os.path.join(temp_dir, registration_id, 'report_workdir')
os.mkdir(temp_work_dir)
self.run_docker_container(dockerclient, image_name, token, kbase_endpoint,
None, temp_work_dir, 'report')
report_file = os.path.join(temp_work_dir, 'compile_report.json')
if not os.path.exists(report_file):
self.log("Report file doesn't exist: " + report_file)
return None
else:
with codecs.open(report_file, 'r', 'utf-8', errors='ignore') as f:
return json.load(f)
except Exception as e:
self.log("Error preparing compilation log: " + str(e))
return None
# Temporary flags to test everything except docker
# we should remove once the test rig can fully support docker and an NMS
_TEST_WITHOUT_DOCKER = False
| mit | 7,257,920,725,435,046,000 | 51.73557 | 190 | 0.542481 | false |
EricssonResearch/calvin-base | calvin/runtime/south/storage/twistedimpl/securedht/tests/test_secappend.py | 1 | 9557 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sys
import os
import traceback
import random
import time
import json
import Queue
from twisted.application import service, internet
from twisted.python.log import ILogObserver
from twisted.internet import reactor, task, defer, threads
from threading import Thread
from kademlia import log
from calvin.runtime.south.storage.twistedimpl.securedht.append_server import AppendServer
# _log = get_logger(__name__)
class KNet(object):
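    """Spin up a small local network of Kademlia append servers for the tests.

    One ServerApp is created per node; the first nodes to start are reused as
    bootstrap addresses (127.0.0.1:<port>) for the rest of the network.
    """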
def __init__(self, number, server_type=AppendServer):
self.nodes = []
self.boot_strap = None
if not reactor.running:
print "Starting reactor only once"
self.reactor_thread = Thread(target=reactor.run, args=(False,)).start()
for a in xrange(number):
self.nodes.append(ServerApp(server_type))
def start(self):
bootstrap = []
for a in self.nodes:
port, kserver = a.start(0, bootstrap)
if len(bootstrap) < 100:
bootstrap.append(("127.0.0.1", port))
# Wait for them to start
time.sleep(.8)
def stop(self):
for node in self.nodes:
node.stop()
self.nodes = []
time.sleep(1)
def get_rand_node(self):
index = random.randint(0, max(0, len(self.nodes) - 1))
return self.nodes[index]
class ServerApp(object):
def __init__(self, server_type):
self.server_type = server_type
def start(self, port=0, boot_strap=[]):
self.kserver = self.server_type()
self.kserver.bootstrap(boot_strap)
self.port = threads.blockingCallFromThread(reactor, reactor.listenUDP, port, self.kserver.protocol)
print "Starting server:", self.port
time.sleep(.2)
return self.port.getHost().port, self.kserver
def call(self, func, *args, **kwargs):
reactor.callFromThread(func, *args, **kwargs)
def __getattr__(self, name):
class caller:
def __init__(self, f, func):
self.f = f
self.func = func
def __call__(self, *args, **kwargs):
# _log.debug("Calling %s(%s, %s, %s)" %(self.f, self.func, args, kwargs))
return self.func(*args, **kwargs)
if hasattr(self.kserver, name) and callable(getattr(self.kserver, name)):
return caller(self.call, getattr(self.kserver, name))
else:
# Default behaviour
raise AttributeError
def get_port(self):
return self.port
def stop(self):
result = threads.blockingCallFromThread(reactor, self.port.stopListening)
def normal_test(match):
def test(obj):
if obj != match:
print("%s != %s" % (repr(obj), repr(match)))
return obj == match
return test
def json_test(match):
try:
jmatch = json.loads(match)
except:
print("Not JSON in json test!!!")
return False
def test(obj):
try:
jobj = json.loads(obj)
except:
print("Not JSON in json test!!!")
return False
if jobj != jmatch and not isinstance(jobj, list) and not isinstance(jmatch, list):
print("%s != %s" % (repr(jobj), repr(jmatch)))
if isinstance(jobj, list) and isinstance(jmatch, list):
return set(jobj) == set(jmatch)
return jobj == jmatch
return test
def do_sync(func, **kwargs):
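    """Call a Deferred-returning function and block until its callback fires.

    'timeout' (default 0.2s) and 'test' are popped from kwargs before the
    call; the result is handed back through a Queue, a timeout fails the
    assertion, and the optional 'test' predicate is asserted on the result.
    """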
test = None
timeout = .2
if 'timeout' in kwargs:
timeout = kwargs.pop('timeout')
if 'test' in kwargs:
test = kwargs.pop('test')
q = Queue.Queue()
def respond(value):
q.put(value)
d = func(**kwargs)
d.addCallback(respond)
try:
a = q.get(timeout=timeout)
except Queue.Empty:
assert False
if test is not None:
assert test(a)
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
def fin():
reactor.callFromThread(reactor.stop)
request.addfinalizer(fin)
print "hejsan"
@pytest.mark.slow
class TestKAppend(object):
test_nodes = 20
def test_append(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
item = ["apa"]
test_str = json.dumps(item)
# set(["apa"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().append, key="kalas", value=test_str, test=normal_test(True))
match_str = json.dumps(item)
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str2 = json.dumps(["elefant", "tiger"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str2, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "tiger"])
test_str3 = json.dumps(["elefant"])
do_sync(a.get_rand_node().remove, key="kalas", value=test_str3, test=normal_test(True))
match_str = json.dumps(["apa", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str2 = json.dumps(["elefant", "tiger"])
do_sync(a.get_rand_node().append, key="kalas", value=test_str2, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
# set(["apa", "elefant", "tiger"])
test_str4 = json.dumps(["lejon"])
do_sync(a.get_rand_node().remove, key="kalas", value=test_str4, test=normal_test(True))
match_str = json.dumps(["apa", "elefant", "tiger"])
do_sync(a.get_rand_node().get_concat, key="kalas", test=json_test(match_str))
match_str = json.dumps(item)
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=json_test(match_str))
# Should fail
do_sync(a.get_rand_node().append, key="kalas", value="apa", test=normal_test(False))
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
# Should fail
do_sync(a.get_rand_node().append, key="kalas", value="apa", test=normal_test(False))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
finally:
import traceback
traceback.print_exc()
a.stop()
def test_set(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
for _ in range(10):
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(test_str))
finally:
a.stop()
def test_delete(self, monkeypatch):
a = KNet(self.test_nodes)
a.start()
try:
# Make the nodes know each other
for _ in range(10):
key_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key=key_str, value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key=key_str, test=normal_test(test_str))
do_sync(a.get_rand_node().set, key="kalas", value="apa", test=normal_test(True))
time.sleep(.7)
do_sync(a.get_rand_node().get, key="kalas", test=normal_test("apa"))
for _ in range(3):
test_str = '%030x' % random.randrange(16 ** random.randint(1, 2000))
do_sync(a.get_rand_node().set, key="kalas", value=test_str, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(test_str))
do_sync(a.get_rand_node().set, key="kalas", value=None, test=normal_test(True))
do_sync(a.get_rand_node().get, key="kalas", test=normal_test(None))
finally:
a.stop()
| apache-2.0 | -8,205,857,815,014,780,000 | 32.533333 | 107 | 0.578529 | false |
smallyear/linuxLearn | salt/salt/beacons/__init__.py | 1 | 8936 | # -*- coding: utf-8 -*-
'''
This package contains the loader modules for the salt streams system
'''
# Import Python libs
from __future__ import absolute_import
import logging
import copy
import re
# Import Salt libs
import salt.loader
import salt.utils
import salt.utils.minion
log = logging.getLogger(__name__)
class Beacon(object):
'''
    This class is used to evaluate and execute on the beacon system
'''
def __init__(self, opts, functions):
self.opts = opts
self.beacons = salt.loader.beacons(opts, functions)
self.interval_map = dict()
def process(self, config):
'''
Process the configured beacons
The config must be a dict and looks like this in yaml
code_block:: yaml
beacons:
inotify:
/etc/fstab: {}
/var/cache/foo: {}
'''
ret = []
b_config = copy.deepcopy(config)
if 'enabled' in b_config and not b_config['enabled']:
return
for mod in config:
if mod == 'enabled':
continue
if 'enabled' in config[mod] and not config[mod]['enabled']:
log.trace('Beacon {0} disabled'.format(mod))
continue
elif 'enabled' in config[mod] and config[mod]['enabled']:
# remove 'enabled' item before processing the beacon
del config[mod]['enabled']
log.trace('Beacon processing: {0}'.format(mod))
fun_str = '{0}.beacon'.format(mod)
if fun_str in self.beacons:
interval = self._determine_beacon_config(mod, 'interval', b_config)
if interval:
b_config = self._trim_config(b_config, mod, 'interval')
if not self._process_interval(mod, interval):
log.trace('Skipping beacon {0}. Interval not reached.'.format(mod))
continue
if self._determine_beacon_config(mod, 'disable_during_state_run', b_config):
                log.trace('Evaluating if beacon {0} should be skipped due to a state run.'.format(mod))
b_config = self._trim_config(b_config, mod, 'disable_during_state_run')
is_running = False
running_jobs = salt.utils.minion.running(self.opts)
for job in running_jobs:
if re.match('state.*', job['fun']):
is_running = True
if is_running:
log.info('Skipping beacon {0}. State run in progress.'.format(mod))
continue
# Fire the beacon!
raw = self.beacons[fun_str](b_config[mod])
for data in raw:
tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
if 'tag' in data:
tag += data.pop('tag')
if 'id' not in data:
data['id'] = self.opts['id']
ret.append({'tag': tag, 'data': data})
else:
log.debug('Unable to process beacon {0}'.format(mod))
return ret
def _trim_config(self, b_config, mod, key):
'''
Take a beacon configuration and strip out the interval bits
'''
if isinstance(b_config[mod], list):
b_config[mod].remove(b_config[0])
elif isinstance(b_config[mod], dict):
b_config[mod].pop(key)
return b_config
def _determine_beacon_config(self, mod, val, config_mod):
'''
Process a beacon configuration to determine its interval
'''
if isinstance(config_mod, list):
config = None
val_config = [arg for arg in config_mod if val in arg]
if val_config:
config = val_config[0][val]
elif isinstance(config_mod, dict):
config = config_mod[mod].get(val, False)
return config
def _process_interval(self, mod, interval):
'''
Process beacons with intervals
Return True if a beacon should be run on this loop
'''
log.trace('Processing interval {0} for beacon mod {1}'.format(interval, mod))
loop_interval = self.opts['loop_interval']
if mod in self.interval_map:
log.trace('Processing interval in map')
counter = self.interval_map[mod]
log.trace('Interval counter: {0}'.format(counter))
if counter * loop_interval >= interval:
self.interval_map[mod] = 1
return True
else:
self.interval_map[mod] += 1
else:
log.trace('Interval process inserting mod: {0}'.format(mod))
self.interval_map[mod] = 1
return False
def list_beacons(self):
'''
List the beacon items
'''
# Fire the complete event back along with the list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_list_complete')
return True
def add_beacon(self, name, beacon_data):
'''
Add a beacon item
'''
data = {}
data[name] = beacon_data
if name in self.opts['beacons']:
log.info('Updating settings for beacon '
'item: {0}'.format(name))
else:
log.info('Added new beacon item {0}'.format(name))
self.opts['beacons'].update(data)
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_add_complete')
return True
def modify_beacon(self, name, beacon_data):
'''
Modify a beacon item
'''
data = {}
data[name] = beacon_data
log.info('Updating settings for beacon '
'item: {0}'.format(name))
self.opts['beacons'].update(data)
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_modify_complete')
return True
def delete_beacon(self, name):
'''
Delete a beacon item
'''
if name in self.opts['beacons']:
log.info('Deleting beacon item {0}'.format(name))
del self.opts['beacons'][name]
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_delete_complete')
return True
def enable_beacons(self):
'''
Enable beacons
'''
self.opts['beacons']['enabled'] = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_enabled_complete')
return True
def disable_beacons(self):
'''
        Disable beacons
'''
self.opts['beacons']['enabled'] = False
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacons_disabled_complete')
return True
def enable_beacon(self, name):
'''
Enable a beacon
'''
self.opts['beacons'][name]['enabled'] = True
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_enabled_complete')
return True
def disable_beacon(self, name):
'''
Disable a beacon
'''
self.opts['beacons'][name]['enabled'] = False
# Fire the complete event back along with updated list of beacons
evt = salt.utils.event.get_event('minion', opts=self.opts)
evt.fire_event({'complete': True, 'beacons': self.opts['beacons']},
tag='/salt/minion/minion_beacon_disabled_complete')
return True
| apache-2.0 | 3,378,578,304,264,113,000 | 34.601594 | 106 | 0.543868 | false |
adrientetar/robofab | Lib/ufoLib/plistlib.py | 1 | 15077 | """plistlib.py -- a tool to generate and parse MacOSX .plist files.
The Property List (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in strings, you can use readPlistFromString()
and writePlistToString().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries, Data or datetime.datetime objects. String values (including
dictionary keys) may be unicode strings -- they will be written out as
UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python string.
Generate Plist example:
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict=dict(
anotherString="<hello & hi there!>",
aUnicodeValue=u'M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
),
someData = Data("<binary gunk>"),
someMoreData = Data("<lots of binary gunk>" * 10),
aDate = datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# unicode keys are possible, but a little awkward to use:
pl[u'\xc5benraa'] = "That was a unicode key."
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print pl["aKey"]
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromString", "writePlistToString",
"readPlistFromResource", "writePlistToResource",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
from io import StringIO
import re
try:
from datetime import datetime
except ImportError:
# We're running on Python < 2.3, we don't support dates here,
# yet we provide a stub class so type dispatching works.
class datetime(object):
def __init__(self, *args, **kwargs):
raise ValueError("datetime is not supported")
def readPlist(pathOrFile):
"""Read a .plist file. 'pathOrFile' may either be a file name or a
(readable) file object. Return the unpacked root object (which
usually is a dictionary).
"""
didOpen = 0
if isinstance(pathOrFile, (bytes, str)):
pathOrFile = open(pathOrFile, "rb")
didOpen = 1
p = PlistParser()
rootObject = p.parse(pathOrFile)
if didOpen:
pathOrFile.close()
return rootObject
def writePlist(rootObject, pathOrFile):
"""Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
file name or a (writable) file object.
"""
didOpen = 0
if isinstance(pathOrFile, (bytes, str)):
pathOrFile = open(pathOrFile, "wb")
didOpen = 1
writer = PlistWriter(pathOrFile)
writer.writeln("<plist version=\"1.0\">")
writer.writeValue(rootObject)
writer.writeln("</plist>")
if didOpen:
pathOrFile.close()
def readPlistFromString(data):
"""Read a plist data from a string. Return the root object.
"""
return readPlist(StringIO(data))
def writePlistToString(rootObject):
"""Return 'rootObject' as a plist-formatted string.
"""
f = StringIO()
writePlist(rootObject, f)
return f.getvalue()
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData)
def writePlistToResource(rootObject, path, restype='plst', resid=0):
"""Write 'rootObject' as a plst resource to the resource fork of path.
"""
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdWrPerm
from Carbon import Res
plistData = writePlistToString(rootObject)
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdWrPerm)
Res.UseResFile(resNum)
try:
Res.Get1Resource(restype, resid).RemoveResource()
except Res.Error:
pass
res = Res.Resource(plistData)
res.AddResource(restype, resid, '')
res.WriteResource()
Res.CloseResFile(resNum)
class DumbXMLWriter:
def __init__(self, file, indentLevel=0, indent="\t"):
self.file = file
self.stack = []
self.indentLevel = indentLevel
self.indent = indent
def beginElement(self, element):
self.stack.append(element)
self.writeln("<%s>" % element)
self.indentLevel += 1
def endElement(self, element):
assert self.indentLevel > 0
assert self.stack.pop() == element
self.indentLevel -= 1
self.writeln("</%s>" % element)
def simpleElement(self, element, value=None):
if value is not None:
value = _escapeAndEncode(value)
self.writeln("<%s>%s</%s>" % (element, value, element))
else:
self.writeln("<%s/>" % element)
def writeln(self, line):
if line:
self.file.write(self.indentLevel * self.indent + line + "\n")
else:
self.file.write("\n")
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z")
def _dateFromString(s):
order = ('year', 'month', 'day', 'hour', 'minute', 'second')
gd = _dateParser.match(s).groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
return datetime(*lst)
def _dateToString(d):
return '%04d-%02d-%02dT%02d:%02d:%02dZ' % (
d.year, d.month, d.day,
d.hour, d.minute, d.second
)
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escapeAndEncode(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text
PLISTHEADER = """\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
def __init__(self, file, indentLevel=0, indent="\t", writeHeader=1):
if writeHeader:
file.write(PLISTHEADER)
DumbXMLWriter.__init__(self, file, indentLevel, indent)
def writeValue(self, value):
if isinstance(value, (bytes, str)):
self.simpleElement("string", value)
elif isinstance(value, bool):
# must switch for bool before int, as bool is a
# subclass of int...
if value:
self.simpleElement("true")
else:
self.simpleElement("false")
elif isinstance(value, int):
self.simpleElement("integer", str(value))
elif isinstance(value, float):
self.simpleElement("real", repr(value))
elif isinstance(value, dict):
self.writeDict(value)
elif isinstance(value, Data):
self.writeData(value)
elif isinstance(value, datetime):
self.simpleElement("date", _dateToString(value))
elif isinstance(value, (tuple, list)):
self.writeArray(value)
else:
            raise TypeError("unsupported type: %s" % type(value))
def writeData(self, data):
self.beginElement("data")
self.indentLevel -= 1
maxlinelength = 76 - len(self.indent.replace("\t", " " * 8) *
self.indentLevel)
for line in data.asBase64(maxlinelength).split("\n"):
if line:
self.writeln(line)
self.indentLevel += 1
self.endElement("data")
def writeDict(self, d):
self.beginElement("dict")
items = d.items()
for key, value in sorted(items):
if not isinstance(key, (bytes, str)):
raise TypeError("keys must be strings")
self.simpleElement("key", key)
self.writeValue(value)
self.endElement("dict")
def writeArray(self, array):
self.beginElement("array")
for value in array:
self.writeValue(value)
self.endElement("array")
class _InternalDict(dict):
# This class is needed while Dict is scheduled for deprecation:
# we only need to warn when a *user* instantiates Dict or when
# the "attribute notation for dict keys" is used.
def __getattr__(self, attr):
try:
value = self[attr]
except KeyError:
raise AttributeError(attr)
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
return value
def __setattr__(self, attr, value):
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
self[attr] = value
def __delattr__(self, attr):
try:
del self[attr]
except KeyError:
raise AttributeError(attr)
from warnings import warn
warn("Attribute access from plist dicts is deprecated, use d[key] "
"notation instead", PendingDeprecationWarning)
class Dict(_InternalDict):
def __init__(self, **kwargs):
from warnings import warn
warn("The plistlib.Dict class is deprecated, use builtin dict instead",
PendingDeprecationWarning)
super(Dict, self).__init__(**kwargs)
class Plist(_InternalDict):
"""This class has been deprecated. Use readPlist() and writePlist()
functions instead, together with regular dict objects.
"""
def __init__(self, **kwargs):
from warnings import warn
warn("The Plist class is deprecated, use the readPlist() and "
"writePlist() functions instead", PendingDeprecationWarning)
super(Plist, self).__init__(**kwargs)
def fromFile(cls, pathOrFile):
"""Deprecated. Use the readPlist() function instead."""
rootObject = readPlist(pathOrFile)
plist = cls()
plist.update(rootObject)
return plist
fromFile = classmethod(fromFile)
def write(self, pathOrFile):
"""Deprecated. Use the writePlist() function instead."""
writePlist(self, pathOrFile)
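# Hedged usage sketch (added, not in the original module): the recommended
# replacement for the deprecated Plist/Dict classes is a plain dict together
# with the module-level functions, e.g.
#     pl = {'aString': 'Doodah', 'aList': ['A', 'B', 12],
#           'aDate': datetime(2004, 10, 26)}
#     writePlist(pl, 'example.plist')
#     pl2 = readPlist('example.plist')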
def _encodeBase64(s, maxlinelength=76):
# copied from base64.encodestring(), with added maxlinelength argument
maxbinsize = (maxlinelength//4)*3
pieces = []
for i in range(0, len(s), maxbinsize):
chunk = s[i : i + maxbinsize]
pieces.append(binascii.b2a_base64(chunk))
return "".join(pieces)
class Data:
"""Wrapper for binary data."""
def __init__(self, data):
self.data = data
def fromBase64(cls, data):
# base64.decodestring just calls binascii.a2b_base64;
# it seems overkill to use both base64 and binascii.
return cls(binascii.a2b_base64(data))
fromBase64 = classmethod(fromBase64)
def asBase64(self, maxlinelength=76):
return _encodeBase64(self.data, maxlinelength)
def __cmp__(self, other):
if isinstance(other, self.__class__):
return cmp(self.data, other.data)
elif isinstance(other, str):
return cmp(self.data, other)
else:
return cmp(id(self), id(other))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
def __init__(self):
self.stack = []
self.currentKey = None
self.root = None
def parse(self, fileobj):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self.handleBeginElement
parser.EndElementHandler = self.handleEndElement
parser.CharacterDataHandler = self.handleData
parser.ParseFile(fileobj)
return self.root
def handleBeginElement(self, element, attrs):
self.data = []
handler = getattr(self, "begin_" + element, None)
if handler is not None:
handler(attrs)
def handleEndElement(self, element):
handler = getattr(self, "end_" + element, None)
if handler is not None:
handler()
def handleData(self, data):
self.data.append(data)
def addObject(self, value):
if self.currentKey is not None:
self.stack[-1][self.currentKey] = value
self.currentKey = None
elif not self.stack:
# this is the root object
self.root = value
else:
self.stack[-1].append(value)
def getData(self):
data = "".join(self.data)
self.data = []
return data
# element handlers
def begin_dict(self, attrs):
d = _InternalDict()
self.addObject(d)
self.stack.append(d)
def end_dict(self):
self.stack.pop()
def end_key(self):
self.currentKey = self.getData()
def begin_array(self, attrs):
a = []
self.addObject(a)
self.stack.append(a)
def end_array(self):
self.stack.pop()
def end_true(self):
self.addObject(True)
def end_false(self):
self.addObject(False)
def end_integer(self):
self.addObject(int(self.getData()))
def end_real(self):
self.addObject(float(self.getData()))
def end_string(self):
self.addObject(self.getData())
def end_data(self):
self.addObject(Data.fromBase64(self.getData()))
def end_date(self):
self.addObject(_dateFromString(self.getData()))
| bsd-3-clause | 7,137,606,865,641,836,000 | 30.942797 | 152 | 0.616568 | false |
ngannguyen/aimseqtk | lib/sample.py | 1 | 23967 | #Copyright (C) 2013 by Ngan Nguyen
#
#Released under the MIT license, see LICENSE.txt
'''
Object represents a TCR repertoire sample
'''
import os
import random
import copy
import time
import gzip
import cPickle as pickle
from jobTree.scriptTree.target import Target
from sonLib.bioio import system
from sonLib.bioio import logger
import aimseqtk.lib.clone as libclone
import aimseqtk.lib.common as libcommon
class Sample():
'''Represents a sample
'''
def __init__(self, name, clones=None, group=None, color=(0, 0, 0),
marker='.'):
self.name = name
if clones is None:
self.clones = []
else:
self.clones = clones
self.group = group
self.size = 0 # total sequence (read) count
self.size = sum([clone.count for clone in self.clones])
self.numclone = len(self.clones)
self.color = color
self.marker = marker
def __lt__(self, other):
return self.size < other.size
def __le__(self, other):
return self.size <= other.size
def __eq__(self, other):
return self.size == other.size
def __ne__(self, other):
return self.size != other.size
def __gt__(self, other):
return self.size > other.size
def __ge__(self, other):
return self.size >= other.size
def getitems(self):
return self.__dict__.keys()
def __getitem__(self, name):
if name not in self.__dict__:
return None
return self.__dict__[name]
def __setitem__(self, name, val):
self.__dict__[name] = val
def addclone(self, clone):
if clone is not None:
self.clones.append(clone)
self.size = self.size + clone.count
self.numclone = self.numclone + 1
def addclones(self, clones):
count = sum([clone.count for clone in clones])
self.clones.extend(clones)
self.size += count
self.numclone += len(clones)
def setgroup(self, group):
self.group = group
def setcolor(self, color):
self.color = color
def setmarker(self, marker):
self.marker = marker
def setclones(self, clones):
self.clones = []
self.size = 0
self.numclone = 0
if clones is not None:
self.addclones(clones)
def resetfreq(self):
if len(self.clones) > 0:
self.numclone = len(self.clones)
self.size = sum([clone.count for clone in self.clones])
for clone in self.clones:
clone.freq = float(clone.count)/self.size
class EmptySampleError(Exception):
pass
######## RELEVANT FUNCTIONS ########
#======== Write a sample to output file =======
def write_clones(file, clones, append=False):
if not clones:
return
if append:
f = open(file, 'a')
else:
f = open(file, 'w')
firstclone = clones[0]
columns = firstclone.get_sorted_items()
f.write("%s\n" % "\t".join(columns))
for clone in clones:
f.write("%s\n" % clone.getstr())
f.close()
def write_clones_to_fasta(clones, f, sample, currindex):
for i, clone in enumerate(clones):
id = currindex + i
seq = clone.nuc
if clone.aa:
seq = clone.aa
header = "%s;%d|%s|%s|%s;size=%d" % (sample, id, clone.v, clone.j,
clone.d, clone.count)
f.write(">%s\n" % header)
f.write("%s\n" % seq)
#def write_samples(outdir, samples):
# for sample in samples:
# outfile = os.path.join(outdir, "%s.tsv" % sample.name)
# write_sample(outfile, sample)
#======= set group, color, marker etc =====
def set_sample_group(sample, name2group):
if sample.name in name2group:
sample.setgroup(name2group[sample.name])
def set_sample_color(sample, name2color):
if sample.name in name2color:
sample.setcolor(name2color[sample.name])
def set_sample_marker(sample, name2marker):
if sample.name in name2marker:
sample.setmarker(name2marker[sample.name])
#======= Filtering =============
def filter_by_size(clones, mincount=-1, maxcount=-1, minfreq=-1, maxfreq=-1):
# Remove clones that have count (freq) < minsize
# or count (freq) > maxsize if maxsize is specified (!= -1)
newclones = []
for clone in clones:
newclone = copy.deepcopy(clone)
if newclone.count >= mincount and newclone.freq >= minfreq:
if ((maxcount == -1 or newclone.count <= maxcount) and
(maxfreq == -1 or newclone.freq <= maxfreq)):
newclones.append(newclone)
return newclones
def filter_by_status(clones, productive=True):
newclones = []
for clone in clones:
if productive == clone.productive:
newclones.append(copy.deepcopy(clone))
return newclones
#======= Split clones by VJ ===============
def clone_get_recomb_info(cdr3clone, clone):
cdr3clone.vdel = clone.vdel
cdr3clone.jdel = clone.jdel
cdr3clone.d5del = clone.d5del
cdr3clone.d3del = clone.d3del
vd_nts = clone.nuc[clone.lastvpos: clone.firstdpos] # include the last V
cdr3clone.vdins = vd_nts
dj_nts = clone.nuc[clone.lastdpos + 1: clone.firstjpos + 1] # include lastJ
cdr3clone.djins = dj_nts
def split_clones_by_vj(clones, sample_name=None):
v2j2clones = {}
for clone in clones:
numcombi = len(clone.vgenes) * len(clone.jgenes)
if clone.dgenes:
numcombi *= len(clone.dgenes)
nuc = clone.cdr3nuc
if nuc is None:
nuc = clone.nuc
count = clone.count/numcombi
if count == 0:
count = 1
normcount = clone.normcount/numcombi
freq = clone.freq/numcombi
for vindex, v in enumerate(clone.vgenes):
for jindex, j in enumerate(clone.jgenes):
cdr3clones = []
if not clone.dgenes:
cdr3clone = libclone.Cdr3Clone(count, nuc, v, j, '',
clone.cdr3aa, sample_name,
normcount, freq)
cdr3clones.append(cdr3clone)
else:
for dindex, d in enumerate(clone.dgenes):
cdr3clone = libclone.Cdr3Clone(count, nuc, v, j, d,
clone.cdr3aa, sample_name,
normcount, freq)
cdr3clones.append(cdr3clone)
if vindex == 0 and jindex == 0 and dindex == 0:
clone_get_recomb_info(cdr3clone, clone)
if v not in v2j2clones:
v2j2clones[v] = {j: cdr3clones}
elif j not in v2j2clones[v]:
v2j2clones[v][j] = cdr3clones
else:
v2j2clones[v][j].extend(cdr3clones)
return v2j2clones
def sample_get_size(indir):
name = os.path.basename(indir.rstrip('/'))
samfile = os.path.join(indir, "%s" % name)
sample = pickle.load(gzip.open(samfile, 'rb'))
assert sample.name == name
numclone = 0
size = 0
# get total size and total number of clones
for vjfile in os.listdir(indir):
if vjfile == name:
continue
clones = pickle.load(gzip.open(os.path.join(indir, vjfile), "rb"))
numclone += len(clones)
size += sum([clone.count for clone in clones])
if size == 0:
raise EmptySampleError("Sample %s at %s is empty" % (name, indir))
sample.size = size
sample.numclone = numclone
pickle.dump(sample, gzip.open(samfile, "wb"))
return numclone, size
def get_vj2sams(indir, sams=None):
vj2sams = {}
if sams is None:
sams = os.listdir(indir)
for sam in sams:
samdir = os.path.join(indir, sam)
for vj in os.listdir(samdir):
if vj == sam:
continue
if vj not in vj2sams:
vj2sams[vj] = [sam]
else:
vj2sams[vj].append(sam)
return vj2sams
def sample_all_clones(samdir):
name = os.path.basename(samdir.strip('/'))
allclones = []
for vj in os.listdir(samdir):
if vj == name:
continue
vjfile = os.path.join(samdir, vj)
clones = pickle.load(gzip.open(vjfile, 'rb'))
allclones.extend(clones)
return allclones
def reset_freqs_vj(infile, size):
clones = pickle.load(gzip.open(infile, "rb"))
for c in clones:
c.freq = float(c.count)/size
pickle.dump(clones, gzip.open(infile, "wb"))
#========== OBJs ====
class WriteSample(Target):
def __init__(self, indir, outfile):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
def run(self):
if os.path.exists(self.outfile):
            system("rm -f %s" % self.outfile)
for batch in os.listdir(self.indir):
batchfile = os.path.join(self.indir, batch)
clones = pickle.load(gzip.open(batchfile, "rb"))
write_clones(self.outfile, clones, True)
class WriteSampleFasta(Target):
'''may need to fix this, right now indir is sample dir with this
structure:
indir/
sample
v1
v2
...
'''
def __init__(self, indir, outfile):
Target.__init__(self)
self.indir = indir
self.outfile = outfile
def run(self):
sample = os.path.basename(self.indir)
f = open(self.outfile, 'w')
numclone = 0
for file in os.listdir(self.indir):
if file == sample:
continue
filepath = os.path.join(self.indir, file)
clones = pickle.load(gzip.open(filepath, 'rb'))
write_clones_to_fasta(clones, f, sample, numclone)
numclone += len(clones)
f.close()
class WriteSamples(Target):
def __init__(self, indir, outdir, samout):
Target.__init__(self)
self.indir = indir
self.outdir = outdir
self.samout = samout
def run(self):
if 'pickle' in self.samout:
pickledir = os.path.join(self.outdir, "pickle")
system("mkdir -p %s" % pickledir)
system("cp -r %s %s" % (self.indir, pickledir))
if 'txt' in self.samout:
txtdir = os.path.join(self.outdir, "txt")
system("mkdir -p %s" % txtdir)
for sam in os.listdir(self.indir):
samdir = os.path.join(self.indir, sam)
outfile = os.path.join(txtdir, sam)
self.addChildTarget(WriteSample(samdir, outfile))
if 'fasta' in self.samout:
fadir = os.path.join(self.outdir, 'fasta')
system('mkdir -p %s' % fadir)
for sam in os.listdir(self.indir):
samdir = os.path.join(self.indir, sam)
outfile = os.path.join(fadir, sam)
self.addChildTarget(WriteSampleFasta(samdir, outfile))
class FilterSample(Target):
'''Filter a sample by clone size and by productive status
'''
def __init__(self, outdir, name, samplefile, opts):
Target.__init__(self)
self.outdir = outdir
self.name = name
self.samplefile = samplefile
self.opts = opts
def run(self):
# filter by size
starttime = time.time()
opts = self.opts
clones = pickle.load(gzip.open(self.samplefile, 'rb'))
if (opts.mincount > 1 or opts.maxcount > 0 or opts.minfreq > 0 or
opts.maxfreq > 0):
clones = filter_by_size(clones, opts.mincount, opts.maxcount,
opts.minfreq, opts.maxfreq)
msg = ("Filter_by_size for file %s done in %.4f s" %
(self.samplefile, time.time() - starttime))
logger.info(msg)
starttime = time.time()
# filter by status
pclones = filter_by_status(clones, True)
npclones = filter_by_status(clones, False)
filename = os.path.basename(self.samplefile)
if pclones:
pdir = os.path.join(self.outdir, "productive", self.name)
system("mkdir -p %s" % pdir)
pfile = os.path.join(pdir, filename)
pickle.dump(pclones, gzip.open(pfile, "wb"))
if npclones:
npdir = os.path.join(self.outdir, "non_productive", self.name)
system("mkdir -p %s" % npdir)
npfile = os.path.join(npdir, filename)
pickle.dump(npclones, gzip.open(npfile, "wb"))
msg = ("Filter_by_status for file %s done in %.4f s" %
(self.samplefile, time.time() - starttime))
logger.info(msg)
self.setFollowOnTarget(libcommon.CleanupFile(self.samplefile))
class SplitClonesByV(Target):
'''Split clones by V, one file per V
'''
def __init__(self, infile, outdir, sample_name=None):
Target.__init__(self)
self.infile = infile
self.outdir = outdir
self.sample_name = sample_name
def run(self):
clones = pickle.load(gzip.open(self.infile, "rb"))
v2j2clones = split_clones_by_vj(clones, self.sample_name)
for v, j2clones in v2j2clones.iteritems():
vclones = []
for j, vjclones in j2clones.iteritems():
vclones.extend(vjclones)
vfile = os.path.join(self.outdir, v)
pickle.dump(vclones, gzip.open(vfile, "wb"))
class SplitClonesByVJ(Target):
'''
'''
def __init__(self, infile, outdir, sample_name=None):
Target.__init__(self)
self.infile = infile
self.outdir = outdir
self.sample_name = sample_name
def run(self):
clones = pickle.load(gzip.open(self.infile, "rb"))
v2j2clones = split_clones_by_vj(clones, self.sample_name)
for v, j2clones in v2j2clones.iteritems():
for j, vjclones in j2clones.iteritems():
vjfile = os.path.join(self.outdir, "%s_%s" % (v, j))
pickle.dump(vjclones, gzip.open(vjfile, "wb"))
class SampleResetFreqsVJ(Target):
def __init__(self, infile, size):
Target.__init__(self)
self.infile = infile
self.size = size
def run(self):
#stime = time.time()
reset_freqs_vj(self.infile, self.size)
#self.logToMaster("SampleResetFreqsVJ: done in %.4f s\n" %
# (time.time() - stime))
class SampleResetFreqs(Target):
'''
'''
def __init__(self, indir):
Target.__init__(self)
self.indir = indir
def run(self):
self.logToMaster("SampleResetFreqs\n")
name = os.path.basename(self.indir.rstrip('/'))
numclone, size = sample_get_size(self.indir)
for vjfile in os.listdir(self.indir):
if vjfile == name:
continue
vjpath = os.path.join(self.indir, vjfile)
reset_freqs_vj(vjpath, size)
#self.addChildTarget(SampleResetFreqsVJ(vjpath, size))
class VjSampleAgg(Target):
def __init__(self, vj, batches, indir, outdir):
Target.__init__(self)
self.vj = vj
self.batches = batches
self.indir = indir
self.outdir = outdir
def run(self):
#stime = time.time()
vjfile = os.path.join(self.outdir, self.vj)
clones = []
for batch in self.batches:
file = os.path.join(self.indir, batch, self.vj)
currclones = pickle.load(gzip.open(file, "rb"))
clones.extend(currclones)
pickle.dump(clones, gzip.open(vjfile, "wb"))
#self.logToMaster("VjSampleAgg: done in %.4f s\n" %
# (time.time() - stime))
class MakeDbSampleAgg(Target):
'''
Each sample is stored in a directory, with:
sample_name.pickle: contain Sample obj, without the clones
V1J1.pickle: list of Cdr3Clones that have V1 and J1
V1J2.pickle
...
VnJm.pickle
Also recompute the frequencies
'''
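    # Added note (hedged): the files written here are what sample_get_size()
    # and sample_all_clones() later read back -- every file in the sample
    # directory except the one named after the sample itself is treated as a
    # pickled list of Cdr3Clone objects.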
def __init__(self, indir, outdir, opts, group=None, color=(0, 0, 0),
marker='.'):
Target.__init__(self)
self.indir = indir
self.outdir = outdir
self.opts = opts
self.group = group
self.color = color
self.marker = marker
def run(self):
# create a Sample obj
name = os.path.basename(self.indir.rstrip('/'))
sample = Sample(name, group=self.group, color=self.color,
marker=self.marker)
samfile = os.path.join(self.outdir, name)
pickle.dump(sample, gzip.open(samfile, 'wb'))
# aggregate the batches
vj2batches = get_vj2sams(self.indir)
for vj, batches in vj2batches.iteritems():
self.addChildTarget(VjSampleAgg(vj, batches, self.indir,
self.outdir))
self.setFollowOnTarget(SampleResetFreqs(self.outdir))
class MakeDbSample(Target):
'''Set up smaller jobs to split clones by VJ
Then set up follow_on job to merge them
'''
def __init__(self, indir, outdir, opts, group, color, marker):
Target.__init__(self)
self.indir = indir
self.outdir = outdir
self.opts = opts
self.group = group
self.color = color
self.marker = marker
def run(self):
name = os.path.basename(self.indir.rstrip("/"))
global_dir = self.getGlobalTempDir()
tempdir = os.path.join(global_dir, "split_by_vj", name)
system("mkdir -p %s" % tempdir)
for file in os.listdir(self.indir): # each batch, split clone by V
infile = os.path.join(self.indir, file)
child_outdir = os.path.join(tempdir, os.path.splitext(file)[0])
system("mkdir -p %s" % child_outdir)
self.addChildTarget(SplitClonesByV(infile, child_outdir, name))
#self.addChildTarget(SplitClonesByVJ(infile, child_outdir, name))
self.setFollowOnTarget(MakeDbSampleAgg(tempdir, self.outdir, self.opts,
self.group, self.color, self.marker))
#======= SAMPLING =============
'''
Different samples have different level of sequencing, and hence will create
bias. For example, large sample (lots of sequences got amplified and sequenced)
will inherently have more overlapping with other samples than a smaller sample.
The purpose of sampling is to make sure that every sample has the same number
of starting sequences to avoid the bias
'''
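# Hedged illustration of the intended use (the variable names below are
# hypothetical, not part of this module): to compare repertoires at equal
# sequencing depth, each sample is down-sampled to the size of the smallest
# sample before any overlap statistics are computed, e.g.
#     common_depth = min(s.size for s in samples)
#     sampling(sample, sampledir, outdir, args=(common_depth,))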
class SamplingError(Exception):
pass
def get_top_clones(vj2index2count, size):
top_vj2i2c = {}
countvji = []
for vj, i2c in vj2index2count.iteritems():
for i, c in i2c.iteritems():
countvji.append((c, (vj,i)))
countvji = sorted(countvji, reverse=True, key=lambda item:item[0])
assert size <= len(countvji)
for item in countvji[:size]:
c = item[0]
vj = item[1][0]
i = item[1][1]
if vj not in top_vj2i2c:
top_vj2i2c[vj] = {i: c}
else:
top_vj2i2c[vj][i] = c
return top_vj2i2c
def sampling(sample, sampledir, outdir, args=None):
if not args:
return sampledir
#raise ValueError("Sample sampling: sample is None or has 0 clone.\n")
size = args[0]
uniq = False
if len(args) > 1 and args[1] == 'uniq': # sampling unique clones
uniq = True
top = False
if len(args) >= 3 and args[1] == 'top': # sampling, then get largest clones
top = True
numtop = int(args[2])
if sample is None or sample.size == 0:
raise ValueError("Sample sampling: sample is None or has 0 clone.\n")
if size <= 0 or size > sample.size:
raise ValueError(("Sample sampling: invalid size %d.\n" % size +
"Sample %s has %d sequences.\n" % (sample.name, sample.size)))
newsample = Sample(sample.name, group=sample.group, color=sample.color,
marker=sample.marker)
indices = [] # represent all clones
for vj in os.listdir(sampledir):
if vj == sample.name:
continue
vjfile = os.path.join(sampledir, vj)
clones = pickle.load(gzip.open(vjfile, 'rb'))
for i, clone in enumerate(clones):
if uniq:
indices.append((vj, i))
else:
indices.extend([(vj, i)] * clone.count)
chosen_indices = random.sample(indices, size)
vj2index2count = {}
for (vj, i) in chosen_indices:
if vj not in vj2index2count:
vj2index2count[vj] = {i: 1}
elif i not in vj2index2count[vj]:
vj2index2count[vj][i] = 1
else:
vj2index2count[vj][i] += 1
# only return top clones if "top" is specified
if top and numtop:
vj2index2count = get_top_clones(vj2index2count, numtop)
vj2newclones = {}
numclone = 0
for vj, i2count in vj2index2count.iteritems():
clones = pickle.load(gzip.open(os.path.join(sampledir, vj), 'rb'))
newclones = []
for i, count in i2count.iteritems():
newclone = copy.deepcopy(clones[i])
newclone.count = count
newclone.freq = float(count)/size
newclones.append(newclone)
vjoutfile = os.path.join(outdir, vj)
pickle.dump(newclones, gzip.open(vjoutfile, "wb"))
numclone += len(newclones)
newsample.size = size
newsample.numclone = numclone
outsamfile = os.path.join(outdir, sample.name)
pickle.dump(newsample, gzip.open(outsamfile, "wb"))
def sampling_uniq(sample, size):
# randomly select "size" number of clones.
# Note: Use with caution -- each clone's count and freq stays the
# same (i.e sum of freq may not add up to 1)
if sample is None or sample.size == 0:
raise ValueError("Sample sampling: sample is None or has 0 clone.\n")
if size <= 0 or size > len(sample.clones):
raise ValueError("Sample sampling: invalid size %d.\n" % size)
newsample = Sample(sample.name, group=sample.group, color=sample.color,
marker=sample.marker)
newclones = random.sample(sample.clones, size)
newsample.setclones(newclones)
return newsample
#class Sampling(Target):
# def __init__(self, sample, size, outdir):
# Target.__init__(self)
# self.sample = sample
# self.size = size
# self.outdir = outdir
#
# def run(self):
# subsample = sampling(self.sample, self.size)
# outfile = os.path.join(self.outdir, "%s.pickle" % sample.name)
# pickle.dump(subsample, gzip.open(outfile, "wb"))
class SampleAnalysis0(Target):
'''General child job Obj to do analysis for a specific sample
'''
def __init__(self, sample, samdir, outdir, func, *func_args):
Target.__init__(self)
self.sample = sample
self.samdir = samdir
self.outdir = outdir
self.func = func
self.func_args = func_args
def run(self):
self.func(self.sample, self.samdir, self.outdir, args=self.func_args)
class SampleAnalysis(Target):
'''General child job Obj to do analysis for a specific sample
'''
def __init__(self, sample, samdir, outfile, func, *func_args):
Target.__init__(self)
self.sample = sample
self.samdir = samdir
self.outfile = outfile
self.func = func
self.func_args = func_args
def run(self):
result_obj = self.func(self.sample, self.samdir, args=self.func_args)
pickle.dump(result_obj, gzip.open(self.outfile, 'wb'))
| mit | -7,928,144,477,451,427,000 | 33.634393 | 88 | 0.56728 | false |
monkeymia/js | mmdb/DB_Interface.py | 1 | 14431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# https://github.com/monkeymia/
#
# Copyright (c) 2014, monkeymia, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library
#
import MySQLdb
import os
import os.path
import DB_Query_Result
import JSON_to_DB
# please read sql tutorial:
# http://www.tutorialspoint.com/mysql/mysql-like-clause.htm
class DB_Interface(object):
# Wrapper to encapsulate SQL Statements and SQL Server.
# A wrapper helps to reduce the risk of unwanted SQL injections.
#
#
# Assumptions:
# * Somebody outside may change sql database - no cache
# * primary key is table name lower case
# * primary key is conform to ANSI-C variable format i.e string length 30
# * value NULL forbidden - helps to make client code easier
# * no protection against stupid client code. for example after creation
# use database must be called.
#
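    # Hedged usage sketch (added; names are illustrative only):
    #     db = DB_Interface()
    #     db.mk_db("example")
    #     db.use_db("example")        # a database must be selected first
    #     db.mk_table("person", [{"name": "age", "type": "INT"}])
    #     db.new_row("person", "p1", {"age": 30})
    #     print db.get_row("person", "p1")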
FORMAT_AS_HTML_TABLE = 1
FORMAT_AS_CSV = 2
FORMAT_AS_LEN = 3
FORMAT_AS_USER = 4
FORMAT_AS_REPR = 5
FORMAT_AS_FIRST = 6
FORMAT_AS_PY = 7
# file extension of database description files
extension_json = ".json"
# holds sql server connection for all instances of DB interface
_connection = None
# enable sql query debugging - helps for unit test creation
_debug_me = False
# Hide SQL Server specific system tables
_internal_dbs = ("information_schema", "mysql", "performance_schema")
# defautl sql server language.
_sql_lang = "MySQL"
# avoid duplicate code. grep is your friend.
_sql_pwd_db_cmd = "SELECT DATABASE();"
def __init__(self, sql_lang=None):
if sql_lang:
self._sql_lang = sql_lang
if self.__class__._connection is None:
# it seems no utf8 in /usr/share/mysql/charsets/
# charset='latin1',
self.__class__._connection = MySQLdb.Connect(
host="127.0.0.1",
port=3306,
user="root",
passwd="root",
charset="utf8"
)
# end def
def clear_dbs(self):
# after this call no database is selected !
result = self.ls_dbs()
rows = result.rows
for db in rows:
if result:
result = self.rm_db(db)
return result
# end
def close(self):
cmd = getattr(self.__class__._connection, "close")
if callable(cmd):
self.__class__._connection = None
cmd()
# end
def del_row(self, table_name, key, del_all=False):
if del_all:
sql = "DELETE FROM %s;" % (table_name)
return self._sql_runner(sql)
else:
pk = self._primary_key(table_name)
if not self.has_row(table_name, key):
                raise NotImplementedError(
                    "Error (del_row): row does not exist (%s) (%s)"
% (table_name, key)
)
sql = "DELETE FROM %s WHERE %s=\"%s\";" % (table_name, pk, key)
return self._sql_runner(sql)
# end def
def get_row(self, table_name, key, cols=[]):
pk = self._primary_key(table_name)
if cols:
sql = (
"SELECT %s FROM %s WHERE %s=\"%s\";"
% (",".join(cols), table_name, pk, key)
)
else:
sql = "SELECT * FROM %s WHERE %s=\"%s\";" % (table_name, pk, key)
return self._sql_runner(sql, cols=cols)
# end def
def has_row(self, table_name, key):
pk = self._primary_key(table_name)
sql = "SELECT %s FROM %s WHERE %s=\"%s\";" % (pk, table_name, pk, key)
res = self._sql_runner(sql)
return res
# end def
def ls_dbs(self):
sql = "SHOW DATABASES;"
result = self._sql_runner(sql, cols=["Database"], flat=True)
for db in self._internal_dbs:
try:
result.rows.remove(db)
except ValueError, e:
result.errors.append(
"Error (ls_dbs): Cannot remove internal db %s (%s)."
% (db, e)
)
return result
# end def
def ls_layouts(self):
# return possible database definitions...
result = DB_Query_Result.DB_Query_Result(zero_is_valid_rowcount=True)
rows = []
path = os.path.dirname(__file__)
ext = self.extension_json.lower()
try:
for f in os.listdir(path):
if f.lower().endswith(ext):
rows.append(str(f[:(-len(ext))]))
except (OSError, IOError), e:
result.errors.append(
"Cannot list files (*.%s). - Details: %s"
% (ext, e)
)
result.cols = ["layout"]
result.rows = rows
return result
# end def
def ls_cols(self, table_name):
# sql = "SHOW COLUMNS FROM %s;" % (table_name, ) returns:
# +------------+--------------+------+-----+---------+-------+
# | Field | Type | Null | Key | Default | Extra |
# +------------+--------------+------+-----+---------+-------+
# | general_id | varchar(36) | NO | PRI | NULL | |
# | doc | int(11) | NO | | NULL | |
# | test1 | varchar(255) | NO | | NULL | |
# | test2 | varchar(255) | NO | | NULL | |
# +------------+--------------+------+-----+---------+-------+
# MYSQL Specific !!!
if self._sql_lang == "MySQL":
sql = (
"SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS "
"where TABLE_NAME = '%s'"
% (table_name, )
)
else:
raise NotImplementedError(
"SQL Language (%s) is not supported!" % self._sql_lang)
return self._sql_runner(
sql, cols=["COLUMN_NAME"], flat=True, zero_is_valid=True)
# end def
def ls_rows(self, table_name, cols=[]):
if not cols:
result = self.ls_cols(table_name)
if not result:
return result
cols = result.rows
sql = "SELECT %s FROM %s;" % (",".join(cols), table_name, )
return self._sql_runner(sql, cols=cols, zero_is_valid=True)
# end def
def ls_tables(self):
sql = "SHOW TABLES;"
return self._sql_runner(sql, cols=["TABLES"], flat=True)
# end def
def mk_db(self, db_name):
res = self.rm_db(db_name, silent=True)
if res:
sql = "CREATE DATABASE %s;" % db_name
res = self._sql_runner(sql)
return res
# end def
def mk_table(self, table_name, columns):
# MYSQL Specific !!!
if self._sql_lang == "MySQL":
pk = self._primary_key(table_name)
s = "%s VARCHAR(36) NOT NULL," % (pk, )
for col in sorted(columns):
e = col["type"]
n = col["name"]
s += "%s %s NOT NULL," % (n, e)
s += "PRIMARY KEY (%s)" % (pk, )
sql = "CREATE TABLE %s(%s);" % (table_name, s)
else:
raise NotImplementedError(
"SQL Language (%s) is not supported!" % self._sql_lang)
return self._sql_runner(sql, zero_is_valid=True)
# end def
def new_db(self, db_name, layout):
result = DB_Query_Result.DB_Query_Result(zero_is_valid_rowcount=True)
path = os.path.dirname(__file__)
fname = os.path.join(
path, db_name, "%s%s" % (layout, self.extension_json))
if os.path.exists(fname):
jtd = JSON_to_DB.JSON_to_DB()
result = jtd.convert(fname, layout)
else:
result.errors.append(
"Error (new_db): file not found - Details : %s"
% (fname, )
)
return result
# end def
def new_row(self, table_name, key, values_dct={}):
if not values_dct:
raise NotImplementedError(
"Error (new_row): No Values specified. (%s) (%s)"
% (table_name, key)
)
pk = self._primary_key(table_name)
cols = [pk]
vals = ["\"%s\"" % key]
for k, v in sorted(values_dct.iteritems()):
cols.append("%s" % k)
vals.append("%s" % v)
sql = (
"INSERT INTO %s (%s) VALUES (%s);"
% (table_name, ",".join(cols), ",".join(vals))
)
return self._sql_runner(sql, cols=cols)
# end def
def set_row(self, table_name, key, values_dct={}):
if not values_dct:
            raise NotImplementedError(
                "Error (set_row): No Values specified. (%s) (%s)"
% (table_name, key)
)
pk = self._primary_key(table_name)
if not self.has_row(table_name, key):
result = self.new_row(table_name, key, values_dct)
else:
cmd = []
for k, v in sorted(values_dct.iteritems()):
cmd.append("%s=%s" % (k, v))
sql = (
"UPDATE %s SET %s WHERE %s=\"%s\";"
% (table_name, ",".join(cmd), pk, key)
)
result = self._sql_runner(sql)
return result
# end def
def pwd_db(self):
sql = self._sql_pwd_db_cmd
return self._sql_runner(sql, cols=["Database()"], flat=True)
# end def
def rm_db(self, db_name, silent=False):
sql = "DROP DATABASE %s %s;" % ("IF EXISTS" if silent else "", db_name)
return self._sql_runner(sql, flat=True, zero_is_valid=True)
# end def
def rm_table(self, table_name):
sql = "DROP TABLE %s;" % table_name
return self._sql_runner(sql, zero_is_valid=True)
# end def
def use_db(self, db_name):
sql = "USE %s;" % (db_name, )
return self._sql_runner(sql, zero_is_valid=True)
# end def
def _format_tuples(self, cursor, req, fmt, **kw):
row_start_hook = kw.get("row_start_hook", None)
row_end_hook = kw.get("row_end_hook", None)
col_hook = kw.get("col_hook", None)
fetcher = getattr(cursor, "fetchall", None)
if callable(fetcher):
rows = cursor.fetchall()
else:
rows = cursor
if fmt == self.FORMAT_AS_HTML_TABLE:
for row in rows:
req.write(" <tr>\n")
for col in row:
                    req.write("        <td>" + str(col) + "</td>\n")
req.write(" </tr>\n")
elif fmt == self.FORMAT_AS_CSV:
for row in rows:
                req.write("\"%s\"\n" % ",".join(col for col in row))
elif fmt == self.FORMAT_AS_LEN:
l = len(rows)
req.write("%s" % l)
elif fmt == self.FORMAT_AS_REPR:
req.write(str(rows))
elif fmt == self.FORMAT_AS_FIRST:
for row in rows:
for col in row:
req.write(str(col))
return
elif fmt == self.FORMAT_AS_PY:
for row in rows:
r = []
for col in row:
r.append(col)
req.append(r)
elif fmt == self.FORMAT_AS_USER:
for row in rows:
if callable(row_start_hook):
row_start_hook()
for col in row:
                    if callable(col_hook):
col_hook(col)
if callable(row_end_hook):
row_end_hook()
else:
raise NotImplementedError(fmt)
# end def
def _primary_key(self, table_name):
return "%s_id" % (table_name.lower(), )
# end def
def _sql_runner(self, sql, cols=[], flat=False, zero_is_valid=False):
result = DB_Query_Result.DB_Query_Result(
zero_is_valid_rowcount=zero_is_valid)
cursor = self.__class__._connection.cursor()
try:
if __debug__ and self.__class__._debug_me:
print sql
rowcount = cursor.execute(sql)
valid = True
except:
valid = False
rowcount = 0
# avoid recursive calls:
if self._sql_pwd_db_cmd not in sql:
result_pwd = self.pwd_db()
if result_pwd.singleton():
msg = (
"Error(sql_runner):execute sql failed - "
"Wrong syntax ? (%s)!!"
% (sql,)
)
else:
msg = (
"Error(sql_runner):execute sql failed - "
"use database missing ?"
)
else:
msg = (
"Error(sql_runner):execute pwd_db failed. (%s)!!"
% (sql,)
)
result.errors.append(msg)
result.rowcount = rowcount
if valid:
result.cols = cols
result.rows = []
for row in cursor.fetchall():
if flat:
result.rows.append(row and row[0])
else:
result.rows.append(row)
lr = len(row)
lc = len(cols)
if lc and lr != lc:
result.errors.append(
"Error(sql_runner):len rows (%s) and len cols "
"(%s) do not match!" % (lr, lc)
)
if __debug__ and self.__class__._debug_me:
print "=>", str(result), str(result.cols), \
str(result.rows), str(result.errors)
return result
# end def
# end class
# __END__
| lgpl-3.0 | 2,860,058,342,691,675,000 | 34.283619 | 79 | 0.486591 | false |
alecperkins/py-ninja | examples/web_app.py | 1 | 2984 | """
An example web app that displays a list of devices, with links to a page for
each device's data. The device page updates every 2 seconds with the latest
heartbeat.
Note: requires Flask
"""
from _examples import *
from flask import Flask
from ninja.api import NinjaAPI
import json
api = NinjaAPI(secrets.ACCESS_TOKEN)
app = Flask(__name__)
device_cache = {}
BASE_HTML = """
<link rel="icon" href="">
<style>
table {
font-family: monospace;
}
td.label {
padding-right: 1em;
font-style: italic;
}
body {
max-width: 600px;
margin: 0 auto;
padding-top: 1em;
}
</style>
<script src="//cdnjs.cloudflare.com/ajax/libs/zepto/1.0rc1/zepto.min.js"></script>
"""
def getDevice(guid):
if not guid in device_cache:
device = api.getDevice(guid)
device_cache[guid] = device
else:
device = device_cache[guid]
return device
@app.route('/')
def hello():
user = api.getUser()
devices = api.getDevices()
html = BASE_HTML
html += """
<h1>Devices for {user.name}</h1>
<p>User ID: ({user.id})</p>
""".format(user=user)
html += """
<table>
<thead>
<tr>
<th>Device</th>
<th>ID</th>
</tr>
</thead>
<tbody>
"""
for device in devices:
device_cache[device.guid] = device
html += """
<tr>
<td class="label">{device}</td>
<td><a href="/{device.guid}/">{device.guid} »</a></td>
</tr>
""".format(device=device)
html += '</tbody></table>'
return html
@app.route('/<guid>/')
def showDevice(guid):
device = getDevice(guid)
html = BASE_HTML
html += """
<h1>{device}</h1>
<a href="/">« index</a>
<p>Device heartbeat every 2 seconds [<a href="heartbeat.json">raw</a>]</p>
<table>
""".format(device=device)
for field in device.asDict():
html += "<tr><td class='label'>%s</td><td>…</td></tr>" % (field,)
html += '</table>'
html += """
<script>
var $table = $('table');
function fetch(){
$.getJSON('heartbeat.json', function(response){
render(response);
});
}
function render(device){
var html = '';
for(k in device){
html += '<tr><td class="label">' + k + '</td><td>' + device[k] + '</td></tr>';
}
$table.html(html);
}
setInterval(fetch, 2000);
</script>
"""
return html
@app.route('/<guid>/heartbeat.json')
def deviceHeartbeat(guid):
device = getDevice(guid)
device.heartbeat()
return json.dumps(device.asDict(for_json=True))
app.run(debug=True)
| unlicense | -8,983,361,077,322,436,000 | 20.163121 | 98 | 0.493633 | false |
rmoritz/chessrank | chessrank/server/main.py | 1 | 2072 | #! python
import pymongo
import motor
from os import path
from tornado.ioloop import IOLoop
from tornado.options import options
from requesthandlers.api import ApiHandler
from requesthandlers.api.player import PlayerHandler
from requesthandlers.api.user import UserHandler
from requesthandlers.api.tournament import TournamentHandler
from requesthandlers.api.section import SectionHandler
from requesthandlers.api.session import SessionHandler
from requesthandlers.api.lookups import LookupsHandler
from requesthandlers import IndexPageHandler, VerifyPageHandler
from app import CustomApp
options.define('port', default=8888, help='run on the given port', type=int)
def load_app_settings():
db = pymongo.MongoClient().chessrank
return db.settings.find_one()
def main():
server_path = path.dirname(__file__)
template_path = path.join(server_path, 'templates')
static_path = path.normpath(path.join(server_path, '..', 'client'))
settings = {
'static_path': static_path,
'template_path': template_path,
'xsrf_cookies': False, # TODO: Enable
'login_url': '/',
'db': motor.MotorClient().chessrank,
}
app_settings = load_app_settings()
settings.update(app_settings)
handlers = [
(r'/api/tournaments(?:/([0-9a-fA-F]{24}))?.*', TournamentHandler),
(r'/api/sections(?:/([0-9a-fA-F]{24}))?.*', SectionHandler),
(r'/api/players(?:/([0-9a-fA-F]{24}))?.*', PlayerHandler),
(r'/api/users(?:/([0-9a-fA-F]{24}))?.*', UserHandler),
(r'/api/session', SessionHandler),
(r'/api/lookups', LookupsHandler),
(r'/api.*', ApiHandler),
(r'/verify/(.+)', VerifyPageHandler),
(r'/', IndexPageHandler)
]
options.parse_command_line()
app = CustomApp(handlers, 'localhost', **settings)
app.listen(options.port)
IOLoop.instance().start()
# Start the app
main()
| mit | -5,630,893,928,674,735,000 | 34.118644 | 82 | 0.619691 | false |
ErickMurillo/geodjango-example | geodjango/settings.py | 1 | 2225 | """
Django settings for geodjango project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pbmy6nmvdko3jn8xlbao6+sdcmq&jh52z3t#q4@7njc_75a%eg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'world',
#'addac_shapefile',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'geodjango.urls'
WSGI_APPLICATION = 'geodjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geodjango',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_media"),
)
| mit | 1,494,220,483,510,673,200 | 23.184783 | 71 | 0.722247 | false |
redhat-openstack/trove | trove/guestagent/datastore/experimental/postgresql/pgutil.py | 1 | 8041 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import psycopg2
from trove.common import exception
from trove.common.i18n import _  # assumed import path; provides the _() translation helper used below
LOG = logging.getLogger(__name__)
PG_ADMIN = 'os_admin'
class PostgresConnection(object):
def __init__(self, autocommit=False, **connection_args):
self._autocommit = autocommit
self._connection_args = connection_args
def execute(self, statement, identifiers=None, data_values=None):
"""Execute a non-returning statement.
"""
self._execute_stmt(statement, identifiers, data_values, False)
def query(self, query, identifiers=None, data_values=None):
"""Execute a query and return the result set.
"""
return self._execute_stmt(query, identifiers, data_values, True)
def _execute_stmt(self, statement, identifiers, data_values, fetch):
if statement:
with psycopg2.connect(**self._connection_args) as connection:
connection.autocommit = self._autocommit
with connection.cursor() as cursor:
cursor.execute(
self._bind(statement, identifiers), data_values)
if fetch:
return cursor.fetchall()
else:
raise exception.UnprocessableEntity(_("Invalid SQL statement: %s")
% statement)
def _bind(self, statement, identifiers):
if identifiers:
return statement.format(*identifiers)
return statement
class PostgresLocalhostConnection(PostgresConnection):
HOST = 'localhost'
def __init__(self, user, password=None, port=5432, autocommit=False):
super(PostgresLocalhostConnection, self).__init__(
autocommit=autocommit, user=user, password=password,
host=self.HOST, port=port)
# TODO(pmalik): No need to recreate the connection every time.
def psql(statement, timeout=30):
"""Execute a non-returning statement (usually DDL);
Turn autocommit ON (this is necessary for statements that cannot run
within an implicit transaction, like CREATE DATABASE).
"""
return PostgresLocalhostConnection(
PG_ADMIN, autocommit=True).execute(statement)
# TODO(pmalik): No need to recreate the connection every time.
def query(query, timeout=30):
"""Execute a query and return the result set.
"""
return PostgresLocalhostConnection(
PG_ADMIN, autocommit=False).query(query)
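# Hedged usage sketch (added, not part of the original module): the query
# builder classes below only build SQL strings; they are meant to be passed to
# psql() / query() above, e.g.
#     psql(DatabaseQuery.create('mydb', encoding='UTF8'))
#     rows = query(UserQuery.list(ignore=('os_admin',)))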
class DatabaseQuery(object):
@classmethod
def list(cls, ignore=()):
"""Query to list all databases."""
statement = (
"SELECT datname, pg_encoding_to_char(encoding), "
"datcollate FROM pg_database "
"WHERE datistemplate = false"
)
for name in ignore:
statement += " AND datname != '{name}'".format(name=name)
return statement
@classmethod
def create(cls, name, encoding=None, collation=None):
"""Query to create a database."""
statement = "CREATE DATABASE \"{name}\"".format(name=name)
if encoding is not None:
statement += " ENCODING = '{encoding}'".format(
encoding=encoding,
)
if collation is not None:
statement += " LC_COLLATE = '{collation}'".format(
collation=collation,
)
return statement
@classmethod
def drop(cls, name):
"""Query to drop a database."""
return "DROP DATABASE IF EXISTS \"{name}\"".format(name=name)
class UserQuery(object):
@classmethod
def list(cls, ignore=()):
"""Query to list all users."""
statement = "SELECT usename FROM pg_catalog.pg_user"
if ignore:
# User a simple tautology so all clauses can be AND'ed without
# crazy special logic.
statement += " WHERE 1=1"
for name in ignore:
statement += " AND usename != '{name}'".format(name=name)
return statement
@classmethod
def list_root(cls, ignore=()):
"""Query to list all superuser accounts."""
statement = (
"SELECT usename FROM pg_catalog.pg_user WHERE usesuper = true"
)
for name in ignore:
statement += " AND usename != '{name}'".format(name=name)
return statement
@classmethod
def get(cls, name):
"""Query to get a single user."""
return (
"SELECT usename FROM pg_catalog.pg_user "
"WHERE usename = '{name}'".format(name=name)
)
@classmethod
def create(cls, name, password, encrypt_password=None, *options):
"""Query to create a user with a password."""
create_clause = "CREATE USER \"{name}\"".format(name=name)
with_clause = cls._build_with_clause(
password, encrypt_password, *options)
return ''.join([create_clause, with_clause])
@classmethod
def _build_with_clause(cls, password, encrypt_password=None, *options):
tokens = ['WITH']
if password:
# Do not specify the encryption option if 'encrypt_password'
# is None. PostgreSQL will use the configuration default.
if encrypt_password is True:
tokens.append('ENCRYPTED')
elif encrypt_password is False:
tokens.append('UNENCRYPTED')
tokens.append('PASSWORD')
tokens.append("'{password}'".format(password=password))
if options:
tokens.extend(options)
if len(tokens) > 1:
return ' '.join(tokens)
return ''
@classmethod
def update_password(cls, name, password, encrypt_password=None):
"""Query to update the password for a user."""
return cls.alter_user(name, password, encrypt_password)
@classmethod
def alter_user(cls, name, password, encrypt_password=None, *options):
"""Query to alter a user."""
alter_clause = "ALTER USER \"{name}\"".format(name=name)
with_clause = cls._build_with_clause(
password, encrypt_password, *options)
return ''.join([alter_clause, with_clause])
@classmethod
def update_name(cls, old, new):
"""Query to update the name of a user."""
return "ALTER USER \"{old}\" RENAME TO \"{new}\"".format(
old=old,
new=new,
)
@classmethod
def drop(cls, name):
"""Query to drop a user."""
return "DROP USER \"{name}\"".format(name=name)
class AccessQuery(object):
@classmethod
def list(cls, user):
"""Query to list grants for a user."""
return (
"SELECT datname, pg_encoding_to_char(encoding), datcollate "
"FROM pg_database "
"WHERE datistemplate = false "
"AND 'user {user}=CTc' = ANY (datacl)".format(user=user)
)
@classmethod
def grant(cls, user, database):
"""Query to grant user access to a database."""
return "GRANT ALL ON DATABASE \"{database}\" TO \"{user}\"".format(
database=database,
user=user,
)
@classmethod
def revoke(cls, user, database):
"""Query to revoke user access to a database."""
return "REVOKE ALL ON DATABASE \"{database}\" FROM \"{user}\"".format(
database=database,
user=user,
)
| apache-2.0 | -8,859,612,235,505,157,000 | 30.410156 | 78 | 0.599055 | false |
johandry/RosPi | test/distance.py | 1 | 1426 | #!/usr/bin/env python
import time
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO_TRIGGER=17
GPIO_ECHO=27
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_TRIGGER,GPIO.OUT)
GPIO.setup(GPIO_ECHO,GPIO.IN)
def getDistance_1():
GPIO.output(GPIO_TRIGGER, GPIO.LOW)
time.sleep(0.3)
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
while GPIO.input(GPIO_ECHO) == 0:
pass
signaloff = time.time()
while GPIO.input(GPIO_ECHO) == 1:
pass
signalon = time.time()
return (signalon - signaloff) * 17000
def _measure():
GPIO.output(GPIO_TRIGGER, GPIO.LOW)
time.sleep(0.3)
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
while GPIO.input(GPIO_ECHO)==0:
pass
start = time.time()
while GPIO.input(GPIO_ECHO)==1:
pass
stop = time.time()
return (stop-start) * 34300/2
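# Added note: both helpers apply distance = elapsed_time * speed_of_sound / 2
# (the pulse travels to the obstacle and back); getDistance_1 rounds the
# half-trip factor to 17000 cm/s while _measure uses 34300 / 2 = 17150 cm/s,
# so the two functions differ by about 1%.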
def getDistance_2():
# This function takes 3 measurements and
# returns the average.
distance1=_measure()
time.sleep(0.1)
distance2=_measure()
time.sleep(0.1)
distance3=_measure()
return (distance1 + distance2 + distance3)/3
start=time.time()
distance = getDistance_1()
stop=time.time()
print "Using function #1: "+str(distance) + " cm at "+str(stop-start)+" sec"
start=time.time()
distance = getDistance_2()
stop=time.time()
print "Using function #2: "+str(distance) + " cm at "+str(stop-start)+" sec"
GPIO.cleanup()
| mit | 9,043,258,076,341,595,000 | 18.805556 | 76 | 0.682328 | false |
thanos/ilabs | ilabs/patterns.py | 1 | 10432 | """
integration pattern classes for message processing
(c) integrationLabs 1996- 2004
coded by: thanos and friends
integrationLabs opensource
$RCSfile: __init__.py,v $
$Date: 2004/07/16 17:31:19 $
$Revision: 1.3 $
"""
from types import ListType, TupleType
from ilabs.core import Node, RoutingStrategy, RouteTable, Router
class NodeList(Node):
deferChange = False
onChange = None
def __init__(self, name='', *args):
Node.__init__(self, name)
self.list =[]
if self.onChange: self.onChange(self.__init__, *args)
def __setitem__(self, i, item):
        self.list[i] = item
if self.onChange: self.onChange(self.__setitem__, i, item)
def __delitem__(self, i):
del self.list[i]
if self.onChange: self.onChange(self.__delitem__, i)
def __setslice__(self, i, j, other):
self.list[i:j] = other
if self.onChange: self.onChange(self.__setslice__, i, j, other)
def __delslice__(self, i, j):
del self.list[i:j]
if self.onChange: self.onChange(self.__delslice__, i, j)
def append(self, value):
self.list.append(value)
if self.onChange: self.onChange(self.append, value)
def insert(self, where, value):
self.list.insert(where, value)
if self.onChange: self.onChange(self.insert, value)
def pop(self, i=-1):
result = self.list.pop(i)
        if self.onChange: self.onChange(self.pop, i)
return result
def remove(self, item):
self.list.remove(item)
        if self.onChange: self.onChange(self.remove, item)
def count(self, item):
result = self.list.count(item)
        if self.onChange: self.onChange(self.count, item)
return result
def index(self, item, *args):
result = self.list.index(item, *args)
        if self.onChange: self.onChange(self.index, item)
return result
def reverse(self):
self.list.reverse()
        if self.onChange: self.onChange(self.reverse)
def sort(self, *args):
self.list.sort(*args)
        if self.onChange: self.onChange(self.sort, *args)
def extend(self, other):
self.list.extend(other)
        if self.onChange: self.onChange(self.extend, other)
def __repr__(self): return repr(self.list)
def __lt__(self, other): return self.list < other.list
def __le__(self, other): return self.list <= other.list
def __eq__(self, other): return self.list == other.list
def __ne__(self, other): return self.list != other.list
def __gt__(self, other): return self.list > other.list
def __ge__(self, other): return self.list >= other.list
def __cmp__(self, other): return cmp(self.list, other.list)
def __contains__(self, item): return item in self.list
def __len__(self): return len(self.list)
def __getitem__(self, i): return self.list[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.list[i:j]
class PipeLine(NodeList):
"""
Behaves as a chained list of Nodes. Received messages are passed to the first node, and the
last node in turn sends the message to the destination.
    The PipeLine's process is called before any of the containing nodes.
Use list operations (append, insert, del, setitem, etc) to set the nodes in the bank.
When Nodes are added, inserted removed they are relinked to maintain the pipeline.
PipeLine = [Node1, Node2.. NodeN]
NodeA -> PipeLine -> NodeB
Expands to:
    NodeA -> PipeLine -> Node1 -> Node2 -> ... -> NodeN -> NodeB
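    Hedged usage sketch (added; the node names are hypothetical):
        pipe = PipeLine('cleanup')
        pipe.append(strip_node)      # nodes are re-linked on every change
        pipe.append(validate_node)
        pipe.connect(sink_node)      # the last node now feeds the sink
        source_node.connect(pipe)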
"""
def onChange(self, *args):
""" called every time the list is altered to relink the nodes """
self.link()
def send(self, agent):
""" send the agent and sends it to 1st node """
if len(self):
self[0].receive(agent)
else:
Node.send(self, agent)
def connect(self, receiver, *args):
""" connects 1st as the pipelines destination and last node of chain to the actual destination """
Node.connect(self, receiver, *args)
if len(self):
self[-1].connect(receiver)
return self
def link(self):
""" used to relink the nodes """
if len(self):
for i in xrange(len(self[:-1])):
self[i].connect(self[i+1])
if self.destination:
self[-1].connect(self.destination)
class NodeBank(NodeList):
"""
Behaves as a "parallel" bank of Nodes.
    Received messages are passed to all the nodes in turn, the resulting messages are
then sent on to the destination.
As with PipeLine this can be treated as a list.
Use list operations (append, insert, del, setitem, etc) to set the nodes in the bank.
"""
def connect(self, receiver, *args):
assert receiver, "receiver must not be None"
Node.connect(self, receiver, *args)
if len(self):
map(lambda x, r = receiver, a = args: x.connect(r, *a), self)
def receive(self, agent):
if len(self):
for node in self:
node.receive(agent)
else:
            Node.receive(self, agent)
def append(self, node):
NodeList.append(self, node)
self.connectNode(node)
def __setitem__(self, index, node):
NodeList.__setitem__(self, index, node)
self.connectNode(node)
def connectNode(self, node):
if self.destination:
node.connect(self.destination)
def onChange(self, *args):
"do nothing"
pass
class RouteList(RouteTable):
def addRoute(self, route, *criteria):
if not criteria:
return RouteTable.addRoute(self, route, route)
return RouteTable.addRoute(self, route, *criteria)
class AllRoutes(RoutingStrategy):
def getRoutesUsingAgent(self, agent, table):
return table.allRoutes()
class Distributor(Router):
strategyClass = AllRoutes
tableClass = RouteList
"""
Will send messages to a list of connected subscribers
"""
class SubscriptionTable(RouteTable):
"""
Implements a router table adds subscribing route (node)
to an approriate distributer.
"""
def addRoute(self, node, *criteria):
for criterion in criteria:
if criterion not in self.table:
self.table[criterion] = Distributor()
self.table[criterion].connect(node)
class RecipientList(Router):
"""
Implements a router that sends a message to each route
on the approriate subscription list
"""
tableClass = SubscriptionTable
class MessageProcessor(Node):
routerClass = RecipientList
concentratorClass = Node
def __init__(self, name='', router=None, concentrator=None):
Node.__init__(self, name)
if not router:
            router = self.routerClass()
self.router = router
if not concentrator:
            concentrator = self.concentratorClass()
self.destination = concentrator
def register(self, processor, *criteria):
if id not in table:
processor.connect(self.destination)
self.router.connect(processor, *criteria)
def send(self, envelope):
self.router.receive(envelope)
def connect(self, recipient):
self.destination.connect(recipient)
class StateMachine(Node):
"""
    Implements a hierarchical state machine.
Add attribute the states where
states is a tuple of entries.
Each entry can be one of:
entry := current, message, next
entry := current state, message, test, resultset
current := StateMachine.INIT
State
collection of State
StateMachine.ANY
message := Message
collection of Message
StateMachine.ANY
next := State
StateMachine.CURRENT
test := python expression
resultset:= (result, next)+
some simple examples:
states=((DrawerClosed, Eject, DrawerOpen),
(DrawerOpen, Eject, CDStopped),
(CDStopped, Play, CDPlaying),
(CDPlaying, Pause, CDPaused),
((CDPlaying,CDPaused), Stoped, CDStoped),
(CDPaused, (Pause,Play) CDPlaying)
)
states=((DrawerClosed, Eject, DrawerOpen),
(DrawerOpen, Eject, "hasCd()", (False, CDStopped), (True, DrawerClosed)),
((CDStopped, CDPlaying, CDPause), Eject, DrawerOpen),
((CDStopped, CDPause), Play, CDPlaying),
(CDPlaying,CDPaused), Stoped, CDStoped),
((CDPlaying,CDPaused), NextTrack, "isLastTrack()", (False, StateMachine.CURRENT), (True, CDStop)),
((CDPlaying,CDPaused), PrevTrack", isFirstTrack()", (False, StateMachine.CURRENT), (True, CDStop))
)
    for more, and hierarchical examples, see the test code
"""
class State(Node):
def __repr__(self): return self.name or self.__class__.__name__
def process(self, node, message):
Node.process(self, message)
INIT = State('INIT')
ANY = State('ANY')
    LAST = State('LAST')
    CURRENT = State('CURRENT')
UNDEF = INIT
DEFAULT = object()
debug = False
def __init__(self, name, *args):
Node.__init__(self, name, *args)
self.state = self.INIT
self.buildTable()
def buildTable(self):
states = {}
for entry in self.states:
currentState, transitions = entry[0], entry[1:]
for transition in transitions:
if len(transition) == 2:
messageList, nextStates = transition
test = None
                elif len(transition) >= 3:
                    messageList, test, nextStates = transition[0], compile(transition[1], '<string>', 'eval'), dict(transition[2:])
if self.DEFAULT not in nextStates:
nextStates[self.DEFAULT] = None
if not type(messageList) in (ListType, TupleType):
messageList = [messageList]
for message in messageList:
states[self.getInitKey(currentState, message)] = test, nextStates
self.states = states
def getKey(self, message, currentState): return (currentState, message)
getInitKey = getKey
def getNextState(self, message, *varspace):
key = self.getKey( self.state, message)
entry = self.states.get(key)
#print '1.StateMachine key:%s, nextState: %s' % (key, entry)
if entry is None:
key = self.getKey(StateMachine.ANY, message)
entry = self.states.get(key)
#print '2.StateMachine key:%s, nextState: %s' % (key, entry)
retval = self.UNDEF
if entry is not None:
import time
test, resultStates = entry[0], entry[1]
if test:
res = eval(test, *varspace)
try:
newState = resultStates[res]
except KeyError:
newState = resultStates[self.DEFAULT]
"""
print test, 'TEST', time.strftime("%H:%M:%S.%%04d", time.localtime(message.header[0])) % message.header[7]
print res, newState, resultStates
"""
else:
newState = resultStates
else:
if self.UNDEF == StateMachine.CURRENT:
newState = self.state
else:
newState = self.INIT
#print self.getNextState, retval, self.state
return newState
def process(self, message, *varspace):
state = self.getNextState(message, *varspace)
if state != self.state:
if 0:
import time
print time.strftime("%H:%M:%S.%%04d", time.localtime(message.header[0])) % message.header[7]
print self.state, state, message.header
self.state = state
if hasattr(state, 'process'):
return state.process(self, message)
def ofInterest(self):
        return [message for message, state in self.states if state == self.state]
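# Driving sketch (hypothetical subclass and message; the 'states' table format
# is described in the class docstring above):
#
#   class CdPlayer(StateMachine):
#       states = (...)                 # transition table as documented above
#
#   player = CdPlayer('player')
#   player.process(Eject)              # advances self.state per the table
#   player.ofInterest()                # messages accepted from the current state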
| isc | -8,347,245,802,435,815,000 | 25.817481 | 116 | 0.689417 | false |
merll/docker-map | dockermap/map/state/__init__.py | 1 | 1983 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
from ...map import Flags, SimpleEnum
INITIAL_START_TIME = '0001-01-01T00:00:00Z'
class State(SimpleEnum):
ABSENT = 'absent' # Does not exist.
PRESENT = 'present' # Exists but is not running.
RUNNING = 'running' # Exists and is running.
class StateFlags(Flags):
INITIAL = 1 # Container is present but has never been started.
RESTARTING = 1 << 1 # Container is not running, but in the process of restarting.
PERSISTENT = 1 << 5 # Container is configured as persistent.
NONRECOVERABLE = 1 << 10 # Container is stopped with an error that cannot be solved through restarting.
IMAGE_MISMATCH = 1 << 12 # Container does not correspond with configured image.
MISSING_LINK = 1 << 13 # A configured linked container cannot be found.
VOLUME_MISMATCH = 1 << 14 # Container is pointing to a different path than some of its configured volumes.
EXEC_COMMANDS = 1 << 15 # Container is missing at least one exec command.
HOST_CONFIG_UPDATE = 1 << 16 # The container host config (e.g. memory limits) differs, but can be updated.
NETWORK_DISCONNECTED = 1 << 20 # Container is not connected to a network that it is configured for.
NETWORK_LEFT = 1 << 21 # Container is connected to a network that it is not configured for.
NETWORK_MISMATCH = 1 << 22 # Container has different configured connection parameters than the current link.
MISC_MISMATCH = 1 << 30 # Item does otherwise not correspond with the configuration.
FORCED_RESET = 1 << 31 # Item in any state should be reset.
NEEDS_RESET = (NONRECOVERABLE | FORCED_RESET | IMAGE_MISMATCH | MISSING_LINK | VOLUME_MISMATCH | MISC_MISMATCH)
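# Illustrative check (not part of the original module; assumes the flag values
# combine like plain integers): state flags form a bitmask, so several
# conditions can be set at once and tested with '&'.
#
#   flags = StateFlags.INITIAL | StateFlags.VOLUME_MISMATCH
#   if flags & StateFlags.NEEDS_RESET:     # true here, VOLUME_MISMATCH is included
#       pass                               # schedule a reset of the container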
ConfigState = namedtuple('ConfigState', ['client_name', 'config_id', 'config_flags', 'base_state',
'state_flags', 'extra_data'])
| mit | -3,945,026,140,703,116,300 | 52.594595 | 115 | 0.673222 | false |
lukeroge/Ralybot | plugins/attacks.py | 1 | 5206 | import codecs
import json
import os
import random
import asyncio
import re
from cloudbot import hook
from cloudbot.util import textgen
nick_re = re.compile("^[A-Za-z0-9_|.\-\]\[]*$", re.I)
def is_valid(target):
""" Checks if a string is a valid IRC nick. """
if nick_re.match(target):
return True
else:
return False
def is_self(conn, target):
""" Checks if a string is "****self" or contains conn.name. """
if re.search("(^..?.?.?self|{})".format(re.escape(conn.nick)), target, re.I):
return True
else:
return False
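# Quick illustration (hypothetical values; assumes a connection whose nick is
# "Ralybot"):
#
#   is_valid("dave|away")     -> True   (only characters allowed by nick_re)
#   is_valid("dave smith")    -> False  (spaces are rejected)
#   is_self(conn, "ralybot")  -> True   (the bot refuses to target itself)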
@hook.on_start()
def load_attacks(bot):
"""
:type bot: cloudbot.bot.CloudBot
"""
    global larts, flirts, kills, slaps, insults, wrecks
with codecs.open(os.path.join(bot.data_dir, "larts.txt"), encoding="utf-8") as f:
larts = [line.strip() for line in f.readlines() if not line.startswith("//")]
with codecs.open(os.path.join(bot.data_dir, "flirts.txt"), encoding="utf-8") as f:
flirts = [line.strip() for line in f.readlines() if not line.startswith("//")]
with codecs.open(os.path.join(bot.data_dir, "insults.txt"), encoding="utf-8") as f:
insults = [line.strip() for line in f.readlines() if not line.startswith("//")]
with codecs.open(os.path.join(bot.data_dir, "kills.json"), encoding="utf-8") as f:
kills = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "slaps.json"), encoding="utf-8") as f:
slaps = json.load(f)
with codecs.open(os.path.join(bot.data_dir, "wreck.json"), encoding="utf-8") as f:
wrecks = json.load(f)
@asyncio.coroutine
@hook.command
def lart(text, conn, nick, action):
"""<user> - LARTs <user>"""
target = text.strip()
if not is_valid(target):
return "I can't attack that, since it's not a valid target. " \
"Please choose a valid target for me to lart!"
if is_self(conn, target):
# When the user is trying to make the bot attack itself, make the
# bot attack the user who is doing the command.
target = nick
phrase = random.choice(larts)
# act out the message
action(phrase.format(user=target))
@asyncio.coroutine
@hook.command
def flirt(text, conn, nick, message):
"""<user> - flirts with <user>"""
target = text.strip()
if not is_valid(target):
return "I can't flirt with that, since it's not a valid target. " \
"Please choose a valid target for me to flirt with!"
if is_self(conn, target):
# When the user is trying to make the bot attack itself, make the
# bot attack the user who is doing the command.
target = nick
message('{}, {}'.format(target, random.choice(flirts)))
@asyncio.coroutine
@hook.command
def kill(text, conn, nick, action):
"""<user> - kills <user>"""
target = text.strip()
if not is_valid(target):
return "I can't attack that, since it's not a valid target. " \
"Please choose a valid target for me to kill!"
if is_self(conn, target):
# When the user is trying to make the bot attack itself, make the
# bot attack the user who is doing the command.
target = nick
generator = textgen.TextGenerator(kills["templates"], kills["parts"], variables={"user": target})
# act out the message
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def slap(text, action, nick, conn, notice):
"""<user> -- Makes the bot slap <user>."""
target = text.strip()
if not is_valid(target):
return "I can't slap that, since it's not a valid target. " \
"Please choose a valid target for me to slap!"
if is_self(conn, target):
# When the user is trying to make the bot attack itself, make the
# bot attack the user who is doing the command.
target = nick
variables = {
"user": target
}
generator = textgen.TextGenerator(slaps["templates"], slaps["parts"], variables=variables)
# Act out the message.
action(generator.generate_string())
@asyncio.coroutine
@hook.command
def wreck(text, conn, nick, action):
"""<user> - Makes the bot wreck <user>."""
target = text.strip()
if not is_valid(target):
return "I can't wreck that, since it's not a valid target. " \
"Please choose a valid target for me to wreck!"
if is_self(conn, target):
# When the user is trying to make the bot attack itself, make the
# bot attack the user who is doing the command.
target = nick
generator = textgen.TextGenerator(wrecks["templates"], wrecks["parts"], variables={"user": target})
# Act out the message.
action(generator.generate_string())
@asyncio.coroutine
@hook.command()
def insult(text, conn, nick, notice, message):
"""<user> - insults <user>
:type text: str
:type conn: cloudbot.client.Client
:type nick: str
"""
target = text.strip()
if " " in target:
notice("Invalid username!")
return
# if the user is trying to make the bot target itself, target them
if is_self(conn, target):
target = nick
message("{}, {}".format(target, random.choice(insults)))
| gpl-3.0 | 871,965,403,281,068,200 | 28.412429 | 103 | 0.626201 | false |
kaushik94/sympy | sympy/parsing/autolev/test-examples/ruletest10.py | 8 | 2679 | import sympy.physics.mechanics as me
import sympy as sm
import math as m
import numpy as np
x, y = me.dynamicsymbols('x y')
a, b = sm.symbols('a b', real=True)
e = a*(b*x+y)**2
m = sm.Matrix([e,e]).reshape(2, 1)
e = e.expand()
m = sm.Matrix([i.expand() for i in m]).reshape((m).shape[0], (m).shape[1])
e = sm.factor(e, x)
m = sm.Matrix([sm.factor(i,x) for i in m]).reshape((m).shape[0], (m).shape[1])
eqn = sm.Matrix([[0]])
eqn[0] = a*x+b*y
eqn = eqn.row_insert(eqn.shape[0], sm.Matrix([[0]]))
eqn[eqn.shape[0]-1] = 2*a*x-3*b*y
print(sm.solve(eqn,x,y))
rhs_y = sm.solve(eqn,x,y)[y]
e = (x+y)**2+2*x**2
e.collect(x)
a, b, c = sm.symbols('a b c', real=True)
m = sm.Matrix([a,b,c,0]).reshape(2, 2)
m2 = sm.Matrix([i.subs({a:1,b:2,c:3}) for i in m]).reshape((m).shape[0], (m).shape[1])
eigvalue = sm.Matrix([i.evalf() for i in (m2).eigenvals().keys()])
eigvec = sm.Matrix([i[2][0].evalf() for i in (m2).eigenvects()]).reshape(m2.shape[0], m2.shape[1])
frame_n = me.ReferenceFrame('n')
frame_a = me.ReferenceFrame('a')
frame_a.orient(frame_n, 'Axis', [x, frame_n.x])
frame_a.orient(frame_n, 'Axis', [sm.pi/2, frame_n.x])
c1, c2, c3 = sm.symbols('c1 c2 c3', real=True)
v=c1*frame_a.x+c2*frame_a.y+c3*frame_a.z
point_o = me.Point('o')
point_p = me.Point('p')
point_o.set_pos(point_p, c1*frame_a.x)
v = (v).express(frame_n)
point_o.set_pos(point_p, (point_o.pos_from(point_p)).express(frame_n))
frame_a.set_ang_vel(frame_n, c3*frame_a.z)
print(frame_n.ang_vel_in(frame_a))
point_p.v2pt_theory(point_o,frame_n,frame_a)
particle_p1 = me.Particle('p1', me.Point('p1_pt'), sm.Symbol('m'))
particle_p2 = me.Particle('p2', me.Point('p2_pt'), sm.Symbol('m'))
particle_p2.point.v2pt_theory(particle_p1.point,frame_n,frame_a)
point_p.a2pt_theory(particle_p1.point,frame_n,frame_a)
body_b1_cm = me.Point('b1_cm')
body_b1_cm.set_vel(frame_n, 0)
body_b1_f = me.ReferenceFrame('b1_f')
body_b1 = me.RigidBody('b1', body_b1_cm, body_b1_f, sm.symbols('m'), (me.outer(body_b1_f.x,body_b1_f.x),body_b1_cm))
body_b2_cm = me.Point('b2_cm')
body_b2_cm.set_vel(frame_n, 0)
body_b2_f = me.ReferenceFrame('b2_f')
body_b2 = me.RigidBody('b2', body_b2_cm, body_b2_f, sm.symbols('m'), (me.outer(body_b2_f.x,body_b2_f.x),body_b2_cm))
g = sm.symbols('g', real=True)
force_p1 = particle_p1.mass*(g*frame_n.x)
force_p2 = particle_p2.mass*(g*frame_n.x)
force_b1 = body_b1.mass*(g*frame_n.x)
force_b2 = body_b2.mass*(g*frame_n.x)
z = me.dynamicsymbols('z')
v=x*frame_a.x+y*frame_a.z
point_o.set_pos(point_p, x*frame_a.x+y*frame_a.y)
v = (v).subs({x:2*z, y:z})
point_o.set_pos(point_p, (point_o.pos_from(point_p)).subs({x:2*z, y:z}))
force_o = -1*(x*y*frame_a.x)
force_p1 = particle_p1.mass*(g*frame_n.x)+ x*y*frame_a.x
| bsd-3-clause | 4,608,471,831,100,164,600 | 40.859375 | 116 | 0.640911 | false |
pombredanne/SmartNotes | mercurial/hgweb/protocol.py | 1 | 6254 | #
# Copyright 21 May 2005 - (c) 2005 Jake Edge <[email protected]>
# Copyright 2005-2007 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
import cStringIO, zlib, tempfile, errno, os, sys
from mercurial import util, streamclone
from mercurial.node import bin, hex
from mercurial import changegroup as changegroupmod
from common import ErrorResponse, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
# __all__ is populated with the allowed commands. Be sure to add to it if
# you're adding a new command, or the new command won't work.
__all__ = [
'lookup', 'heads', 'branches', 'between', 'changegroup',
'changegroupsubset', 'capabilities', 'unbundle', 'stream_out',
]
HGTYPE = 'application/mercurial-0.1'
def lookup(repo, req):
try:
r = hex(repo.lookup(req.form['key'][0]))
success = 1
except Exception,inst:
r = str(inst)
success = 0
resp = "%s %s\n" % (success, r)
req.respond(HTTP_OK, HGTYPE, length=len(resp))
yield resp
def heads(repo, req):
resp = " ".join(map(hex, repo.heads())) + "\n"
req.respond(HTTP_OK, HGTYPE, length=len(resp))
yield resp
def branches(repo, req):
nodes = []
if 'nodes' in req.form:
nodes = map(bin, req.form['nodes'][0].split(" "))
resp = cStringIO.StringIO()
for b in repo.branches(nodes):
resp.write(" ".join(map(hex, b)) + "\n")
resp = resp.getvalue()
req.respond(HTTP_OK, HGTYPE, length=len(resp))
yield resp
def between(repo, req):
if 'pairs' in req.form:
pairs = [map(bin, p.split("-"))
for p in req.form['pairs'][0].split(" ")]
resp = cStringIO.StringIO()
for b in repo.between(pairs):
resp.write(" ".join(map(hex, b)) + "\n")
resp = resp.getvalue()
req.respond(HTTP_OK, HGTYPE, length=len(resp))
yield resp
def changegroup(repo, req):
req.respond(HTTP_OK, HGTYPE)
nodes = []
if 'roots' in req.form:
nodes = map(bin, req.form['roots'][0].split(" "))
z = zlib.compressobj()
f = repo.changegroup(nodes, 'serve')
while 1:
chunk = f.read(4096)
if not chunk:
break
yield z.compress(chunk)
yield z.flush()
def changegroupsubset(repo, req):
req.respond(HTTP_OK, HGTYPE)
bases = []
heads = []
if 'bases' in req.form:
bases = [bin(x) for x in req.form['bases'][0].split(' ')]
if 'heads' in req.form:
heads = [bin(x) for x in req.form['heads'][0].split(' ')]
z = zlib.compressobj()
f = repo.changegroupsubset(bases, heads, 'serve')
while 1:
chunk = f.read(4096)
if not chunk:
break
yield z.compress(chunk)
yield z.flush()
def capabilities(repo, req):
caps = ['lookup', 'changegroupsubset']
if repo.ui.configbool('server', 'uncompressed', untrusted=True):
caps.append('stream=%d' % repo.changelog.version)
if changegroupmod.bundlepriority:
caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
rsp = ' '.join(caps)
req.respond(HTTP_OK, HGTYPE, length=len(rsp))
yield rsp
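# For illustration only (the exact tokens depend on the repository
# configuration and on the bundle types known to changegroupmod), a capability
# string assembled above might look like:
#
#   "lookup changegroupsubset stream=<changelog version> unbundle=HG10GZ,HG10BZ,HG10UN"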
def unbundle(repo, req):
proto = req.env.get('wsgi.url_scheme') or 'http'
their_heads = req.form['heads'][0].split(' ')
def check_heads():
heads = map(hex, repo.heads())
return their_heads == [hex('force')] or their_heads == heads
# fail early if possible
if not check_heads():
req.drain()
raise ErrorResponse(HTTP_OK, 'unsynced changes')
# do not lock repo until all changegroup data is
# streamed. save to temporary file.
fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
fp = os.fdopen(fd, 'wb+')
try:
length = int(req.env['CONTENT_LENGTH'])
for s in util.filechunkiter(req, limit=length):
fp.write(s)
try:
lock = repo.lock()
try:
if not check_heads():
raise ErrorResponse(HTTP_OK, 'unsynced changes')
fp.seek(0)
header = fp.read(6)
if header.startswith('HG') and not header.startswith('HG10'):
raise ValueError('unknown bundle version')
elif header not in changegroupmod.bundletypes:
raise ValueError('unknown bundle compression type')
gen = changegroupmod.unbundle(header, fp)
# send addchangegroup output to client
oldio = sys.stdout, sys.stderr
sys.stderr = sys.stdout = cStringIO.StringIO()
try:
url = 'remote:%s:%s' % (proto,
req.env.get('REMOTE_HOST', ''))
try:
ret = repo.addchangegroup(gen, 'serve', url)
except util.Abort, inst:
sys.stdout.write("abort: %s\n" % inst)
ret = 0
finally:
val = sys.stdout.getvalue()
sys.stdout, sys.stderr = oldio
req.respond(HTTP_OK, HGTYPE)
return '%d\n%s' % (ret, val),
finally:
del lock
except ValueError, inst:
raise ErrorResponse(HTTP_OK, inst)
except (OSError, IOError), inst:
filename = getattr(inst, 'filename', '')
# Don't send our filesystem layout to the client
if filename.startswith(repo.root):
filename = filename[len(repo.root)+1:]
else:
filename = ''
error = getattr(inst, 'strerror', 'Unknown error')
if inst.errno == errno.ENOENT:
code = HTTP_NOT_FOUND
else:
code = HTTP_SERVER_ERROR
raise ErrorResponse(code, '%s: %s' % (error, filename))
finally:
fp.close()
os.unlink(tempname)
def stream_out(repo, req):
req.respond(HTTP_OK, HGTYPE)
try:
for chunk in streamclone.stream_out(repo, untrusted=True):
yield chunk
except streamclone.StreamException, inst:
yield str(inst)
| gpl-3.0 | -9,050,525,145,168,187,000 | 31.572917 | 77 | 0.564119 | false |
blstream/myHoard_Python | myhoard/apps/media/models.py | 1 | 5791 | import logging
from datetime import datetime
from PIL import Image, ImageOps
from werkzeug.exceptions import Forbidden, InternalServerError, NotFound
from flask import current_app, g
from flask.ext.mongoengine import Document
from mongoengine import MapField, ImageField, DateTimeField, ObjectIdField, ValidationError
from mongoengine.python_support import StringIO
logger = logging.getLogger(__name__)
class Media(Document):
images = MapField(ImageField())
created_date = DateTimeField(default=datetime.now)
item = ObjectIdField()
collection = ObjectIdField()
owner = ObjectIdField()
@property
def public(self):
from myhoard.apps.collections.models import Collection
return bool(Collection.objects(id=self.collection, public=True).count()) if self.collection else True
def __unicode__(self):
return '<{} {}>'.format(type(self).__name__, self.id)
@classmethod
def get_visible_or_404(cls, media_id):
from myhoard.apps.collections.models import Collection
media = cls.objects.get_or_404(id=media_id)
if media.collection:
Collection.get_visible_or_404(media.collection)
return media
@classmethod
def create(cls, image_file):
if not image_file:
raise ValidationError(errors={'image': 'Field is required'})
media = cls()
media.id = None
media.created_date = None
media.owner = g.user
cls.create_image_files(media, image_file)
logger.info('Creating {}...'.format(media))
media.save()
logger.info('Creating {} done'.format(media))
return media
@classmethod
def put(cls, media_id, image_file):
media = cls.objects.get_or_404(id=media_id)
if media.owner != g.user:
            raise Forbidden('Only media owner can edit media') if media.public else NotFound()
return cls.update(media, image_file)
@classmethod
def update(cls, media, image_file):
if not image_file:
raise ValidationError(errors={'image': 'Field is required'})
for image in media.images.itervalues():
image.delete()
media.images = {}
cls.create_image_files(media, image_file)
logger.info('Updating {} ...'.format(media))
media.save()
logger.info('Updating {} done'.format(media))
return media
@classmethod
def delete(cls, media_id):
media = cls.objects.get_or_404(id=media_id)
if media.owner != g.user:
raise Forbidden('Only media owner can delete media') if media.public else NotFound()
for image in media.images.itervalues():
image.delete()
logger.info('Deleting {} ...'.format(media))
super(cls, media).delete()
logger.info('Deleting {} done'.format(media))
@classmethod
def create_from_item(cls, item):
logger.info('Updating {} Media IDs ...'.format(item))
for media in cls.objects(id__in=item.media, item__not__exists=True, owner=g.user):
media.item = item.id
media.collection = item.collection
logger.info('Updating {} ...'.format(media))
media.save()
logger.info('Updating {} done'.format(media))
item.media = cls.objects(item=item.id).scalar('id')
logger.info('Updating {} ...'.format(item))
item.save()
logger.info('Updating {} done'.format(item))
logger.info('Updating {} Media IDs done'.format(item))
@classmethod
def delete_from_item(cls, item):
for media_id in cls.objects(item=item.id).scalar('id'):
cls.delete(media_id)
@classmethod
def get_image_file(cls, media, size):
if size:
if (size in media.images) and (size != 'master'):
image = media.images[size].get()
else:
raise ValidationError(errors={'size': 'Not in {}'.format(
', '.join(str(size) for size in current_app.config['IMAGE_THUMBNAIL_SIZES']))})
else:
image = media.images['master'].get()
return image
@classmethod
def open_image_file(cls, image_file):
extensions = current_app.config['IMAGE_EXTENSIONS']
if not ('.' in image_file.filename and image_file.filename.rsplit('.', 1)[1] in extensions):
raise ValidationError(errors={'image': 'File extension is not {}'.format(', '.join(extensions))})
try:
image = Image.open(image_file)
image_format = image.format
except IOError:
raise InternalServerError('No PIL drivers for that file type')
return image, image_format
@classmethod
def create_image_files(cls, media, image_file):
image, image_format = cls.open_image_file(image_file)
for size in current_app.config['IMAGE_THUMBNAIL_SIZES']:
cls.save_image_in_mapfield(
ImageOps.fit(image, (size, size), Image.ANTIALIAS),
image_format, media, str(size)
)
# since image is source stream for thumbnails
# we are forced to save original image last
cls.save_image_in_mapfield(image, image_format, media, 'master')
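    # Illustrative configuration (assumed here, not defined in this module):
    # the Flask app is expected to provide the size/extension settings, e.g.
    #
    #   IMAGE_THUMBNAIL_SIZES = [100, 320]         # square thumbnail edges in px
    #   IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png']
    #
    # so that media.images ends up keyed by 'master', '100', '320', ...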
# Super elastic workaround cost a little advanced usage
# https://github.com/MongoEngine/mongoengine/issues/382
# https://github.com/MongoEngine/mongoengine/pull/391
@classmethod
def save_image_in_mapfield(cls, image_obj, image_format, instance, index):
io = StringIO()
image_obj.save(io, image_format)
io.seek(0)
image_proxy = cls._fields['images'].field.get_proxy_obj('images', instance)
image_proxy.put(io)
instance.images[index] = image_proxy
| apache-2.0 | 3,235,573,336,128,406,000 | 31.903409 | 109 | 0.619755 | false |
DLR-SC/DataFinder | src/datafinder/gui/user/models/repository/repository.py | 1 | 28254 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Qt-specific abstraction of the model of the data repository.
"""
__version__ = "$Revision-Id:$"
import operator
import sys
from types import StringTypes
from PyQt4 import QtCore
from datafinder.common.logger import getDefaultLogger
from datafinder.core.error import ItemError
from datafinder.core.item.base import ItemBase
from datafinder.core.item.data_persister.constants import ITEM_STATE_ARCHIVED, \
ITEM_STATE_ARCHIVED_MEMBER, ITEM_STATE_ARCHIVED_READONLY, \
ITEM_STATE_MIGRATED, ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE, \
ITEM_STATE_INACCESSIBLE
from datafinder.gui.user.common.util import determineDisplayRepresentation, startNewQtThread, \
determinePropertyDefinitionToolTip
from datafinder.gui.user.models.repository.action_handler import ActionHandler
from datafinder.gui.user.models.repository.history import HistoryModel
from datafinder.gui.user.models.repository.icon_provider import IconProvider
class RepositoryModel(HistoryModel):
""" Implements the Qt-specific model for a data repository and its items. """
def __init__(self, preferences):
"""
Constructor.
        @param preferences: The central preferences.
        @type preferences: L{PreferencesHandler<datafinder.core.configuration.preferences.PreferencesHandler>}
"""
HistoryModel.__init__(self)
self._headers = list()
self._headerIds = list()
self._repository = None
self._actionHandler = None
self._preferences = preferences
self._iconProvider = None
self._emptyQVariant = QtCore.QVariant()
self._emptyQModelIndex = QtCore.QModelIndex()
self._placeHolderCollection = ItemBase("...")
self._placeHolderCollection._isCollection = True
self._placeHolderLeaf = ItemBase("...")
self._childrenPopulator = _ChildrenPopulator(self)
self._lockedItems = list()
def load(self, repository):
"""
Loads the model.
@param repository: The data repository.
@type repository: L{Repository<datafinder.core.repository.Repository>}
"""
self._actionHandler = ActionHandler(self, repository)
self._headers = [self.tr("Name")]
self._headerIds = ["name"]
self._iconProvider = IconProvider(repository.configuration)
systemPropertyDefinitions = sorted(repository.configuration.systemPropertyDefinitions,
key=operator.attrgetter("displayName"))
for propertyDefinition in systemPropertyDefinitions:
self._headers.append(self.tr(propertyDefinition.displayName))
self._headerIds.append(propertyDefinition.identifier)
self._repository = repository
self.activeIndex = self._emptyQModelIndex
self.reset()
def lock(self, indexes):
""" Locks the given index. """
# determine paths which are not required to be locked as
# the parent is going to be locked either.
items = list()
invalidPaths = list()
for index in indexes:
items.append(self.nodeFromIndex(index))
currentItem = self.nodeFromIndex(self.activeIndex) # this is done to protect the current item
items.append(currentItem)
for item1 in items:
for item2 in items:
if item1.path != item2.path:
if item1.path.startswith(item2.path) \
and not item2.path in invalidPaths:
invalidPaths.append(item1.path)
items.remove(currentItem)
if currentItem.path in invalidPaths: # prevent invalidation of current item
self.activeIndex = self._emptyQModelIndex
# lock it
for item in items:
if not item.path in self._lockedItems and not item.path in invalidPaths:
self._lockedItems.append(item.path)
result = self._findEffectedRowIntervals(item)
self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
for path, length in result:
index = self.indexFromPath(path)
if length > 0:
self.beginRemoveRows(index, 0, length - 1)
self.endRemoveRows()
if path in self._lockedItems:
self.beginInsertRows(index, 0, 0)
self.endInsertRows()
self.emit(QtCore.SIGNAL("layoutChanged()"))
def _findEffectedRowIntervals(self, item, result=None):
""" Determines the already retrieved children of the given item. """
if result is None:
result = list()
if item.childrenPopulated and item.isCollection:
children = item.getChildren()
for child in children:
self._findEffectedRowIntervals(child, result)
childrenLength = len(children)
result.append((item.path, childrenLength))
return result
def unlock(self, index):
""" Unlocks the given index. """
item = self.nodeFromIndex(index)
if item.path in self._lockedItems:
result = self._findEffectedRowIntervals(item)
result.reverse()
self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
self._lockedItems.remove(item.path)
for path, length in result:
if not path is None:
index = self.indexFromPath(path)
currentRowCount = self.rowCount(index)
if length > currentRowCount:
self.beginInsertRows(index, currentRowCount + 1, length)
self.endInsertRows()
elif length < currentRowCount:
self.beginRemoveRows(index, 0, currentRowCount - length - 1)
self.endRemoveRows()
self.emit(QtCore.SIGNAL("layoutChanged()"))
def clear(self):
""" Cleans up everything and clears the model indexes. """
self._repository.release()
self._repository = None
HistoryModel.clear(self)
self.reset()
self._actionHandler.clipboard.clear()
self._lockedItems = list()
def index(self, row, column, parent):
""" L{index<PyQt4.QtCore.QAbstractItemModel.index>} """
index = self._emptyQModelIndex
item = self.nodeFromIndex(parent)
if not item is None:
try:
if self._childrenPopulator.childrenPopulated(item):
child = item.getChildren()[row]
if not child.isCreated or item.path in self._lockedItems:
if child.isCollection:
child = self._placeHolderCollection
else:
child = self._placeHolderLeaf
index = self.createIndex(row, column, child)
elif row == 0:
index = self.createIndex(row, column, self._placeHolderCollection)
except IndexError:
index = self._emptyQModelIndex
return index
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
""" L{headerData<PyQt4.QtCore.QAbstractItemModel.headerData>} """
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal:
return QtCore.QVariant(self._headers[section])
elif role == QtCore.Qt.TextAlignmentRole:
return QtCore.QVariant(int(QtCore.Qt.AlignLeft))
return self._emptyQVariant
def data(self, index, role=0):
""" L{data<PyQt4.QtCore.QAbstractItemModel.data>} """
data = self._emptyQVariant
item = _Item(self.nodeFromIndex(index))
if role == 0: # QtCore.Qt.DisplayRole
data = self._determineDisplayRole(index.column(), item)
elif role == 1: # QtCore.Qt.DecorationRole
data = self._determineDecoratorRole(index.column(), item)
elif role == 3: # QtCore.Qt.ToolTipRole
data = self._determineToolTipRole(index.column(), item)
return data
def _determineDisplayRole(self, column, item):
""" Determines value of the different columns which has to be displayed. """
data = self._emptyQVariant
value = getattr(item, self._headerIds[column])
if not value is None:
if item.isLink and self._headerIds[column] == "name":
if sys.platform == "win32" and item.uri.startswith("file:///") and value.endswith(".lnk"):
value = value[:-4]
data = QtCore.QVariant(determineDisplayRepresentation(value, self._headerIds[column]))
return data
def _determineDecoratorRole(self, column, item):
""" Determines icon associated with the specific item. """
data = self._emptyQVariant
if column == 0:
if item.item != self._placeHolderCollection and item.item != self._placeHolderLeaf:
if item.isLink and not item.linkTargetPath is None:
linkTargetIndex = self.indexFromPath(item.linkTargetPath)
if linkTargetIndex == self._emptyQModelIndex:
item.item._linkTarget = None
else:
item.item._linkTarget = self.nodeFromIndex(linkTargetIndex)
data = QtCore.QVariant(self._iconProvider.iconForItem(item))
return data
def _determineToolTipRole(self, column, item):
""" Determines the tool tip displayed for the specific item. """
data = self._emptyQVariant
if column == 0:
if item.state in [ITEM_STATE_MIGRATED, ITEM_STATE_UNSUPPORTED_STORAGE_INTERFACE]:
data = QtCore.QVariant("Data is currently not accessible.")
elif item.state in [ITEM_STATE_ARCHIVED, ITEM_STATE_ARCHIVED_MEMBER, ITEM_STATE_ARCHIVED_READONLY]:
data = QtCore.QVariant("Data is archived.")
elif item.state in [ITEM_STATE_INACCESSIBLE]:
data = QtCore.QVariant("Data is managed by an external inaccessible storage system.")
elif item.isLink:
if item.linkTargetPath is None:
data = QtCore.QVariant("No link target information available.")
else:
data = QtCore.QVariant("Link Target: " + item.linkTargetPath)
else:
data = QtCore.QVariant(item.path)
else:
try:
property_ = item.properties[self._headerIds[column]]
except KeyError:
data = self._emptyQVariant
else:
data = QtCore.QVariant(determinePropertyDefinitionToolTip(property_.propertyDefinition))
return data
def hasChildren(self, index):
""" L{hasChildren<PyQt4.QtCore.QAbstractItemModel.hasChildren>} """
item = self.nodeFromIndex(index)
if item is None:
return False
else:
return item.isCollection
def columnCount(self, _):
""" L{columnCount<PyQt4.QtCore.QAbstractItemModel.columnCount>} """
return len(self._headers)
def rowCount(self, index):
""" L{rowCount<PyQt4.QtCore.QAbstractItemModel.rowCount>} """
rowCount = 0
item = self.nodeFromIndex(index)
if not item is None and item.isCollection:
if item.path in self._lockedItems:
rowCount = 1
elif self._childrenPopulator.childrenPopulated(item): # only when children are populated the real row count is calculated
rowCount = len(item.getChildren())
else:
rowCount = 1
return rowCount
def canFetchMore(self, index):
"""
@see: L{canFetchMore<PyQt4.QtCore.QAbstractItemModel.canFetchMore>}
@note: This method effectively populates child items.
The items are only populated when the parent item is expanded.
This behavior has been implemented to avoid expensive rowCount calls by Qt tree views
which are determining the row count of the expanded item and all its children.
"""
item = self.nodeFromIndex(index)
if not item is None:
if not self._childrenPopulator.childrenPopulated(item) \
and not self._childrenPopulator.isWorkingOn(item):
return True
return False
def fetchMore(self, index):
""" @see: L{fetchMore<PyQt4.QtCore.QAbstractItemModel.fetchMore>} """
item = self.nodeFromIndex(index)
if not item is None:
self._childrenPopulator.populateChildren(item)
def parent(self, index):
""" @see: L{parentIndex<PyQt4.QtCore.QAbstractItemModel.parentIndex>} """
parentIndex = self._emptyQModelIndex
item = self.nodeFromIndex(index)
try:
parentItem = item.parent
except (AttributeError, ItemError):
parentItem = None
if not parentItem is None:
if not parentItem.parent is None:
parentIndex = self._index(parentItem.path)
return parentIndex
def flags(self, index):
""" L{flags<PyQt4.QtCore.QAbstractItemModel.flags>} """
item = self.nodeFromIndex(index)
if item == self._placeHolderCollection or item == self._placeHolderLeaf:
return QtCore.Qt.NoItemFlags
flags = QtCore.Qt.ItemIsSelectable
flags |= QtCore.Qt.ItemIsEnabled
if index.column() == 0:
if not (item.name.endswith(":") and sys.platform == "win32"):
flags |= QtCore.Qt.ItemIsEditable
return flags
def sort(self, column, order=QtCore.Qt.AscendingOrder):
""" L{sort<PyQt4.QtCore.QAbstractItemModel.sort>} """
self._sortedColumn = column
self._sortedOrder = order
parent = self.nodeFromIndex(self.activeIndex)
if self._childrenPopulator.childrenPopulated(parent):
self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
self.sortItems(parent.getChildren(), column, order)
self.emit(QtCore.SIGNAL("layoutChanged()"))
else:
self._childrenPopulator.populateChildren(parent, callback=self._createSortCallback(parent, column, order))
def _createSortCallback(self, parent, column, order):
""" Creates a sort call back function. """
def _sortCallback():
""" Performs sorting. """
children = parent.getChildren()
self.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
self.sortItems(children, column, order)
self.emit(QtCore.SIGNAL("layoutChanged()"))
self.activeIndex = self.activeIndex
return _sortCallback
def sortItems(self, items, column, order):
""" Sorts the given set of item. """
if self.initialized:
items.sort(reverse=order==QtCore.Qt.DescendingOrder,
cmp=self._createCompareItemProperties(column))
def _createCompareItemProperties(self, column):
""" Creates the comparison function for the given column. """
def _compareItemProperties(x, y):
""" Performs the comparison. """
propertyValueX = getattr(_Item(x), self._headerIds[column])
propertyValueY = getattr(_Item(y), self._headerIds[column])
if isinstance(propertyValueX, StringTypes) \
and isinstance(propertyValueY, StringTypes):
return cmp(propertyValueX.lower(), propertyValueY.lower())
else:
return cmp(propertyValueX, propertyValueY)
return _compareItemProperties
def nodeFromIndex(self, index):
"""
Returns the node under the given index.
@param index: The index of the node that has to be returned.
@type index: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@return: The item for the given index.
@rtype: L{BaseItem<datafinder.core.items.base.BaseItem>}
"""
item = index.internalPointer()
if item is None:
if self.initialized:
return self._repository.root
else:
return None
return item
def nodeFromPath(self, path):
"""
Returns the node under the given path.
@param path: The path of node that has to be returned.
@type path: C{unicode}
@return: The item of the given path.
@rtype: L{BaseItem<datafinder.core.items.base.BaseItem>}
"""
index = self._index(unicode(path))
return self.nodeFromIndex(index)
def indexFromPath(self, path, column=0):
"""
Returns the index for the given path.
When the path does not exist the root index is returned.
@param path: Path identifying the item.
@type path: C{unicode}
@return: Index referring to the item.
@rtype: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
return self._index(path, column)
def _index(self, path, column=0):
"""
Converts a given path in the associated C{QtCore.QModelIndex}.
@param path: Path that that has to be converted.
@type path: C{unicode}
@param column: Specifies the column of the returned index.
@type column: C{int}
@return: Associated index of the path.
@rtype: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
path = path.replace("\\", "/")
if not path.startswith("/"):
path = "/" + path
if path.endswith("/") and len(path) > 1:
path = path[:-1]
return self._find(path, self._emptyQModelIndex, column)
def _find(self, path, parentIndex, column=0):
"""
        Traverses down the tree, starting at the given parent item.
        The path parameter contains the absolute path to resolve.
        If the path is correct, the corresponding C{QtCore.QModelIndex}
        is returned; otherwise the default C{QtCore.QModelIndex} is returned.
@param path: Absolute path of the item.
@type path: C{unicode}
@param parentIndex: Parent item which marks the search start.
@type parentIndex: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
@return: Returns the index for the given token string.
@rtype: L{QModelIndex<PyQt4.QtCore.QModelIndex>}
"""
index = self._emptyQModelIndex
if not path is None and path != "/":
parent = self.nodeFromIndex(parentIndex)
if not parent is None:
if not self._childrenPopulator.childrenPopulated(parent):
self._childrenPopulator.populateChildren(parent, True)
children = parent.getChildren()
for row, child in enumerate(children):
childIndex = self.createIndex(row, column, child)
if not child.path is None:
if path.lower() == child.path.lower():
return childIndex
if path[:len(child.path) + 1].lower() == child.path.lower() + "/":
return self._find(path, childIndex, column)
else:
print "Invalid child found '%s', '%s'." % (parent.path, child.name)
return index
@property
def initialized(self):
""" Initialized flag. """
initialized = False
if not self._repository is None:
initialized = True
return initialized
@property
def repository(self):
""" Returns the underlying repository instance. """
return self._repository
@property
def preferences(self):
""" Returns the global preferences instance. """
return self._preferences
@property
def iconProvider(self):
""" Return the icon provider of the repository. """
return self._iconProvider
def __getattr__(self, name):
""" Delegates to the action handler. """
if self.initialized:
return getattr(self._actionHandler, name)
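# Typical lookup sketch (illustrative; 'model' is an initialized
# RepositoryModel and the path is an arbitrary example):
#
#   index = model.indexFromPath(u"/project/data/run42")
#   item = model.nodeFromIndex(index)      # the ItemBase behind the index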
class _ChildrenPopulator(object):
""" Helper class allowing synchronous and asynchronous retrieval of item children. """
def __init__(self, repositoryModel):
""" Constructor. """
self._repositoryModel = repositoryModel
self._workerThreads = dict()
self._logger = getDefaultLogger()
def childrenPopulated(self, item):
"""
Determines whether the children of the item are already retrieved.
@param item: Item whose children should be populated.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
        @return: C{True} when the children have already been retrieved.
@rtype: C{bool}
"""
childrenPopulated = False
if item.childrenPopulated and not item.path in self._workerThreads:
childrenPopulated = True
return childrenPopulated
def isWorkingOn(self, item):
"""
Determines whether the children of the item are currently retrieved.
@param item: Item whose children should be populated.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
@return: C{True} when the children are currently retrieved.
@rtype: C{bool}
"""
return item.path in self._workerThreads
def populateChildren(self, item, synchronous=False, callback=None):
"""
Populates the children of the given item asynchronously.
@param item: Item whose children should be populated.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
@param synchronous: Flag determining whether the call is synchronously or not.
@type synchronous: C{bool}
@param callback: Call back function that is called when populating children is done asynchronously.
@type callback: C{function}
"""
if not self.childrenPopulated(item) and not self.isWorkingOn(item):
if synchronous:
self._workerThreads[item.path] = ""
try:
item.getChildren()
except ItemError, error:
self._logger.error(error.message)
callbacks = [self._createPopulateCallback(item), callback]
self._workerThreads[item.path] = startNewQtThread(item.getChildren, callbacks)
def _createPopulateCallback(self, item):
""" Create a call back function for the specific item. """
def _populateCallback():
"""
This is the call back function for the corresponding
thread querying the children of a specific item.
"""
if not item.path is None:
try:
numberOfItems = len(item.getChildren())
except ItemError:
numberOfItems = 0
index = self._repositoryModel.indexFromPath(item.path)
del self._workerThreads[item.path]
currentRowCount = self._repositoryModel.rowCount(index)
self._repositoryModel.emit(QtCore.SIGNAL("layoutAboutToBeChanged()"))
if numberOfItems > currentRowCount:
self._repositoryModel.beginInsertRows(index, currentRowCount + 1, numberOfItems)
self._repositoryModel.endInsertRows()
elif numberOfItems < currentRowCount:
self._repositoryModel.beginRemoveRows(index, 0, currentRowCount - numberOfItems - 1)
self._repositoryModel.endRemoveRows()
self._repositoryModel.emit(QtCore.SIGNAL("layoutChanged()"))
return _populateCallback
class _Item(object):
""" Helper class allowing access to item properties."""
def __init__(self, item):
"""
Constructor.
@param item: Item to wrap.
@type item: L{ItemBase<datafinder.core.item.base.ItemBase>}
"""
self._item = item
def __getattr__(self, name):
""" Overwrites the default attribute behavior. """
if hasattr(self._item, name):
return getattr(self._item, name)
else:
try:
prop = self._item.properties[name]
return prop.value
except (KeyError, TypeError, AttributeError):
return None
@property
def iconName(self):
""" Getter for the icon name. """
iconName = None
source = self._item
if self.isLink and not self.linkTarget is None:
source = self.linkTarget
if not source.dataType is None:
iconName = source.dataType.iconName
if not source.dataFormat is None:
iconName = source.dataFormat.iconName
return iconName
@property
def item(self):
""" Getter for the encapsulated item. """
return self._item
| bsd-3-clause | -2,833,630,415,100,707,300 | 38.888567 | 133 | 0.581369 | false |
dhylands/bioloid3 | tests/test_packet.py | 1 | 5344 | #!/usr/bin/env python3
# This file tests the packet parser
import unittest
import binascii
from bioloid.packet import Command, ErrorCode, Id, Packet
class TestId(unittest.TestCase):
def test_id(self):
id = Id(1)
self.assertEqual(id.get_dev_id(), 1)
self.assertEqual(repr(id), 'Id(0x01)')
self.assertEqual(str(id), '0x01')
id = Id(Id.BROADCAST)
self.assertEqual(id.get_dev_id(), 254)
self.assertEqual(repr(id), 'Id(0xfe)')
self.assertEqual(str(id), 'BROADCAST')
id = Id(Id.INVALID)
self.assertEqual(id.get_dev_id(), 255)
self.assertEqual(repr(id), 'Id(0xff)')
self.assertEqual(str(id), 'INVALID')
class TestCommand(unittest.TestCase):
def test_command(self):
cmd = Command(Command.PING)
self.assertEqual('Command(0x01)', repr(cmd))
self.assertEqual('PING', str(cmd))
cmd = Command(0x10)
self.assertEqual('Command(0x10)', repr(cmd))
self.assertEqual('0x10', str(cmd))
self.assertEqual(Command.PING, Command.parse('PING'))
self.assertRaises(ValueError, Command.parse, 'xxx')
class TestErrorCode(unittest.TestCase):
def test_error_code(self):
err = ErrorCode(ErrorCode.RESERVED)
self.assertEqual('ErrorCode(0x80)', repr(err))
self.assertEqual('Reserved', str(err))
err = ErrorCode(ErrorCode.INSTRUCTION)
self.assertEqual('ErrorCode(0x40)', repr(err))
self.assertEqual('Instruction', str(err))
err = ErrorCode(ErrorCode.NONE)
self.assertEqual('ErrorCode(0x00)', repr(err))
self.assertEqual('None', str(err))
err = ErrorCode(ErrorCode.NOT_DONE)
self.assertEqual('ErrorCode(0x100)', repr(err))
self.assertEqual('NotDone', str(err))
err = ErrorCode(ErrorCode.TIMEOUT)
self.assertEqual('ErrorCode(0x101)', repr(err))
self.assertEqual('Timeout', str(err))
err = ErrorCode(ErrorCode.TOO_MUCH_DATA)
self.assertEqual('ErrorCode(0x102)', repr(err))
self.assertEqual('TooMuchData', str(err))
err = ErrorCode(0x7f)
self.assertEqual('ErrorCode(0x7f)', repr(err))
self.assertEqual('All', str(err))
err = ErrorCode(ErrorCode.OVERLOAD | ErrorCode.RANGE)
self.assertEqual('ErrorCode(0x28)', repr(err))
self.assertEqual('Range,Overload', str(err))
self.assertEqual(ErrorCode.NONE, ErrorCode.parse('none'))
self.assertEqual(0x7f, ErrorCode.parse('ALL'))
self.assertEqual(ErrorCode.CHECKSUM, ErrorCode.parse('CheckSum'))
self.assertRaises(ValueError, ErrorCode.parse, 'xxx')
class TestPacket(unittest.TestCase):
def parse_packet(self, data_str, expected_err=ErrorCode.NONE, status_packet=False):
data = binascii.unhexlify(data_str.replace(' ', ''))
pkt = Packet(status_packet=status_packet)
for i in range(len(data)):
byte = data[i]
err = pkt.process_byte(byte)
if i + 1 == len(data):
self.assertEqual(err, expected_err)
else:
self.assertEqual(err, ErrorCode.NOT_DONE)
return pkt
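    # Byte layout of a command packet such as 'ff ff fe 04 03 03 01 f6'
    # (Dynamixel-style framing, as exercised by the tests below):
    #   ff ff   header
    #   fe      device id (broadcast)
    #   04      length = number of parameters + 2
    #   03      command (WRITE)
    #   03 01   parameters (register 0x03, value 0x01)
    #   f6      checksum = ~(0xfe + 0x04 + 0x03 + 0x03 + 0x01) & 0xff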
def test_cmd_bad_checksum(self):
self.parse_packet('ff ff fe 04 03 03 01 f5', ErrorCode.CHECKSUM)
def test_cmd_set_id(self):
pkt = self.parse_packet('ff ff fe 04 03 03 01 f6')
self.assertEqual(pkt.dev_id, 0xfe)
self.assertEqual(pkt.cmd, Command.WRITE)
self.assertEqual(pkt.param_len(), 2)
self.assertEqual(pkt.params(), bytearray([0x03, 0x01]))
self.assertEqual(0x03, pkt.param_byte(0))
self.assertEqual(0x01, pkt.param_byte(1))
def test_ping_cmd(self):
pkt = self.parse_packet('ff ff 01 02 01 fb')
self.assertEqual(pkt.dev_id, 0x01)
self.assertEqual(pkt.cmd, Command.PING)
self.assertEqual(pkt.param_len(), 0)
def test_ping_cmd_checksum(self):
pkt = self.parse_packet('ff ff 01 02 01 ff', expected_err=ErrorCode.CHECKSUM)
def test_ping_rsp(self):
pkt = self.parse_packet('ff ff 01 02 00 fc', status_packet=True)
self.assertEqual(pkt.dev_id, 0x01)
self.assertEqual(pkt.error_code(), ErrorCode.NONE)
self.assertEqual(pkt.param_len(), 0)
    # Error code shouldn't be included in the checksum
def test_ping_error_rsp(self):
pkt = self.parse_packet('ff ff 01 02 04 fc', status_packet=True)
self.assertEqual(pkt.dev_id, 0x01)
self.assertEqual(pkt.error_code(), ErrorCode.OVERHEATING)
self.assertEqual(pkt.param_len(), 0)
self.assertEqual(pkt.error_code_str(), 'OverHeating')
def test_ping_cmd_noise(self):
pkt = self.parse_packet('00 ff ff 01 02 01 fb')
self.assertEqual(pkt.dev_id, 0x01)
self.assertEqual(pkt.cmd, Command.PING)
self.assertEqual(pkt.param_len(), 0)
pkt = self.parse_packet('00 ff 00 ff ff 01 02 01 fb')
self.assertEqual(pkt.dev_id, 0x01)
self.assertEqual(pkt.cmd, Command.PING)
self.assertEqual(pkt.param_len(), 0)
pkt = self.parse_packet('ff 00 ff ff ff 01 02 01 fb')
self.assertEqual(pkt.dev_id, 0x01)
self.assertEqual(pkt.cmd, Command.PING)
self.assertEqual(pkt.param_len(), 0)
if __name__ == '__main__':
unittest.main()
| mit | -5,369,088,837,575,138,000 | 34.626667 | 87 | 0.627246 | false |
annoviko/pyclustering | pyclustering/cluster/tests/unit/ut_somsc.py | 1 | 4723 | """!
@brief Unit-tests for SOM-SC algorithm.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest
# Generate images without having a window appear.
import matplotlib
matplotlib.use('Agg')
from pyclustering.cluster.somsc import somsc
from pyclustering.cluster.tests.somsc_templates import SyncnetTestTemplates
from pyclustering.samples.definitions import SIMPLE_SAMPLES
class SomscUnitTest(unittest.TestCase):
def testClusterAllocationSampleSimple1(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], False)
def testClusterOneAllocationSampleSimple1(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, [10], False)
def testClusterAllocationSampleSimple2(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [10, 5, 8], False)
def testClusterOneAllocationSampleSimple2(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, [23], False)
def testClusterAllocationSampleSimple3(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [10, 10, 10, 30], False)
def testClusterOneAllocationSampleSimple3(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 1, [60], False)
def testClusterAllocationSampleSimple4(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5, [15, 15, 15, 15, 15], False)
def testClusterOneAllocationSampleSimple4(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 1, [75], False)
def testClusterAllocationSampleSimple5(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 4, [15, 15, 15, 15], False)
def testClusterOneAllocationSampleSimple5(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 1, [60], False)
def testClusterOneDimensionSampleSimple7(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, [10, 10], False)
def testClusterOneDimensionSampleSimple8(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE8, 4, None, False)
def testWrongNumberOfCentersSimpleSample1(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 3, None, False)
def testWrongNumberOfCentersSimpleSample2(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 4, None, False)
def testClusterTheSameData1(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, 2, [10, 20], False)
def testClusterTheSameData2(self):
SyncnetTestTemplates().templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, [5, 5, 5], False)
def testClusterAllocationOneDimensionData(self):
SyncnetTestTemplates().templateClusterAllocationOneDimensionData(False)
def test_incorrect_data(self):
self.assertRaises(ValueError, somsc, [], 1, 1)
def test_incorrect_epouch(self):
self.assertRaises(ValueError, somsc, [[0], [1], [2]], 1, -1)
def test_incorrect_amount_clusters(self):
self.assertRaises(ValueError, somsc, [[0], [1], [2]], 0, 1)
def test_predict_one_point(self):
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[0.3, 0.2]], [0], False)
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[4.1, 1.1]], [1], False)
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[2.1, 1.9]], [2], False)
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[2.1, 4.1]], [3], False)
def test_predict_two_points(self):
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[0.3, 0.2], [2.1, 1.9]], [0, 2], False)
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, [[2.1, 4.1], [2.1, 1.9]], [3, 2], False)
def test_predict_four_points(self):
to_predict = [[0.3, 0.2], [4.1, 1.1], [2.1, 1.9], [2.1, 4.1]]
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, to_predict, [0, 1, 2, 3], False)
def test_predict_five_points(self):
to_predict = [[0.3, 0.2], [4.1, 1.1], [3.9, 1.1], [2.1, 1.9], [2.1, 4.1]]
SyncnetTestTemplates().predict(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4, to_predict, [0, 1, 1, 2, 3], False)
| gpl-3.0 | -1,462,007,474,119,726,600 | 43.854369 | 119 | 0.707178 | false |
ChrisFadden/PartyTowers | webrouter.py | 1 | 4597 | # This file runs the websockets.
import string, cgi, time, random
import sys
sys.path.insert(0, 'PyWebPlug')
from wsserver import *
from time import sleep
def setupMessages():
return
class Client:
def __init__(self, socket):
self.socket = socket
self.needsConfirmation = True
def handle(self):
        if not self.socket:
            return
        try:
            data = self.socket.readRaw()
        except:
            self.socket = None
            return
        if len(data) == 0:
            return
print("Data:", data)
if self.needsConfirmation:
code = data[3:7]
if code == "0000":
print("Becoming a host!")
self.becomeHost()
else:
print("Trying to find host", code)
self.host = findHost(code)
if self.host:
print("Found host.")
self.confirm()
else:
print("No host found.")
else:
if self.host.socket:
try:
self.host.socket.send(data)
except:
self.host.socket = None
print("Host's socket is closed.")
# This is called to confirm to the client that they have been accepted,
# after they send us their details.
def confirm(self):
self.pID = self.host.getNextpID()
self.host.players[self.pID] = self
self.needsConfirmation = False
self.sID = extend(self.pID, 2)
self.socket.send("999" + self.sID)
self.host.socket.send("998" + self.sID)
def becomeHost(self):
host = Host(self.socket, newHostCode())
clients.remove(self)
hosts.append(host)
def disconnect(self):
print("Lost client...")
clients.remove(self)
self.socket = None
return
class Host:
def __init__(self, socket, hostCode):
self.socket = socket
self.hostCode = hostCode
self.players = {}
self.pID = 0
self.socket.send("999" + str(self.hostCode))
self.writingTo = 0
self.data = ""
def getNextpID(self):
self.pID += 1
return self.pID
def handle(self):
        if not self.socket:
            return
        try:
            self.data += self.socket.readRaw()
        except:
            self.socket = None
            return
        if len(self.data) == 0:
            return
print("Host says: "+self.data)
ind = self.data.find("*")
if (ind < 0):
return
if self.writingTo == 0:
try:
self.writingTo = int(self.data[0:2])
except:
self.data = self.data[1:]
self.handle()
                return
pID = self.writingTo
        if self.players.get(pID):
            if self.players[pID].socket:
                try:
                    self.players[pID].socket.send(self.data[2:ind])
                except:
                    self.players[pID].socket = None
                    print("Client's socket closed.")
        else:
            print("Host", self.hostCode, "tried to send a message to non-existent player", pID)
self.data = self.data[ind+2:]
self.writingTo = 0
def disconnect(self):
print("Lost host.")
hosts.remove(self)
self.socket = None
return
def findHost(code):
for host in hosts:
if host.hostCode == code:
return host
return None
def newHostCode():
chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
code = ''.join(chars[int(random.random()*26)] for _ in range(4))
if findHost(code):
return newHostCode()
return code
def extend(v, l):
out = str(v)
while len(out) < l:
out = "0" + out
return out
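# Example values (illustrative):
#   extend(7, 2)   -> "07"     # zero-pads the player id for the wire format
#   newHostCode()  -> "QXKZ"   # e.g.: four random letters, retried until unused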
# This handles a new client.
# We need to hand them to an object
# so that we can read and write from it
def handle(socket):
global clients
client = Client(socket)
clients.append(client)
def main():
global gameStarted
global stage
try:
setupMessages()
server = startServer()
while True:
newClient = handleNetwork()
if newClient:
handle(newClient)
for client in clients:
client.handle()
for host in hosts:
host.handle()
sleep(0.01)
except KeyboardInterrupt:
print(' received, closing server.')
server.close()
clients = []
hosts = []
pID = 0
if __name__ == '__main__':
main()
| mit | 5,511,224,919,330,852,000 | 24.971751 | 96 | 0.510115 | false |
BrainTech/openbci | obci/interfaces/hybrid/p300etr/signalAnalysis.py | 1 | 3428 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Module dedicated to signal analysis.
Author: Dawid Laszuk
Contact: [email protected]
"""
from scipy.signal import butter, buttord
from scipy.signal import filtfilt, lfilter
from scipy.signal import cheb2ord, cheby2
import numpy as np
import sys
class DataAnalysis(object):
def __init__(self, fs):
# Define constants
self.fs = float(fs)
self.initConst()
# Set filter parameters
self.set_lowPass_filter()
self.set_highPass_filter()
def initConst(self, avrM=1, conN=1, csp_time=[0,1]):
self.avrM = avrM
self.conN = conN
self.csp_time = csp_time
self.iInit, self.iFin = csp_time[0]*self.fs, csp_time[1]*self.fs
def prepareSignal(self, s, avrM=None):
"""
        Prepare 1D signal for analysis.
"""
if avrM == None: avrM = self.avrM
temp = s
#~ temp = (temp-temp.mean())/temp.std()
temp = self.filtrHigh(temp)
temp = self.movingAvr(temp, avrM+1)
#~ temp = self.filtrLow(temp)
#~ temp = self.movingAvr(temp, 10)
#~ temp = temp[self.iInit:self.iFin:avrM]
if self.avrM == self.fs:
pass
else:
temp = map(lambda i: temp[i], np.floor(np.linspace(self.csp_time[0], self.csp_time[1], self.avrM)*self.fs))
return np.array(temp)
def set_lowPass_filter(self, wp=20., ws=40., gpass=1., gstop=10.):
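        # Design a Butterworth low-pass filter; wp/ws are given in Hz and normalized to Nyquist.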
Nq = self.fs/2.
wp, ws = float(wp)/Nq, float(ws)/Nq
gpass, gstop = float(gpass), float(gstop)
N_filtr, Wn_filtr = buttord(wp, ws, gpass, gstop)
self.b_L, self.a_L = butter(N_filtr, Wn_filtr, btype='low')
self.N_L, self.Wn_L = N_filtr, Wn_filtr
def set_lowPass_filter_ds(self, fs_new):
Nq = self.fs/2.
N_filtr, Wn_filtr = 2., fs_new/Nq
self.b_L, self.a_L = butter(N_filtr, Wn_filtr, btype='low')
self.N_L, self.Wn_L = N_filtr, Wn_filtr
def set_highPass_filter(self, wp=5., ws=1., gpass=1., gstop=20.):
#~ wp, ws = 10., 5.
Nq = self.fs/2.
wp, ws = float(wp)/Nq, float(ws)/Nq
gpass, gstop = float(gpass), float(gstop)
#~ N_filtr, Wn_filtr = buttord(wp, ws, gpass, gstop)
N_filtr, Wn_filtr = 2, 1.5/Nq
self.b_H, self.a_H = butter(N_filtr, Wn_filtr, btype='high')
self.N_H, self.Wn_H = N_filtr, Wn_filtr
def set_bandPass_filter(self):
pass
def printInfo(self):
Nq = self.fs*0.5
print "{0}".format(sys.argv[0])
print "Low pass filter: "
print "(N, Wn*Nq) = ( {}, {})".format(self.N_L, self.Wn_L*Nq)
print "High pass filter: "
print "(N, Wn*Nq) = ( {}, {})".format(self.N_H, self.Wn_H*Nq)
def filtrLow(self, s):
return filtfilt(self.b_L, self.a_L, s)
def filtrHigh(self, s):
return filtfilt(self.b_H, self.a_H, s)
    def movingAvr(self, s, r):
        # Moving average over r samples: each output value is the mean of the
        # current sample and the r-1 preceding ones (edges padded with s[0]).
        L, r = len(s), int(r)
        s = np.array(s, dtype=float)
        temp = np.zeros(L+r)
        temp[:r] = s[0]
        temp[r:] = s
        for i in range(1, r):
            s += temp[r-i:L+r-i]
        return s/float(r)
if __name__ == "__main__":
sp = DataAnalysis(128.)
sp.printInfo()
p = range(2,40)
print sp.movingAvr(p, 4)
| gpl-3.0 | -3,986,371,602,798,707,700 | 27.566667 | 119 | 0.527421 | false |
skosukhin/spack | var/spack/repos/builtin/packages/dia/package.py | 1 | 2553 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dia(Package):
"""Dia is a program for drawing structured diagrams."""
homepage = 'https://wiki.gnome.org/Apps/Dia'
url = 'https://ftp.gnome.org/pub/gnome/sources/dia/0.97/dia-0.97.3.tar.xz'
version('0.97.3', '0e744a0f6a6c4cb6a089e4d955392c3c')
depends_on('intltool', type='build')
depends_on('[email protected]:+X')
depends_on('libxslt')
depends_on('python')
depends_on('swig')
depends_on('libsm')
depends_on('libuuid')
depends_on('libxinerama')
depends_on('libxrender')
depends_on('libxml2')
depends_on('freetype')
# TODO: Optional dependencies, not yet supported by Spack
# depends_on('libart')
# depends_on('py-pygtk', type=('build', 'run'))
def url_for_version(self, version):
"""Handle Dia's version-based custom URLs."""
return 'https://ftp.gnome.org/pub/gnome/sources/dia/%s/dia-%s.tar.xz' % (version.up_to(2), version)
def install(self, spec, prefix):
# configure, build, install:
options = ['--prefix=%s' % prefix,
'--with-cairo',
'--with-xslt-prefix=%s' % spec['libxslt'].prefix,
'--with-python',
'--with-swig']
configure(*options)
make()
make('install')
| lgpl-2.1 | 524,486,476,142,904,800 | 37.681818 | 107 | 0.62358 | false |
iansmcf/busybees | busybees/hive.py | 1 | 3541 | import threading
import logging
import operator
import locked_queue
import queen_dir
import queen
import worker
# imports for high-level scheduling
import random
import job_list
# configure debug logging to /tmp/hive.log
logging.basicConfig(filename='/tmp/hive.log', level=logging.DEBUG,
format='%(asctime)s (%(threadName)-2s) %(message)s')
class Hive(object):
# creates the 'global' queen directory using QueenDir
def __init__(self):
self.queens=queen_dir.QueenDir()
# creates a new queen object
def create_queen(self, name, default_worker=worker.Worker):
logging.debug("Creating a queen with name \'%s\'..." %name)
lock = locked_queue.LockedQueue()
sched = job_list.JobList()
cq = queen.Queen(name, lock, sched, default_worker)
c = threading.Thread(name="%s:queen"%name,target=cq.main)
self.queens.add_queen(name, c, lock, sched)
# kill gracefully, sending a special "die"-marked queue
def kill_queen(self,name):
        # only send the kill command if the queen is running and not already marked for death
if self.queens.get_queen(name).isAlive():
if self.queens.get_stat(name) != "ded":
logging.debug("Waiting for the lock in order to kill %s..."%name)
self.queens.get_lock(name).acquire()
logging.debug("Sending death command to %s..."%name)
self.queens.get_lock(name).append('die')
self.queens.set_stat(name, "ded")
self.queens.get_lock(name).cond.notify()
self.queens.get_lock(name).release()
else:
logging.debug("Kill command for queen \'%s\' failed: already marked for death." %name)
else:
logging.debug("ERROR: I tried to kill a queen (%s) who was unstarted!"%name)
# starts the queen using the QueenDir, but only if the queen is
# not alive, else prints an error
def start_queen(self, name):
if not self.queens.get_queen(name).isAlive():
logging.debug("Starting the queen with name \'%s\'..."%name)
self.queens.get_queen(name).start()
else:
logging.debug("ERROR: I tried to start a queen (%s) who was already alive!"%name)
# Appends some instructions to the locked queue as a tuple
# of the instruction value(s) and the worker class to use.
# The format of the instructions will differ depending
# on the particular implementation of the queen.
# All instructions provided will use the worker type specified.
# Accepts lists gracefully as individual commands (probably).
def instruct_queen(self, name, instructions, worker_type='default'):
self.queens.get_lock(name).acquire()
if type(instructions) == list:
for i in instructions:
self.queens.get_lock(name).append((i,worker_type))
else:
self.queens.get_lock(name).append((instructions,worker_type))
self.queens.get_lock(name).cond.notify()
self.queens.get_lock(name).release()
# die gracefully, returns a dict of results also
def die(self):
logging.debug("Trying to die...")
for queen in self.queens.enum_queens():
self.kill_queen(queen)
for queen in self.queens.enum_queens():
self.queens.get_queen(queen).join()
if not self.queens.get_queen(queen).isAlive():
logging.debug("Joined: %s" % queen)
else:
assert not self.queens.get_queen(queen).isAlive(), "Joined the queen \'%s\', but the thread is still alive." % queen
results = {}
for queen in self.queens.enum_queens():
results[queen] = self.get_result(queen)
logging.debug("RIP everyone")
return results
# log results from a queen
def get_result(self, name):
self.queens.set_result(name, self.queens.get_lock(name).access())
return self.queens.get_result(name)
# return list of queens?
def get_queens(self):
return self.queens.enum_queens()
| bsd-3-clause | -6,159,453,586,008,030,000 | 32.72381 | 120 | 0.709122 | false |
madre/analytics_nvd3 | django_nvd3/templatetags/nvd3_tags.py | 1 | 4675 | from django.template.defaultfilters import register
from django.utils.safestring import mark_safe
from django.conf import settings
from nvd3.NVD3Chart import NVD3Chart
from nvd3 import lineWithFocusChart, lineChart, \
multiBarChart, pieChart, stackedAreaChart, \
multiBarHorizontalChart, linePlusBarChart, \
cumulativeLineChart, discreteBarChart, scatterChart
@register.simple_tag
def load_chart(chart_type, series, container, kw_extra={}, *args, **kwargs):
"""Loads the Chart objects in the container.
**usage**:
{% load_chart "lineWithFocusChart" data_set "div_lineWithFocusChart" %}
**Arguments**:
* ``chart_type`` - Give chart type name eg. lineWithFocusChart/pieChart
* ``series`` - Data set which are going to be plotted in chart.
* ``container`` - Chart holder in html page.
**kw_extra settings**::
    * ``x_is_date`` - if enabled the x-axis will be displayed in date format
* ``x_axis_format`` - set the x-axis date format, ie. "%d %b %Y"
* ``tag_script_js`` - if enabled it will add the javascript tag '<script>'
    * ``jquery_on_ready`` - if enabled the javascript will only run once the page has loaded;
      this uses the jQuery library, so make sure jQuery is added to the template.
* ``color_category`` - Define color category (eg. category10, category20, category20c)
* ``chart_attr`` - Custom chart attributes
"""
if not chart_type:
return False
if 'x_is_date' not in kw_extra:
kw_extra['x_is_date'] = False
if 'x_axis_format' not in kw_extra:
kw_extra['x_axis_format'] = "%d %b %Y"
if 'color_category' not in kw_extra:
kw_extra['color_category'] = "category20"
if 'tag_script_js' not in kw_extra:
kw_extra['tag_script_js'] = True
if 'chart_attr' not in kw_extra:
kw_extra['chart_attr'] = {}
# set the container name
kw_extra['name'] = str(container)
# Build chart
chart = eval(chart_type)(**kw_extra)
xdata = series['x']
y_axis_list = [k for k in series.keys() if k.startswith('y')]
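    # The series dict is expected to hold data under keys 'y1', 'y2', ... with optional
    # 'name<N>', 'extra<N>' and 'kwargs<N>' entries describing each series.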
if len(y_axis_list) > 1:
# Ensure numeric sorting
y_axis_list = sorted(y_axis_list, key=lambda x: int(x[1:]))
for key in y_axis_list:
ydata = series[key]
axis_no = key.split('y')[1]
name = series['name' + axis_no] if series.get('name' + axis_no) else None
extra = series['extra' + axis_no] if series.get('extra' + axis_no) else {}
kwargs = series['kwargs' + axis_no] if series.get('kwargs' + axis_no) else {}
chart.add_serie(name=name, y=ydata, x=xdata, extra=extra, **kwargs)
chart.display_container = False
chart.buildcontent()
html_string = chart.htmlcontent + '\n'
return mark_safe(html_string)
@register.simple_tag
def include_container(include_container, height=400, width=600):
"""
    Include the HTML for the chart container.
    This will output something like:
<div id="containername"><svg style="height:400px;width:600px;"></svg></div>
**usage**:
{% include_container "lineWithFocusChart" 400 400 %}
**Arguments**:
* ``include_container`` - container_name
* ``height`` - Chart height
* ``width`` - Chart width
"""
chart = NVD3Chart()
chart.name = str(include_container)
chart.set_graph_height(height)
chart.set_graph_width(width)
chart.buildcontainer()
return mark_safe(chart.container + '\n')
@register.simple_tag
def include_chart_jscss(static_dir=''):
"""
    Include the javascript and css headers required by nvd3.
    This will output something like:
<link media="all" href="/static/nvd3/src/nv.d3.css" type="text/css" rel="stylesheet" />
<script src="/static/d3/d3.min.js" type="text/javascript"></script>
<script src="/static/nvd3/nv.d3.min.js" type="text/javascript"></script>
**usage**:
{% include_chart_jscss 'newfies' %}
**Arguments**:
    * ``static_dir`` - optional sub-directory name for static files (the current headers link to CDN-hosted d3/nvd3 assets)
"""
if static_dir:
static_dir += '/'
chart = NVD3Chart()
chart.header_css = [
'<link media="all" href="%s" type="text/css" rel="stylesheet" />\n' % h for h in
(
"http://cdn.bootcss.com/nvd3/1.1.13-beta/nv.d3.min.css",
)
]
chart.header_js = [
'<script src="%s" type="text/javascript"></script>\n' % h for h in
(
"http://cdn.bootcss.com/d3/3.3.8/d3.min.js",
"http://cdn.bootcss.com/nvd3/1.1.13-beta/nv.d3.min.js"
)
]
chart.buildhtmlheader()
return mark_safe(chart.htmlheader + '\n')
| apache-2.0 | -5,722,957,427,420,694,000 | 32.633094 | 95 | 0.614118 | false |
Felix5721/voc | tests/structures/test_assignment.py | 1 | 2314 | from ..utils import TranspileTestCase
class AssignmentTests(TranspileTestCase):
def test_simple_assignment(self):
self.assertCodeExecution("""
x = 42
print(x)
print('Done.')
""")
def test_multiple_assignment(self):
self.assertCodeExecution("""
x = y = 42
print(x, y)
print('Done.')
""")
def test_old_style_conditional_assignment(self):
self.assertCodeExecution("""
x = 42
y = x or 37
print(y)
x = 0
y = x or 37
print(y)
print('Done.')
""")
def test_conditional_assignment(self):
self.assertCodeExecution("""
x = 42
y = 99 if x else 37
print(y)
x = 0
y = 99 if x else 37
print(y)
print('Done.')
""")
def test_access_potentially_unassigned(self):
self.assertCodeExecution("""
x = 37
if x > 0:
y = 42
print(y)
print('Done.')
""")
def test_use_potentially_unassigned(self):
self.assertCodeExecution("""
x = 37
if y > 0:
print("Yes")
else:
print("No")
print('Done.')
""")
def test_assign_to_argument(self):
self.assertCodeExecution("""
def foo(arg):
val = arg + 10
arg = val - 2
return arg
print(foo(20))
print('Done.')
""")
def test_list_assignment(self):
self.assertCodeExecution("""
[x, y, z] = range(3)
print(x)
print(y)
print(z)
print('Done.')
""")
def test_tuple_assignment(self):
self.assertCodeExecution("""
(x, y, z) = range(3)
print(x)
print(y)
print(z)
print('Done.')
""")
def test_implied_tuple_assignment(self):
self.assertCodeExecution("""
x, y, z = range(3)
print(x)
print(y)
print(z)
print('Done.')
""")
| bsd-3-clause | -3,714,225,759,740,599,300 | 23.357895 | 52 | 0.41357 | false |
kgilmo/penning_artiq | examples/master/repository/photon_histogram.py | 1 | 1824 | from artiq import *
class PhotonHistogram(EnvExperiment):
"""Photon histogram"""
def build(self):
self.setattr_device("core")
self.setattr_device("dds_bus")
self.setattr_device("bd_dds")
self.setattr_device("bd_sw")
self.setattr_device("bdd_dds")
self.setattr_device("bdd_sw")
self.setattr_device("pmt")
self.setattr_argument("nbins", FreeValue(100))
self.setattr_argument("repeats", FreeValue(100))
self.setattr_dataset("cool_f", 230*MHz)
self.setattr_dataset("detect_f", 220*MHz)
self.setattr_dataset("detect_t", 100*us)
@kernel
def program_cooling(self):
with self.dds_bus.batch:
self.bd_dds.set(200*MHz)
self.bdd_dds.set(300*MHz)
@kernel
def cool_detect(self):
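        # One cooling/detection cycle: cool, detect at detect_f for detect_t while the PMT
        # is gated, restore the cooling settings and return the photon count.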
with parallel:
self.bd_sw.pulse(1*ms)
self.bdd_sw.pulse(1*ms)
self.bd_dds.set(self.cool_f)
self.bd_sw.pulse(100*us)
self.bd_dds.set(self.detect_f)
with parallel:
self.bd_sw.pulse(self.detect_t)
self.pmt.gate_rising(self.detect_t)
self.program_cooling()
self.bd_sw.on()
self.bdd_sw.on()
return self.pmt.count()
@kernel
def run(self):
self.program_cooling()
hist = [0 for _ in range(self.nbins)]
total = 0
for i in range(self.repeats):
n = self.cool_detect()
if n >= self.nbins:
n = self.nbins - 1
hist[n] += 1
total += n
self.set_dataset("cooling_photon_histogram", hist)
self.set_dataset("ion_present", total > 5*self.repeats,
broadcast=True)
if __name__ == "__main__":
from artiq.frontend.artiq_run import run
run()
| gpl-3.0 | 5,865,015,544,503,428,000 | 25.057143 | 63 | 0.549342 | false |
hansroh/skitai | tools/benchmark/skitaiapp.py | 1 | 1808 | #!/usr/bin/python
from atila import Atila
import shutil, os
app = Atila(__name__)
app.debug = True
app.use_reloader = True
app.securekey = "iodfjksdfkjsdhkfjsd0987987sdf"
app.realm = "Skitai API"
app.user = "app"
app.password = "1111"
app.authorization = "digest"
MULTIPART = """
<form action = "/" enctype="multipart/form-data" method="post">
<input type="hidden" name="submit-hidden" value="Genious">
<p></p>What is your name? <input type="text" name="submit-name" value="Hans Roh"></p>
<p></p>What files are you sending? <br />
<input type="file" name="file1"><br />
<input type="file" name="file2">
</p>
<input type="submit" value="Send">
<input type="reset">
</form>
"""
FORMDATA = """
<form action = "/" method="post">
<input type="hidden" name="submit-hidden" value="Genious">
<p></p>What is your name? <input type="text" name="submit-name" value="Hans Roh"></p>
<input type="submit" value="Send">
<input type="reset">
</form>
"""
import skitaipackage
app.mount ("/", skitaipackage, "package")
@app.route ("/fancy/<int:cid>/<cname>")
def fancy (was, cid, cname, zipcode):
return [
"%s - %s (%s)" % (cid, cname, zipcode),
"<hr />",
was.ab ("fancy", 200, "Skitai Inc", "31052")
]
@app.route('/')
def hello_world (was, **form):
if was.request.command == 'post':
file = form.get ("file1")
if file:
file.save ("d:\\var\\upload", dup = "o")
return str (form)
return [was.ab ("fancy", 200, "Skitai Inc", "31052"), FORMDATA, "<hr />", MULTIPART]
@app.route('/indians')
def hello_world (was, num = 8):
if was.request.command == 'get':
was.response ["Content-Type"] = "text/xml"
return was.toxml ((num,))
else:
return num
@app.route('/ping')
def ping (was, **form):
return "pong"
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5002)
| mit | 3,297,317,775,655,858,700 | 23.767123 | 87 | 0.621128 | false |
kristianeschenburg/parcellearning | parcellearning/cgat/cgat.py | 1 | 5303 | from parcellearning.conv.cgatconv import CGATConv
import numpy as np
import dgl
from dgl import data
from dgl.data import DGLDataset
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
import torch
import torch.nn as nn
import torch.nn.functional as F
class CGAT(nn.Module):
"""
Instantiate a Graph Attention Network model.
Parameters:
- - - - -
in_dim: int
input feature dimension
num_classes: int
number of output classes
num_heads: list of length (2)
number of independent attention heads
num_heads[0] = hidden heads
num_heads[1] = output heads
num_hidden: int
number of nodes per hidden layer
num_layers: int
number of layers in network
feat_drop: float
layer-wise feature dropout rate [0,1]
graph_margin: float
slack variable controlling margin of graph-structure loss
class_margin: float
slack variable controlling margin of class-boundary loss
top_k: int
number of adjacent nodes to aggregate over in message passing step
activation: torch nn functional
activation function to apply after each layer
negative_slope:
negative slope of leaky ReLU
residual:
use residual connection
"""
def __init__(self,
in_dim,
num_classes,
num_heads,
num_hidden,
num_layers,
feat_drop,
graph_margin,
class_margin,
top_k,
activation=F.leaky_relu,
negative_slope=0.2,
residual=False,
allow_zero_in_degree=True):
super(CGAT, self).__init__()
self.num_layers = num_layers
self.num_heads = num_heads[0]
self.num_out_heads = num_heads[-1]
self.cgat_layers = nn.ModuleList()
self.activation = activation
# input projection (no residual)
self.cgat_layers.append(CGATConv(in_dim,
num_hidden,
self.num_heads,
feat_drop=feat_drop,
graph_margin=graph_margin,
class_margin=class_margin,
top_k=top_k,
negative_slope=0.2,
residual=False,
activation=activation,
allow_zero_in_degree=allow_zero_in_degree))
# hidden layers
for l in range(1, num_layers):
# due to multi-head, the in_dim = num_hidden * num_heads
self.cgat_layers.append(CGATConv(num_hidden*self.num_heads,
num_hidden,
self.num_heads,
feat_drop=feat_drop,
graph_margin=graph_margin,
class_margin=class_margin,
top_k=top_k,
negative_slope=0.2,
residual=False,
activation=activation,
allow_zero_in_degree=allow_zero_in_degree))
# output projection
self.cgat_layers.append(CGATConv(num_hidden*self.num_heads,
num_classes,
self.num_out_heads,
feat_drop=feat_drop,
graph_margin=graph_margin,
class_margin=class_margin,
top_k=top_k,
negative_slope=0.2,
residual=False,
activation=activation,
allow_zero_in_degree=allow_zero_in_degree))
def forward(self, g=None, inputs=None, label=None, **kwds):
"""
Parameters:
- - - - -
g: DGL Graph
the graph
inputs: tensor
node features
Returns:
- - - - -
logits: tensor
output layer
"""
h = inputs
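        # Accumulate the graph-structure (Lg) and class-boundary (Lb) margin losses
        # contributed by each CGAT layer alongside the usual forward pass.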
Lg = 0
Lb = 0
for l in range(self.num_layers):
h = self.cgat_layers[l](g, h, label)
Lg += self.cgat_layers[l].Lg
Lb += self.cgat_layers[l].Lb
h = h.flatten(1)
# output projection
logits = self.cgat_layers[-1](g,h,label)
logits = logits.mean(1)
Lg += self.cgat_layers[-1].Lg
Lb += self.cgat_layers[-1].Lb
self.Lg = Lg
self.Lb = Lb
return logits
def save(self, filename):
"""
"""
torch.save(self.state_dict(), filename)
| mit | 6,641,609,600,299,827,000 | 32.77707 | 88 | 0.441448 | false |
GreenJoey/My-Simple-Programs | python/scrapy/BorutoScrapper/BorutoScrapper/middlewares.py | 1 | 1912 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class BorutoscrapperSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| gpl-2.0 | 7,572,180,509,972,606,000 | 33.107143 | 78 | 0.664921 | false |
zarmstrong/raspberrypi | buttondoubletime.py | 1 | 4470 | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
import thread
SDI = 11
RCLK = 12
SRCLK = 13
SDIB = 15
RCLKB = 16
SRCLKB = 18
BTN = 22
segCode = [0x3f,0x06,0x5b,0x4f,0x66,0x6d,0x7d,0x07,0x7f,0x6f,0x77,0x7c,0x39,0x5e,0x79,0x71,0x40,0x80]
#code0=0x3f
#code1=0x06
#code2=0x5b
#code3=0x4f
#code4=0x66
#code5=0x6d
#code6=0x7d
#code7=0x07
#code8=0x7f
#code9=0x6f
#codeA=0x77
#codeB=0x7c
#codeC=0x39
#codeD=0x5e
#codeE=0x79
#codeF=0x71
codeDash=0x40
#codeDot=0x80
timerStarted = False
class InterruptExecution (Exception):
pass
def print_msg():
print 'Program is running...'
print 'Please press Ctrl+C to end the program...'
def setup():
GPIO.setmode(GPIO.BOARD) #Number GPIOs by its physical location
GPIO.setup(BTN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(SDI, GPIO.OUT)
GPIO.setup(RCLK, GPIO.OUT)
GPIO.setup(SRCLK, GPIO.OUT)
GPIO.output(SDI, GPIO.LOW)
GPIO.output(RCLK, GPIO.LOW)
GPIO.output(SRCLK, GPIO.LOW)
GPIO.setup(SDIB, GPIO.OUT)
GPIO.setup(RCLKB, GPIO.OUT)
GPIO.setup(SRCLKB, GPIO.OUT)
GPIO.output(SDIB, GPIO.LOW)
GPIO.output(RCLKB, GPIO.LOW)
GPIO.output(SRCLKB, GPIO.LOW)
def my_callback(channel):
if GPIO.input(BTN):
doButton()
def doButton():
global timerStarted
print "Timer status on button press: " + str(timerStarted)
if timerStarted == True:
timerStarted = False
hc595_shiftTens(codeDash)
hc595_shiftOnes(codeDash)
else:
timerStarted = True
thread.start_new_thread( countDown, ("counterdown",1,) )
print "Timer status post button press: " + str(timerStarted)
def hc595_shiftTens(dat):
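    # Shift one byte out to the 74HC595 driving the tens digit (MSB first), then latch it.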
for bit in xrange(0, 8):
GPIO.output(SDI, 0x80 & (dat << bit))
GPIO.output(SRCLK, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(SRCLK, GPIO.LOW)
GPIO.output(RCLK, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(RCLK, GPIO.LOW)
def hc595_shiftOnes(dat):
for bit in xrange(0, 8):
GPIO.output(SDIB, 0x80 & (dat << bit))
GPIO.output(SRCLKB, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(SRCLKB, GPIO.LOW)
GPIO.output(RCLKB, GPIO.HIGH)
time.sleep(0.001)
GPIO.output(RCLKB, GPIO.LOW)
def countDown(threadName, delay):
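    # Count down from 12 to 1 on the two seven-segment digits, one second per step,
    # then show dashes again and clear the timer flag.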
global timerStarted
while timerStarted:
try:
for i in xrange(12,0,-1):
print "i = " + str(i)
if i > 9:
firstNum=str(i)[0]
print "i > 9"
secondNum=str(i)[1]
else:
firstNum=0
secondNum=i
print "firstNum is " + str(firstNum)
print "secondNum is " + str(secondNum)
print "before tens"
hc595_shiftTens(segCode[int(firstNum)])
print "before ones"
hc595_shiftOnes(segCode[int(secondNum)])
time.sleep(1)
hc595_shiftTens(codeDash)
hc595_shiftOnes(codeDash)
timerStarted = False
except InterruptExecution:
print "Interrupted"
hc595_shiftTens(codeDash)
hc595_shiftOnes(codeDash)
def mainLoop():
while True:
pass
def destroy(): # Executed when the program ends to clean up GPIO resources.
GPIO.cleanup()
if __name__ == '__main__': #Program starting from here
print_msg()
setup()
try:
GPIO.add_event_detect(BTN, GPIO.FALLING, callback=my_callback)
hc595_shiftTens(codeDash)
hc595_shiftOnes(codeDash)
#timerStarted=1
#thread.start_new_thread( countDown, ("counterdown",1,) )
mainLoop()
except KeyboardInterrupt:
destroy()
| gpl-3.0 | -138,420,117,470,972,080 | 29.408163 | 101 | 0.501566 | false |
mafshar/lost-n-found-service | app/app/settings.py | 1 | 2840 | #Mohammad Afshar, @mafshar, [email protected]
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y!2j_m(5$8^b7f_!^yt3gt6c02qwn)h_063a&f&w40mc==_w!d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'lostnfound.apps.LostnfoundConfig',
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
#
# AUTHENTICATION_BACKENDS = (
# 'backend.email-auth.EmailBackend',
# 'django.contrib.auth.backends.ModelBackend',
# )
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
execfile(os.path.join(BASE_DIR, 'db', 'db_settings.py'))
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = ( os.path.join('static'), )
#Media
PROJ_DIR = BASE_DIR + "/lostnfound"
MEDIA_ROOT = PROJ_DIR + STATIC_URL + 'media/'
MEDIA_URL = MEDIA_ROOT
| apache-2.0 | -1,344,660,734,142,850,300 | 25.296296 | 72 | 0.689789 | false |
wjsl/jaredcumulo | test/system/scalability/run.py | 1 | 7561 | #!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import os
import sys
from ConfigParser import ConfigParser
from subprocess import Popen, PIPE
class JavaConfig:
    '''Enable access to properties in the Java site config file'''
def __init__(self, fname):
self.prop_d = {}
for line in open(fname):
line = line.strip();
if line.startswith('#') or len(line) == 0:
continue
pair = line.split('=')
if len(pair) != 2:
                logging.error("Invalid property (%s)" % line)
continue
self.prop_d[pair[0].strip()] = pair[1].strip()
def get(self, prop):
return self.prop_d[prop]
def file_len(fname):
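    # Count the number of lines in a file.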
i=0
for line in open(fname):
i += 1
return i
def runTest(testName, siteConfig, testDir, numNodes, fdata):
log('Stopping accumulo')
syscall('$ACCUMULO_HOME/bin/stop-all.sh')
log('Creating slaves file for this test')
slavesPath = siteConfig.get('SLAVES')
nodesPath = testDir+'/nodes/%d' % numNodes
syscall('head -n %d %s > %s' % (numNodes,slavesPath,nodesPath))
log('Copying slaves file to accumulo config')
syscall('cp '+nodesPath+' $ACCUMULO_HOME/conf/slaves');
log('Removing /accumulo directory in HDFS')
syscall("hadoop fs -rmr /accumulo")
log('Initializing new Accumulo instance')
instance = siteConfig.get('INSTANCE_NAME')
passwd = siteConfig.get('PASSWORD')
syscall('printf "%s\nY\n%s\n%s\n" | $ACCUMULO_HOME/bin/accumulo init' % (instance, passwd, passwd))
log('Starting new Accumulo instance')
syscall('$ACCUMULO_HOME/bin/start-all.sh')
sleepTime = 30
if numNodes > 120:
sleepTime = int(numNodes / 4)
log('Sleeping for %d seconds' % sleepTime)
time.sleep(sleepTime)
log('Setting up %s test' % testName)
syscall('$ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.scalability.Run %s setup %s' % (testName, numNodes))
log('Sleeping for 5 seconds')
time.sleep(5)
log('Starting %s clients' % testName)
numThreads = numNodes
if int(numNodes) > 128:
numThreads='128'
syscall('pssh -P -h %s -p %s "$ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.scalability.Run %s client %s >/tmp/scale.out 2>/tmp/scale.err &" < /dev/null' % (nodesPath, numThreads, testName, numNodes))
log('Sleeping for 30 sec before checking how many clients started...')
time.sleep(30)
output = Popen(["hadoop fs -ls /accumulo-scale/clients"], stdout=PIPE, shell=True).communicate()[0]
num_clients = int(output.split()[1])
log('%s clients started!' % num_clients)
log('Waiting until %d clients finish.' % num_clients)
last = 0
done = 0
while done < num_clients:
time.sleep(5)
output = Popen(["hadoop fs -ls /accumulo-scale/results"], stdout=PIPE, shell=True).communicate()[0]
if not output:
sys.stdout.write('.')
sys.stdout.flush()
continue
done = int(output.split()[1])
if done != last:
sys.stdout.write('.%s' % done)
else:
sys.stdout.write('.')
sys.stdout.flush()
last = done
sys.stdout.flush()
log('\nAll clients are finished!')
log('Copying results from HDFS')
resultsDir = "%s/results/%s" % (testDir, numNodes)
syscall('hadoop fs -copyToLocal /accumulo-scale/results %s' % resultsDir)
log('Calculating results from clients')
times = []
totalMs = 0L
totalEntries = 0L
totalBytes = 0L
for fn in os.listdir(resultsDir):
for line in open('%s/%s' % (resultsDir,fn)):
words = line.split()
if words[0] == 'ELAPSEDMS':
ms = long(words[1].strip())
totalMs += ms
times.append(ms)
totalEntries += long(words[2].strip())
totalBytes += long(words[3].strip())
times.sort()
print times
numClients = len(times)
min = times[0] / 1000
avg = (float(totalMs) / numClients) / 1000
median = times[int(numClients/2)] / 1000
max = times[numClients-1] / 1000
log('Tservs\tClients\tMin\tAvg\tMed\tMax\tEntries\tMB')
log('%d\t%d\t%d\t%d\t%d\t%d\t%dM\t%d' % (numNodes, numClients, min, avg, median, max, totalEntries / 1000000, totalBytes / 1000000))
fdata.write('%d\t%d\t%d\t%d\t%d\t%d\t%dM\t%d\n' % (numNodes, numClients, min, avg, median, max, totalEntries / 1000000, totalBytes / 1000000))
fdata.flush()
time.sleep(5)
log('Tearing down %s test' % testName)
syscall('$ACCUMULO_HOME/bin/accumulo org.apache.accumulo.test.scalability.Run %s teardown %s' % (testName, numNodes))
time.sleep(10)
def syscall(cmd):
log('> %s' % cmd)
os.system(cmd)
def run(cmd, **kwargs):
    logging.debug("Running %s", ' '.join(cmd))
    handle = Popen(cmd, stdout=PIPE, **kwargs)
    out, err = handle.communicate()
    logging.debug("Result %d (%r, %r)", handle.returncode, out, err)
return handle.returncode
def log(msg):
print msg
sys.stdout.flush()
def main():
    if not os.getenv('ACCUMULO_HOME'):
        raise Exception('ACCUMULO_HOME needs to be set!')
    if not os.getenv('HADOOP_HOME'):
        raise Exception('HADOOP_HOME needs to be set!')
if len(sys.argv) != 2:
log('Usage: run.py <testName>')
sys.exit()
testName = sys.argv[1]
logging.basicConfig(level=logging.DEBUG)
log('Creating test directory structure')
testDir = 'test-%d' % time.time()
nodesDir = testDir+'/nodes'
syscall('mkdir %s' % testDir)
syscall('mkdir %s' % nodesDir)
log('Removing current /accumulo-scale directory')
syscall('hadoop fs -rmr /accumulo-scale')
log('Creating new /accumulo-scale directory structure')
syscall('hadoop fs -mkdir /accumulo-scale')
syscall('hadoop fs -mkdir /accumulo-scale/clients')
syscall('hadoop fs -mkdir /accumulo-scale/results')
syscall('hadoop fs -chmod -R 777 /accumulo-scale')
log('Copying config to HDFS')
syscall('hadoop fs -copyFromLocal ./conf /accumulo-scale/conf')
siteConfig = JavaConfig('conf/site.conf');
slavesPath = siteConfig.get('SLAVES')
maxNodes = file_len(slavesPath)
fdata = open('%s/scale.dat' % testDir, 'w')
fdata.write('Tservs\tClients\tMin\tAvg\tMed\tMax\tEntries\tMB\n')
for numNodes in siteConfig.get('TEST_CASES').split(','):
log('Running %s test with %s nodes' % (testName, numNodes))
if int(numNodes) > maxNodes:
logging.error('Skipping %r test case as slaves file %r contains only %r nodes', numNodes, slavesPath, maxNodes)
continue
runTest(testName, siteConfig, testDir, int(numNodes), fdata)
sys.stdout.flush()
if __name__ == '__main__':
main()
| apache-2.0 | 965,068,868,683,369,700 | 33.368182 | 211 | 0.631795 | false |
aurelg/wp-tools | src/wp-tools/exposed/directory_to_gallery.py | 1 | 8817 | #!/bin/env python2
# -*- coding: utf-8 -*-
"""
Recursively import images from directories in Wordpress, as Exposed galleries.
"""
import MySQLdb
import os
import re
import shutil
from datetime import date
import time
import argparse
class Directory2Gallery(object):
""" main (and only) class """
parameters = None
cnx = None
cur = None
def _get_wp_parameters(self, wp_path):
""" Retrieve wordpress parameters """
with open("%s/wp-config.php" % wp_path) as wp_config_file:
wp_config = {x[0]: x[1] for x in
[line.split("'")[1::2]
for line in wp_config_file.readlines()
if line.startswith("define(")]
if len(x) > 1}
self.parameters = {'wp-path': wp_path,
'db_host': wp_config['DB_HOST'],
'db': wp_config['DB_NAME'],
'db_username': wp_config['DB_USER'],
'db_password': wp_config['DB_PASSWORD']}
def _connect_db(self):
""" connect to the database """
self.cnx = MySQLdb.connect(host=self.parameters['db_host'],
user=self.parameters['db_username'],
passwd=self.parameters['db_password'],
db=self.parameters['db'])
self.cur = self.cnx.cursor()
self.cur.execute("SELECT option_value FROM wp_options WHERE option_name LIKE 'siteurl';")
self.parameters['url'] = self.cur.fetchone()[0]
def _insert_post(self, data):
return self._insert_sqlinto('wp_posts', data)
def _insert_post_meta(self, data):
return self._insert_sqlinto('wp_postmeta', data)
def _insert_sqlinto(self, table, data):
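        # Build and execute an INSERT ... SET statement from the data dict;
        # values are assumed to be pre-quoted SQL literals supplied by the caller.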
data_sql = 'INSERT INTO %s SET %s;' % (table,
', '.join(["%s=%s" % (k, v)\
for k, v in data.iteritems()]))
self.cur.execute(data_sql)
self.cnx.commit()
return self.cur.lastrowid
def attach_image(self, srcdir, image):
""" Attach image """
today = date.today()
wp_upload = "%s/%s" % (today.year, today.month)
wp_image_path = "%s/wp-content/uploads/%s" % (self.parameters['wp-path'], wp_upload)
if not os.path.isdir(wp_image_path):
os.mkdir(wp_image_path)
def find_unique_name(wp_image_name):
""" Find a unique name by iteratively adding suffixes """
def is_name_unique(name):
""" Check if a given file name is unique """
return not os.path.isfile(name)
count = 0
while not is_name_unique("%s/%s" % (wp_image_path, wp_image_name)):
wp_image_name = "%s_%s.%s" % (wp_image_name[:wp_image_name.rindex('.')],
count,
wp_image_name[wp_image_name.rindex('.')+1:])
count += 1
return wp_image_name
wp_image_name = find_unique_name(image)
shutil.copyfile("%s/%s" % (srcdir, image), "%s/%s" % (wp_image_path, wp_image_name))
mysql_date = time.strftime('"%Y-%m-%d %H:%M:%S"')
guid = '%s/wp-content/uploads/%s/%s' % (self.parameters['url'], wp_upload, wp_image_name)
image_data = {'post_author': 0,
'post_date': mysql_date,
'post_date_gmt': mysql_date,
'post_status': '"inherit"',
'comment_status': '"open"',
'ping_status': '"closed"',
'post_name': '"%s"' % wp_image_name,
'post_modified': mysql_date,
'post_parent': 0,
'guid': '"%s"' % guid,
'menu_order': 0,
'post_type': '"attachment"',
'post_mime_type': '"image/jpeg"',
'comment_count': 0,
'post_content': '""',
'post_title': '"%s"' % wp_image_name,
'post_excerpt': '""',
'to_ping': '""',
'pinged': '""',
'post_content_filtered': '""'}
post_id = self._insert_post(image_data)
image_meta = {'post_id': post_id,
'meta_key': '"_wp_attached_file"',
'meta_value': '"%s/%s"'%(wp_upload, wp_image_name)}
self._insert_post_meta(image_meta)
return post_id
def create_gallery(self, title, image_ids):
""" Create the Exposed gallery """
mysql_date = time.strftime('"%Y-%m-%d %H:%M:%S"')
gallery_data = {'post_author': 1,
'post_date': mysql_date,
'post_date_gmt': mysql_date,
'post_status': '"publish"',
'comment_status': '"closed"',
'ping_status': '"closed"',
'post_name': '"%s"' % title,
'post_modified': mysql_date,
'post_parent': 0,
'guid': '""',
'menu_order': 0,
'post_type': '"gallery"',
'comment_count': 0,
'post_mime_type': '""',
'post_content': '""',
'post_title': '"%s"' % title,
'post_excerpt': '""',
'to_ping': '""',
'pinged': '""',
'post_content_filtered': '""'}
post_id = self._insert_post(gallery_data)
# insert guid
guid = '%s/?post_type=gallery&p=%s' % (self.parameters['url'], post_id)
sql = "UPDATE wp_posts SET guid='%s' WHERE ID = %s" % (guid, post_id)
self.cur.execute(sql)
self.cnx.commit()
# Generate gallery meta, ugly but it works :-)
def gen_metadata(image_ids):
""" Generate the strange metadata describing Exposed galleries. """
prefix = 'a:2:{s:4:"meta";a:%s:{' % len(image_ids)
array = []
for i in range(len(image_ids)):
array.append('i:%s;a:3:{s:5:"title";s:0:"";s:7:"caption";s:0:"";s:3:"url";s:0:"";}' % i)
first = ''.join(array)
middle = '}s:6:"images";a:%s:{' % len(image_ids)
array = []
for i in range(len(image_ids)):
array.append('i:%s;s:%s:"%s";' % (i, len(str(image_ids[i])), image_ids[i]))
second = ''.join(array)
suffix = '}}'
to_return = "%s%s%s%s%s" % (prefix, first, middle, second, suffix)
return to_return
self._insert_post_meta({'post_id': post_id,
'meta_key': '"_st_gallery"',
'meta_value': "'%s'" % gen_metadata(image_ids)})
return post_id
def __init__(self, wp_directory):
self._get_wp_parameters(wp_directory)
self._connect_db()
def add_galleries_from(self, directory):
""" Recursively add directories as galleries """
all_files = os.listdir(directory)
images = [f for f in all_files if re.match(r'[^_].*\.jpg$', f)]
image_ids = [self.attach_image(directory, i) for i in images]
if len(image_ids) > 0:
post_id = self.create_gallery(os.path.basename(directory), image_ids)
print "Directory %s (%s files) -> gallery (post %s), contains %s images (from posts %s)" % \
(os.path.basename(directory), len(all_files), post_id, len(image_ids), \
', '.join(str(l) for l in image_ids))
else:
print "No image imported from %s" % directory
        for d in os.listdir(directory):
            if os.path.isdir("%s/%s" % (directory, d)):
                self.add_galleries_from("%s/%s" % (directory, d))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=\
        "Recursively import images from directories in Wordpress, as Exposed galleries.")
parser.add_argument('--wordpress-directory', '-w',
dest='wordpress_directory',
required=True,
help="Path to the wordpress installation (where is wp-config.php).")
parser.add_argument('--directory', '-d',
dest='directory',
required=True,
help="Path to the local directory to import galleries from.")
args = parser.parse_args()
Directory2Gallery(args.wordpress_directory).add_galleries_from(args.directory)
| gpl-2.0 | 3,624,760,288,714,065,400 | 44.921875 | 104 | 0.471135 | false |