repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
zas/picard | picard/webservice/api_helpers.py | 3 | 11862 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018, 2020 Laurent Monin
# Copyright (C) 2018-2021 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from PyQt5.QtCore import QUrl
from picard import PICARD_VERSION_STR
from picard.config import get_config
from picard.const import (
ACOUSTID_HOST,
ACOUSTID_KEY,
ACOUSTID_PORT,
CAA_HOST,
CAA_PORT,
)
from picard.webservice import (
CLIENT_STRING,
DEFAULT_RESPONSE_PARSER_TYPE,
ratecontrol,
)
ratecontrol.set_minimum_delay((ACOUSTID_HOST, ACOUSTID_PORT), 333)
ratecontrol.set_minimum_delay((CAA_HOST, CAA_PORT), 0)
def escape_lucene_query(text):
return re.sub(r'([+\-&|!(){}\[\]\^"~*?:\\/])', r'\\\1', text)
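# Usage sketch (added for illustration; not part of the upstream module):
#
#     escape_lucene_query('AC/DC (live?)')   # -> 'AC\\/DC \\(live\\?\\)'
#
# Every Lucene special character is prefixed with a backslash; ordinary
# letters, digits and whitespace pass through unchanged.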
def _wrap_xml_metadata(data):
return ('<?xml version="1.0" encoding="UTF-8"?>'
'<metadata xmlns="http://musicbrainz.org/ns/mmd-2.0#">%s</metadata>'
% data)
class APIHelper(object):
def __init__(self, host, port, api_path, webservice):
self._host = host
self._port = port
self.api_path = api_path
self._webservice = webservice
@property
def webservice(self):
return self._webservice
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def get(self, path_list, handler, priority=False, important=False, mblogin=False,
cacheloadcontrol=None, refresh=False, queryargs=None, parse_response_type=DEFAULT_RESPONSE_PARSER_TYPE):
path = self.api_path + "/".join(path_list)
return self._webservice.get(self.host, self.port, path, handler,
priority=priority, important=important, mblogin=mblogin,
refresh=refresh, queryargs=queryargs, parse_response_type=parse_response_type)
def post(self, path_list, data, handler, priority=False, important=False,
mblogin=True, queryargs=None, parse_response_type=DEFAULT_RESPONSE_PARSER_TYPE,
request_mimetype=None):
path = self.api_path + "/".join(path_list)
return self._webservice.post(self.host, self.port, path, data, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs, parse_response_type=parse_response_type,
request_mimetype=request_mimetype)
def put(self, path_list, data, handler, priority=True, important=False,
mblogin=True, queryargs=None, request_mimetype=None):
path = self.api_path + "/".join(path_list)
return self._webservice.put(self.host, self.port, path, data, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs, request_mimetype=request_mimetype)
def delete(self, path_list, handler, priority=True, important=False,
mblogin=True, queryargs=None):
path = self.api_path + "/".join(path_list)
return self._webservice.delete(self.host, self.port, path, handler,
priority=priority, important=important, mblogin=mblogin,
queryargs=queryargs)
class MBAPIHelper(APIHelper):
def __init__(self, webservice):
super().__init__(None, None, "/ws/2/", webservice)
@property
def host(self):
config = get_config()
return config.setting['server_host']
@property
def port(self):
config = get_config()
return config.setting['server_port']
def _get_by_id(self, entitytype, entityid, handler, inc=None, queryargs=None,
priority=False, important=False, mblogin=False, refresh=False):
path_list = [entitytype, entityid]
if queryargs is None:
queryargs = {}
if inc:
queryargs["inc"] = "+".join(inc)
return self.get(path_list, handler,
priority=priority, important=important, mblogin=mblogin,
refresh=refresh, queryargs=queryargs)
def get_release_by_id(self, releaseid, handler, inc=None,
priority=False, important=False, mblogin=False, refresh=False):
if inc is None:
inc = []
return self._get_by_id('release', releaseid, handler, inc,
priority=priority, important=important, mblogin=mblogin, refresh=refresh)
def get_track_by_id(self, trackid, handler, inc=None,
priority=False, important=False, mblogin=False, refresh=False):
if inc is None:
inc = []
return self._get_by_id('recording', trackid, handler, inc,
priority=priority, important=important, mblogin=mblogin, refresh=refresh)
def lookup_discid(self, discid, handler, priority=True, important=True, refresh=False):
inc = ['artist-credits', 'labels']
return self._get_by_id('discid', discid, handler, inc, queryargs={"cdstubs": "no"},
priority=priority, important=important, refresh=refresh)
def _find(self, entitytype, handler, **kwargs):
filters = []
limit = kwargs.pop("limit")
if limit:
filters.append(("limit", limit))
is_search = kwargs.pop("search", False)
if is_search:
config = get_config()
use_advanced_search = kwargs.pop("advanced_search", config.setting["use_adv_search_syntax"])
if use_advanced_search:
query = kwargs["query"]
else:
query = escape_lucene_query(kwargs["query"]).strip().lower()
filters.append(("dismax", 'true'))
else:
query = []
for name, value in kwargs.items():
value = escape_lucene_query(value).strip().lower()
if value:
query.append('%s:(%s)' % (name, value))
query = ' '.join(query)
if query:
filters.append(("query", query))
queryargs = {}
for name, value in filters:
queryargs[name] = bytes(QUrl.toPercentEncoding(str(value))).decode()
path_list = [entitytype]
return self.get(path_list, handler, queryargs=queryargs,
priority=True, important=True, mblogin=False,
refresh=False)
def find_releases(self, handler, **kwargs):
return self._find('release', handler, **kwargs)
def find_tracks(self, handler, **kwargs):
return self._find('recording', handler, **kwargs)
def find_artists(self, handler, **kwargs):
return self._find('artist', handler, **kwargs)
def _browse(self, entitytype, handler, inc=None, **kwargs):
path_list = [entitytype]
queryargs = kwargs
if inc:
queryargs["inc"] = "+".join(inc)
return self.get(path_list, handler, queryargs=queryargs,
priority=True, important=True, mblogin=False,
refresh=False)
def browse_releases(self, handler, **kwargs):
inc = ["media", "labels"]
return self._browse("release", handler, inc, **kwargs)
def submit_ratings(self, ratings, handler):
path_list = ['rating']
params = {"client": CLIENT_STRING}
recordings = (''.join(['<recording id="%s"><user-rating>%s</user-rating></recording>' %
(i[1], j*20) for i, j in ratings.items() if i[0] == 'recording']))
data = _wrap_xml_metadata('<recording-list>%s</recording-list>' % recordings)
return self.post(path_list, data, handler, priority=True,
queryargs=params, parse_response_type="xml",
request_mimetype="application/xml; charset=utf-8")
def get_collection(self, collection_id, handler, limit=100, offset=0):
path_list = ["collection"]
queryargs = None
if collection_id is not None:
inc = ["releases", "artist-credits", "media"]
path_list.extend([collection_id, "releases"])
queryargs = {}
queryargs["inc"] = "+".join(inc)
queryargs["limit"] = limit
queryargs["offset"] = offset
return self.get(path_list, handler, priority=True, important=True,
mblogin=True, queryargs=queryargs)
def get_collection_list(self, handler):
return self.get_collection(None, handler)
@staticmethod
def _collection_request(collection_id, releases):
while releases:
ids = ";".join(releases if len(releases) <= 400 else releases[:400])
releases = releases[400:]
yield ["collection", collection_id, "releases", ids]
@staticmethod
def _get_client_queryarg():
return {"client": CLIENT_STRING}
def put_to_collection(self, collection_id, releases, handler):
for path_list in self._collection_request(collection_id, releases):
self.put(path_list, "", handler,
queryargs=self._get_client_queryarg())
def delete_from_collection(self, collection_id, releases, handler):
for path_list in self._collection_request(collection_id, releases):
self.delete(path_list, handler,
queryargs=self._get_client_queryarg())
class AcoustIdAPIHelper(APIHelper):
def __init__(self, webservice):
super().__init__(ACOUSTID_HOST, ACOUSTID_PORT,
'/v2/', webservice)
@staticmethod
def _encode_acoustid_args(args, format_='json'):
filters = []
args['client'] = ACOUSTID_KEY
args['clientversion'] = PICARD_VERSION_STR
args['format'] = format_
for name, value in args.items():
value = bytes(QUrl.toPercentEncoding(value)).decode()
filters.append('%s=%s' % (name, value))
return '&'.join(filters)
def query_acoustid(self, handler, **args):
path_list = ['lookup']
body = self._encode_acoustid_args(args)
return self.post(path_list, body, handler, priority=False, important=False,
mblogin=False, request_mimetype="application/x-www-form-urlencoded")
def submit_acoustid_fingerprints(self, submissions, handler):
path_list = ['submit']
config = get_config()
args = {'user': config.setting["acoustid_apikey"]}
for i, submission in enumerate(submissions):
args['fingerprint.%d' % i] = submission.fingerprint
args['duration.%d' % i] = str(submission.duration)
args['mbid.%d' % i] = submission.recordingid
if submission.puid:
args['puid.%d' % i] = submission.puid
body = self._encode_acoustid_args(args, format_='json')
return self.post(path_list, body, handler, priority=True, important=False,
mblogin=False, request_mimetype="application/x-www-form-urlencoded")
| gpl-2.0 | 7,906,424,418,091,639,000 | 39.210169 | 116 | 0.59678 | false |
WIZARD-CXY/container-agent | tests/run_containers_test.py | 6 | 16625 | #!/usr/bin/python
"""Tests for run_containers."""
import unittest
import yaml
from container_agent import run_containers
class RunContainersTest(unittest.TestCase):
def testKnownVersion(self):
yaml_code = """
version: v1beta1
"""
run_containers.CheckVersion(yaml.load(yaml_code))
def testNoVersion(self):
yaml_code = """
not_version: not valid
"""
with self.assertRaises(SystemExit):
run_containers.CheckVersion(yaml.load(yaml_code))
def testUnknownVersion(self):
yaml_code = """
version: not valid
"""
with self.assertRaises(SystemExit):
run_containers.CheckVersion(yaml.load(yaml_code))
def testRfc1035Name(self):
self.assertFalse(run_containers.IsRfc1035Name('1'))
self.assertFalse(run_containers.IsRfc1035Name('123'))
self.assertFalse(run_containers.IsRfc1035Name('123abc'))
self.assertFalse(run_containers.IsRfc1035Name('123abc'))
self.assertFalse(run_containers.IsRfc1035Name('a_b'))
self.assertFalse(run_containers.IsRfc1035Name('a:b'))
self.assertFalse(run_containers.IsRfc1035Name('a b'))
self.assertFalse(run_containers.IsRfc1035Name('A.B'))
self.assertFalse(run_containers.IsRfc1035Name('ab-'))
self.assertTrue(run_containers.IsRfc1035Name('a'))
self.assertTrue(run_containers.IsRfc1035Name('abc'))
self.assertTrue(run_containers.IsRfc1035Name('abc123'))
self.assertTrue(run_containers.IsRfc1035Name('abc123def'))
self.assertTrue(run_containers.IsRfc1035Name('abc-123-def'))
def testVolumeValid(self):
yaml_code = """
- name: abc
- name: abc-123
- name: a
"""
x = run_containers.LoadVolumes(yaml.load(yaml_code))
self.assertEqual(3, len(x))
self.assertEqual('abc', x[0])
self.assertEqual('abc-123', x[1])
self.assertEqual('a', x[2])
def testVolumeNoName(self):
yaml_code = """
- notname: notgood
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumes(yaml.load(yaml_code))
def testVolumeInvalidName(self):
yaml_code = """
- name: 123abc
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumes(yaml.load(yaml_code))
def testVolumeDupName(self):
yaml_code = """
- name: abc123
- name: abc123
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumes(yaml.load(yaml_code))
def testContainerValidMinimal(self):
yaml_code = """
- name: abc123
image: foo/bar
- name: abc124
image: foo/bar
"""
user = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(2, len(user))
self.assertEqual('abc123', user[0].name)
self.assertEqual('abc124', user[1].name)
infra = run_containers.LoadInfraContainers(user)
self.assertEqual(1, len(infra))
self.assertEqual('.net', infra[0].name)
def testContainerValidFull(self):
yaml_code = """
- name: abc123
image: foo/bar
command:
- one
- two
workingDir: /tmp
ports:
- name: port1
hostPort: 111
containerPort: 2222
protocol: UDP
volumeMounts:
- name: vol1
path: /mnt
readOnly: true
env:
- key: KEY
value: value str
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), ['vol1'])
self.assertEqual(1, len(x))
self.assertEqual('abc123', x[0].name)
self.assertEqual('foo/bar', x[0].image)
self.assertEqual(['one', 'two'], x[0].command)
self.assertEqual('/tmp', x[0].working_dir)
self.assertEqual((111, 2222, '/udp'), x[0].ports[0])
self.assertEqual('/export/vol1:/mnt:ro', x[0].mounts[0])
self.assertEqual('KEY=value str', x[0].env_vars[0])
def testContainerValidFullJson(self):
"""Proves that the same YAML parsing code handles JSON."""
json_code = """
[
{
"name": "abc123",
"image": "foo/bar",
"command": [
"one",
"two"
],
"workingDir": "/tmp",
"ports": [
{
"name": "port1",
"hostPort": 111,
"containerPort": 2222,
"protocol": "UDP"
}
],
"volumeMounts": [
{
"name": "vol1",
"path": "/mnt",
"readOnly": true
}
],
"env": [
{
"key": "KEY",
"value": "value str"
}
]
}
]
"""
x = run_containers.LoadUserContainers(yaml.load(json_code), ['vol1'])
self.assertEqual(1, len(x))
self.assertEqual('abc123', x[0].name)
self.assertEqual('foo/bar', x[0].image)
self.assertEqual(['one', 'two'], x[0].command)
self.assertEqual('/tmp', x[0].working_dir)
self.assertEqual((111, 2222, '/udp'), x[0].ports[0])
self.assertEqual('/export/vol1:/mnt:ro', x[0].mounts[0])
self.assertEqual('KEY=value str', x[0].env_vars[0])
def testContainerNoName(self):
yaml_code = """
- notname: notgood
image: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerInvalidName(self):
yaml_code = """
- name: not_good
image: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerDupName(self):
yaml_code = """
- name: abc123
image: foo/bar
- name: abc123
image: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerNoImage(self):
yaml_code = """
- name: abc123
notimage: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerWithoutCommand(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(1, len(x))
self.assertEqual(0, len(x[0].command))
def testContainerWithCommand(self):
yaml_code = """
- name: abc123
image: foo/bar
command:
- first
- second
- third fourth
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(1, len(x))
self.assertEqual(3, len(x[0].command))
def testContainerWithoutWorkingDir(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertIsNone(x[0].working_dir)
def testContainerWithWorkingDir(self):
yaml_code = """
- name: abc123
image: foo/bar
workingDir: /foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual('/foo/bar', x[0].working_dir)
def testContainerWorkingDirNotAbsolute(self):
yaml_code = """
- name: abc123
image: foo/bar
workingDir: foo/bar
"""
with self.assertRaises(SystemExit):
run_containers.LoadUserContainers(yaml.load(yaml_code), [])
def testContainerWithoutPorts(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(0, len(x[0].ports))
def testPortValidMinimal(self):
yaml_code = """
- containerPort: 1
- containerPort: 65535
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(2, len(x))
self.assertEqual((1, 1, ''), x[0])
self.assertEqual((65535, 65535, ''), x[1])
def testPortWithName(self):
yaml_code = """
- name: abc123
containerPort: 123
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((123, 123, ''), x[0])
def testPortInvalidName(self):
yaml_code = """
- name: 123abc
containerPort: 123
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortDupName(self):
yaml_code = """
- name: abc123
containerPort: 123
- name: abc123
containerPort: 124
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortNoContainerPort(self):
yaml_code = """
- name: abc123
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortTooLowContainerPort(self):
yaml_code = """
- containerPort: 0
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortTooHighContainerPort(self):
yaml_code = """
- containerPort: 65536
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortWithHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 456
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((456, 123, ''), x[0])
def testPortTooLowHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 0
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortTooHighHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 65536
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortDupHostPort(self):
yaml_code = """
- containerPort: 123
hostPort: 123
- containerPort: 124
hostPort: 123
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testPortWithProtocolTcp(self):
yaml_code = """
- containerPort: 123
protocol: TCP
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((123, 123, ''), x[0])
def testPortWithProtocolUdp(self):
yaml_code = """
- containerPort: 123
protocol: UDP
"""
x = run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(1, len(x))
self.assertEqual((123, 123, '/udp'), x[0])
def testPortWithInvalidProtocol(self):
yaml_code = """
- containerPort: 123
protocol: IGMP
"""
with self.assertRaises(SystemExit):
run_containers.LoadPorts(yaml.load(yaml_code), 'ctr_name')
def testContainerWithoutMounts(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(0, len(x[0].mounts))
def testMountValidMinimal(self):
yaml_code = """
- name: vol1
path: /mnt/vol1
- name: vol2
path: /mnt/vol2
"""
x = run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1', 'vol2'], 'ctr_name')
self.assertEqual(2, len(x))
self.assertEqual('/export/vol1:/mnt/vol1:rw', x[0])
self.assertEqual('/export/vol2:/mnt/vol2:rw', x[1])
def testMountNoName(self):
yaml_code = """
- path: /mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1'], 'ctr_name')
def testMountInvalidName(self):
yaml_code = """
- name: 1vol
path: /mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['1vol'], 'ctr_name')
def testMountUnknownName(self):
yaml_code = """
- name: vol1
path: /mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), [], 'ctr_name')
def testMountNoPath(self):
yaml_code = """
- name: vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1'], 'ctr_name')
def testMountInvalidPath(self):
yaml_code = """
- name: vol1
path: mnt/vol1
"""
with self.assertRaises(SystemExit):
run_containers.LoadVolumeMounts(
yaml.load(yaml_code), ['vol1'], 'ctr_name')
def testContainerWithoutEnv(self):
yaml_code = """
- name: abc123
image: foo/bar
"""
x = run_containers.LoadUserContainers(yaml.load(yaml_code), [])
self.assertEqual(0, len(x[0].env_vars))
def testEnvValidMinimal(self):
yaml_code = """
- key: key1
value: value
- key: key2
value: value too
"""
x = run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
self.assertEqual(2, len(x))
self.assertEqual('key1=value', x[0])
self.assertEqual('key2=value too', x[1])
def testEnvNoKey(self):
yaml_code = """
- value: value
"""
with self.assertRaises(SystemExit):
run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
def testEnvInvalidKey(self):
yaml_code = """
- key: 1value
value: value
"""
with self.assertRaises(SystemExit):
run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
def testEnvNoValue(self):
yaml_code = """
- key: key
"""
with self.assertRaises(SystemExit):
run_containers.LoadEnvVars(yaml.load(yaml_code), 'ctr_name')
def testFlagList(self):
self.assertEqual([], run_containers.FlagList([], '-x'))
self.assertEqual(['-x', 'a'], run_containers.FlagList(['a'], '-x'))
self.assertEqual(['-x', 'a', '-x', 'b', '-x', 'c'],
run_containers.FlagList(['a', 'b', 'c'], '-x'))
def testFlagOrNothing(self):
self.assertEqual([], run_containers.FlagOrNothing(None, '-x'))
self.assertEqual(['-x', 'a'], run_containers.FlagOrNothing('a', '-x'))
def testCheckGroupWideConflictsOk(self):
containers = []
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 80, '')]
containers.append(c)
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(81, 81, '')]
containers.append(c)
c = run_containers.Container('name2', 'ubuntu')
c.ports = [(81, 81, '/udp')]
containers.append(c)
run_containers.CheckGroupWideConflicts(containers)
def testCheckGroupWideConflictsDupHostPort(self):
containers = []
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 80, '')]
containers.append(c)
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 81, '')]
containers.append(c)
with self.assertRaises(SystemExit):
run_containers.CheckGroupWideConflicts(containers)
def testCheckGroupWideConflictsDupContainerPort(self):
containers = []
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(80, 80, '')]
containers.append(c)
c = run_containers.Container('name1', 'ubuntu')
c.ports = [(81, 80, '')]
containers.append(c)
with self.assertRaises(SystemExit):
run_containers.CheckGroupWideConflicts(containers)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -326,527,721,619,271,550 | 29.787037 | 78 | 0.560241 | false |
jonathonwalz/ansible | lib/ansible/modules/network/panos/panos_lic.py | 78 | 4915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 | -8,696,113,681,780,111,000 | 26.61236 | 99 | 0.633571 | false |
tcheehow/MissionPlanner | Lib/sgmllib.py | 64 | 18437 | """A parser for SGML, using the derived class as a static DTD."""
# XXX This only supports those SGML features used by HTML.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special). RCDATA is
# not supported at all.
from warnings import warnpy3k
warnpy3k("the sgmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import markupbase
import re
__all__ = ["SGMLParser", "SGMLParseError"]
# Regular expressions used for parsing
interesting = re.compile('[&<]')
incomplete = re.compile('&([a-zA-Z][a-zA-Z0-9]*|#[0-9]*)?|'
'<([a-zA-Z][^<>]*|'
'/([a-zA-Z][^<>]*)?|'
'![^<>]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#([0-9]+)[^0-9]')
starttagopen = re.compile('<[>a-zA-Z]')
shorttagopen = re.compile('<[a-zA-Z][-.a-zA-Z0-9]*/')
shorttag = re.compile('<([a-zA-Z][-.a-zA-Z0-9]*)/([^/]*)/')
piclose = re.compile('>')
endbracket = re.compile('[<>]')
tagfind = re.compile('[a-zA-Z][-_.a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?')
class SGMLParseError(RuntimeError):
"""Exception raised for all parse errors."""
pass
# SGML parser base class -- find tags and call handler functions.
# Usage: p = SGMLParser(); p.feed(data); ...; p.close().
# The dtd is defined by deriving a class which defines methods
# with special names to handle tags: start_foo and end_foo to handle
# <foo> and </foo>, respectively, or do_foo to handle <foo> by itself.
# (Tags are converted to lower case for this purpose.) The data
# between tags is passed to the parser by calling self.handle_data()
# with some data as argument (the data may be split up in arbitrary
# chunks). Entity references are passed by calling
# self.handle_entityref() with the entity reference as argument.
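# Illustrative sketch (added; not in the original module) of a derived parser
# using the naming convention above. The tag names and data are invented:
#
#     class AnchorParser(SGMLParser):
#         def reset(self):
#             SGMLParser.reset(self)
#             self.hrefs = []
#         def start_a(self, attrs):      # called for <a ...>
#             self.hrefs.extend(v for k, v in attrs if k == 'href')
#         def end_a(self):               # called for </a>
#             pass
#         def do_br(self, attrs):        # called for <br>, which has no end tag
#             pass
#
#     p = AnchorParser()
#     p.feed('<a href="http://example.com/">link</a><br>')
#     p.close()
#     assert p.hrefs == ['http://example.com/']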
class SGMLParser(markupbase.ParserBase):
# Definition of entities -- derived classes may override
entity_or_charref = re.compile('&(?:'
'([a-zA-Z][-.a-zA-Z0-9]*)|#([0-9]+)'
')(;?)')
def __init__(self, verbose=0):
"""Initialize and reset this instance."""
self.verbose = verbose
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
def setnomoretags(self):
"""Enter literal mode (CDATA) till EOF.
Intended for derived classes only.
"""
self.nomoretags = self.literal = 1
def setliteral(self, *args):
"""Enter literal mode (CDATA).
Intended for derived classes only.
"""
self.literal = 1
def feed(self, data):
"""Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n'). (This just saves the text,
all the processing is done by goahead().)
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle the remaining data."""
self.goahead(1)
def error(self, message):
raise SGMLParseError(message)
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.nomoretags:
self.handle_data(rawdata[i:n])
i = n
break
match = interesting.search(rawdata, i)
if match: j = match.start()
else: j = n
if i < j:
self.handle_data(rawdata[i:j])
i = j
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i):
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
k = self.parse_starttag(i)
if k < 0: break
i = k
continue
if rawdata.startswith("</", i):
k = self.parse_endtag(i)
if k < 0: break
i = k
self.literal = 0
continue
if self.literal:
if n > (i + 1):
self.handle_data("<")
i = i+1
else:
# incomplete
break
continue
if rawdata.startswith("<!--", i):
# Strictly speaking, a comment is --.*--
# within a declaration tag <!...>.
# This should be removed,
# and comments handled only in parse_declaration.
k = self.parse_comment(i)
if k < 0: break
i = k
continue
if rawdata.startswith("<?", i):
k = self.parse_pi(i)
if k < 0: break
i = i+k
continue
if rawdata.startswith("<!", i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
k = self.parse_declaration(i)
if k < 0: break
i = k
continue
elif rawdata[i] == '&':
if self.literal:
self.handle_data(rawdata[i])
i = i+1
continue
match = charref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_charref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
i = match.end(0)
if rawdata[i-1] != ';': i = i-1
continue
else:
self.error('neither < nor & ??')
# We get here only if incomplete matches but
# nothing else
match = incomplete.match(rawdata, i)
if not match:
self.handle_data(rawdata[i])
i = i+1
continue
j = match.end(0)
if j == n:
break # Really incomplete
self.handle_data(rawdata[i:j])
i = j
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = n
self.rawdata = rawdata[i:]
# XXX if end: check for empty stack
# Extensions for the DOCTYPE scanner:
_decl_otherchars = '='
# Internal -- parse processing instr, return length or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
if rawdata[i:i+2] != '<?':
self.error('unexpected call to parse_pi()')
match = piclose.search(rawdata, i+2)
if not match:
return -1
j = match.start(0)
self.handle_pi(rawdata[i+2: j])
j = match.end(0)
return j-i
def get_starttag_text(self):
return self.__starttag_text
# Internal -- handle starttag, return length or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
start_pos = i
rawdata = self.rawdata
if shorttagopen.match(rawdata, i):
# SGML shorthand: <tag/data/ == <tag>data</tag>
# XXX Can data contain &... (entity or char refs)?
# XXX Can data contain < or > (tag characters)?
# XXX Can there be whitespace before the first /?
match = shorttag.match(rawdata, i)
if not match:
return -1
tag, data = match.group(1, 2)
self.__starttag_text = '<%s/' % tag
tag = tag.lower()
k = match.end(0)
self.finish_shorttag(tag, data)
self.__starttag_text = rawdata[start_pos:match.end(1) + 1]
return k
# XXX The following should skip matching quotes (' or ")
# As a shortcut way to exit, this isn't so bad, but shouldn't
# be used to locate the actual end of the start tag since the
# < or > characters may be embedded in an attribute value.
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
if rawdata[i:i+2] == '<>':
# SGML shorthand: <> == <last open tag seen>
k = j
tag = self.lasttag
else:
match = tagfind.match(rawdata, i+1)
if not match:
self.error('unexpected call to parse_starttag')
k = match.end(0)
tag = rawdata[i+1:k].lower()
self.lasttag = tag
while k < j:
match = attrfind.match(rawdata, k)
if not match: break
attrname, rest, attrvalue = match.group(1, 2, 3)
if not rest:
attrvalue = attrname
else:
if (attrvalue[:1] == "'" == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
# strip quotes
attrvalue = attrvalue[1:-1]
attrvalue = self.entity_or_charref.sub(
self._convert_ref, attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = match.end(0)
if rawdata[j] == '>':
j = j+1
self.__starttag_text = rawdata[start_pos:j]
self.finish_starttag(tag, attrs)
return j
# Internal -- convert entity or character reference
def _convert_ref(self, match):
if match.group(2):
return self.convert_charref(match.group(2)) or \
'&#%s%s' % match.groups()[1:]
elif match.group(3):
return self.convert_entityref(match.group(1)) or \
'&%s;' % match.group(1)
else:
return '&%s' % match.group(1)
# Internal -- parse endtag
def parse_endtag(self, i):
rawdata = self.rawdata
match = endbracket.search(rawdata, i+1)
if not match:
return -1
j = match.start(0)
tag = rawdata[i+2:j].strip().lower()
if rawdata[j] == '>':
j = j+1
self.finish_endtag(tag)
return j
# Internal -- finish parsing of <tag/data/ (same as <tag>data</tag>)
def finish_shorttag(self, tag, data):
self.finish_starttag(tag, [])
self.handle_data(data)
self.finish_endtag(tag)
# Internal -- finish processing of start tag
# Return -1 for unknown tag, 0 for open-only tag, 1 for balanced tag
def finish_starttag(self, tag, attrs):
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
self.unknown_starttag(tag, attrs)
return -1
else:
self.handle_starttag(tag, method, attrs)
return 0
else:
self.stack.append(tag)
self.handle_starttag(tag, method, attrs)
return 1
# Internal -- finish processing of end tag
def finish_endtag(self, tag):
if not tag:
found = len(self.stack) - 1
if found < 0:
self.unknown_endtag(tag)
return
else:
if tag not in self.stack:
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
self.unknown_endtag(tag)
else:
self.report_unbalanced(tag)
return
found = len(self.stack)
for i in range(found):
if self.stack[i] == tag: found = i
while len(self.stack) > found:
tag = self.stack[-1]
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
method = None
if method:
self.handle_endtag(tag, method)
else:
self.unknown_endtag(tag)
del self.stack[-1]
# Overridable -- handle start tag
def handle_starttag(self, tag, method, attrs):
method(attrs)
# Overridable -- handle end tag
def handle_endtag(self, tag, method):
method()
# Example -- report an unbalanced </...> tag.
def report_unbalanced(self, tag):
if self.verbose:
print '*** Unbalanced </' + tag + '>'
print '*** Stack:', self.stack
def convert_charref(self, name):
"""Convert character reference, may be overridden."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:
return
return self.convert_codepoint(n)
def convert_codepoint(self, codepoint):
return chr(codepoint)
def handle_charref(self, name):
"""Handle character reference, no need to override."""
replacement = self.convert_charref(name)
if replacement is None:
self.unknown_charref(name)
else:
self.handle_data(replacement)
# Definition of entities -- derived classes may override
entitydefs = \
{'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
def convert_entityref(self, name):
"""Convert entity references.
        As an alternative to overriding this method, one can tailor the
results by setting up the self.entitydefs mapping appropriately.
"""
table = self.entitydefs
if name in table:
return table[name]
else:
return
def handle_entityref(self, name):
"""Handle entity references, no need to override."""
replacement = self.convert_entityref(name)
if replacement is None:
self.unknown_entityref(name)
else:
self.handle_data(replacement)
# Example -- handle data, should be overridden
def handle_data(self, data):
pass
# Example -- handle comment, could be overridden
def handle_comment(self, data):
pass
# Example -- handle declaration, could be overridden
def handle_decl(self, decl):
pass
# Example -- handle processing instruction, could be overridden
def handle_pi(self, data):
pass
# To be overridden -- handlers for unknown objects
def unknown_starttag(self, tag, attrs): pass
def unknown_endtag(self, tag): pass
def unknown_charref(self, ref): pass
def unknown_entityref(self, ref): pass
class TestSGMLParser(SGMLParser):
def __init__(self, verbose=0):
self.testdata = ""
SGMLParser.__init__(self, verbose)
def handle_data(self, data):
self.testdata = self.testdata + data
if len(repr(self.testdata)) >= 70:
self.flush()
def flush(self):
data = self.testdata
if data:
self.testdata = ""
print 'data:', repr(data)
def handle_comment(self, data):
self.flush()
r = repr(data)
if len(r) > 68:
r = r[:32] + '...' + r[-32:]
print 'comment:', r
def unknown_starttag(self, tag, attrs):
self.flush()
if not attrs:
print 'start tag: <' + tag + '>'
else:
print 'start tag: <' + tag,
for name, value in attrs:
print name + '=' + '"' + value + '"',
print '>'
def unknown_endtag(self, tag):
self.flush()
print 'end tag: </' + tag + '>'
def unknown_entityref(self, ref):
self.flush()
print '*** unknown entity ref: &' + ref + ';'
def unknown_charref(self, ref):
self.flush()
print '*** unknown char ref: &#' + ref + ';'
def unknown_decl(self, data):
self.flush()
print '*** unknown decl: [' + data + ']'
def close(self):
SGMLParser.close(self)
self.flush()
def test(args = None):
import sys
if args is None:
args = sys.argv[1:]
if args and args[0] == '-s':
args = args[1:]
klass = SGMLParser
else:
klass = TestSGMLParser
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
x = klass()
for c in data:
x.feed(c)
x.close()
if __name__ == '__main__':
test()
| gpl-3.0 | 2,139,560,108,084,405,200 | 31.339964 | 79 | 0.487932 | false |
paplorinc/intellij-community | plugins/hg4idea/testData/bin/hgext/inotify/linux/watcher.py | 92 | 10504 | # watcher.py - high-level interfaces to the Linux inotify subsystem
# Copyright 2006 Bryan O'Sullivan <[email protected]>
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2.1 of the GNU Lesser General Public
# License, or any later version.
'''High-level interfaces to the Linux inotify subsystem.
The inotify subsystem provides an efficient mechanism for file status
monitoring and change notification.
The watcher class hides the low-level details of the inotify
interface, and provides a Pythonic wrapper around it. It generates
events that provide somewhat more information than raw inotify makes
available.
The autowatcher class is more useful, as it automatically watches
newly-created directories on your behalf.'''
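# Usage sketch (added for illustration; not part of the original module).
# The watched path and event mask are invented for the example:
#
#     w = autowatcher()
#     w.add_all('/var/tmp/work', inotify.IN_CREATE | inotify.IN_DELETE)
#     while w:                     # watcher is truthy while watches remain
#         for evt in w.read():
#             print evt.fullpath, evt.mask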
__author__ = "Bryan O'Sullivan <[email protected]>"
import _inotify as inotify
import array
import errno
import fcntl
import os
import termios
class event(object):
'''Derived inotify event class.
The following fields are available:
mask: event mask, indicating what kind of event this is
cookie: rename cookie, if a rename-related event
path: path of the directory in which the event occurred
name: name of the directory entry to which the event occurred
(may be None if the event happened to a watched directory)
fullpath: complete path at which the event occurred
wd: watch descriptor that triggered this event'''
__slots__ = (
'cookie',
'fullpath',
'mask',
'name',
'path',
'raw',
'wd',
)
def __init__(self, raw, path):
self.path = path
self.raw = raw
if raw.name:
self.fullpath = path + '/' + raw.name
else:
self.fullpath = path
self.wd = raw.wd
self.mask = raw.mask
self.cookie = raw.cookie
self.name = raw.name
def __repr__(self):
r = repr(self.raw)
return 'event(path=' + repr(self.path) + ', ' + r[r.find('(') + 1:]
_event_props = {
'access': 'File was accessed',
'modify': 'File was modified',
'attrib': 'Attribute of a directory entry was changed',
'close_write': 'File was closed after being written to',
'close_nowrite': 'File was closed without being written to',
'open': 'File was opened',
'moved_from': 'Directory entry was renamed from this name',
'moved_to': 'Directory entry was renamed to this name',
'create': 'Directory entry was created',
'delete': 'Directory entry was deleted',
'delete_self': 'The watched directory entry was deleted',
'move_self': 'The watched directory entry was renamed',
'unmount': 'Directory was unmounted, and can no longer be watched',
'q_overflow': 'Kernel dropped events due to queue overflow',
'ignored': 'Directory entry is no longer being watched',
'isdir': 'Event occurred on a directory',
}
for k, v in _event_props.iteritems():
mask = getattr(inotify, 'IN_' + k.upper())
    # Bind this iteration's mask as a default argument so every generated
    # property tests its own inotify flag; a plain closure would only see
    # the loop's final value of ``mask``.
    def getter(self, mask=mask):
        return self.mask & mask
getter.__name__ = k
getter.__doc__ = v
setattr(event, k, property(getter, doc=v))
del _event_props
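# After the loop above, each key of _event_props is a read-only property on
# ``event``; e.g. (illustrative) ``evt.create and evt.isdir`` is non-zero
# when a subdirectory was created inside a watched directory.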
class watcher(object):
'''Provide a Pythonic interface to the low-level inotify API.
Also adds derived information to each event that is not available
through the normal inotify API, such as directory name.'''
__slots__ = (
'fd',
'_paths',
'_wds',
)
def __init__(self):
'''Create a new inotify instance.'''
self.fd = inotify.init()
self._paths = {}
self._wds = {}
def fileno(self):
'''Return the file descriptor this watcher uses.
Useful for passing to select and poll.'''
return self.fd
def add(self, path, mask):
'''Add or modify a watch.
Return the watch descriptor added or modified.'''
path = os.path.normpath(path)
wd = inotify.add_watch(self.fd, path, mask)
self._paths[path] = wd, mask
self._wds[wd] = path, mask
return wd
def remove(self, wd):
'''Remove the given watch.'''
inotify.remove_watch(self.fd, wd)
self._remove(wd)
def _remove(self, wd):
path_mask = self._wds.pop(wd, None)
if path_mask is not None:
self._paths.pop(path_mask[0])
def path(self, path):
'''Return a (watch descriptor, event mask) pair for the given path.
If the path is not being watched, return None.'''
return self._paths.get(path)
def wd(self, wd):
'''Return a (path, event mask) pair for the given watch descriptor.
If the watch descriptor is not valid or not associated with
this watcher, return None.'''
return self._wds.get(wd)
def read(self, bufsize=None):
'''Read a list of queued inotify events.
If bufsize is zero, only return those events that can be read
immediately without blocking. Otherwise, block until events are
available.'''
events = []
for evt in inotify.read(self.fd, bufsize):
events.append(event(evt, self._wds[evt.wd][0]))
if evt.mask & inotify.IN_IGNORED:
self._remove(evt.wd)
elif evt.mask & inotify.IN_UNMOUNT:
self.close()
return events
def close(self):
'''Shut down this watcher.
All subsequent method calls are likely to raise exceptions.'''
os.close(self.fd)
self.fd = None
self._paths = None
self._wds = None
def __len__(self):
'''Return the number of active watches.'''
return len(self._paths)
def __iter__(self):
'''Yield a (path, watch descriptor, event mask) tuple for each
entry being watched.'''
for path, (wd, mask) in self._paths.iteritems():
yield path, wd, mask
def __del__(self):
if self.fd is not None:
os.close(self.fd)
ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]
def add_iter(self, path, mask, onerror=None):
'''Add or modify watches over path and its subdirectories.
Yield each added or modified watch descriptor.
To ensure that this method runs to completion, you must
iterate over all of its results, even if you do not care what
they are. For example:
for wd in w.add_iter(path, mask):
pass
By default, errors are ignored. If optional arg "onerror" is
specified, it should be a function; it will be called with one
argument, an OSError instance. It can report the error to
continue with the walk, or raise the exception to abort the
walk.'''
# Add the IN_ONLYDIR flag to the event mask, to avoid a possible
# race when adding a subdirectory. In the time between the
# event being queued by the kernel and us processing it, the
# directory may have been deleted, or replaced with a different
# kind of entry with the same name.
submask = mask | inotify.IN_ONLYDIR
try:
yield self.add(path, mask)
except OSError, err:
if onerror and err.errno not in self.ignored_errors:
onerror(err)
for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
for d in dirs:
try:
yield self.add(root + '/' + d, submask)
except OSError, err:
if onerror and err.errno not in self.ignored_errors:
onerror(err)
def add_all(self, path, mask, onerror=None):
'''Add or modify watches over path and its subdirectories.
Return a list of added or modified watch descriptors.
By default, errors are ignored. If optional arg "onerror" is
specified, it should be a function; it will be called with one
argument, an OSError instance. It can report the error to
continue with the walk, or raise the exception to abort the
walk.'''
return [w for w in self.add_iter(path, mask, onerror)]
class autowatcher(watcher):
'''watcher class that automatically watches newly created directories.'''
__slots__ = (
'addfilter',
)
def __init__(self, addfilter=None):
'''Create a new inotify instance.
This instance will automatically watch newly created
directories.
If the optional addfilter parameter is not None, it must be a
callable that takes one parameter. It will be called each time
a directory is about to be automatically watched. If it returns
True, the directory will be watched if it still exists,
otherwise, it will be skipped.'''
super(autowatcher, self).__init__()
self.addfilter = addfilter
_dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE
def read(self, bufsize=None):
events = super(autowatcher, self).read(bufsize)
for evt in events:
if evt.mask & self._dir_create_mask == self._dir_create_mask:
if self.addfilter is None or self.addfilter(evt):
parentmask = self._wds[evt.wd][1]
# See note about race avoidance via IN_ONLYDIR above.
mask = parentmask | inotify.IN_ONLYDIR
try:
self.add_all(evt.fullpath, mask)
except OSError, err:
if err.errno not in self.ignored_errors:
raise
return events
class threshold(object):
'''Class that indicates whether a file descriptor has reached a
threshold of readable bytes available.
This class is not thread-safe.'''
__slots__ = (
'fd',
'threshold',
'_iocbuf',
)
def __init__(self, fd, threshold=1024):
self.fd = fd
self.threshold = threshold
self._iocbuf = array.array('i', [0])
def readable(self):
'''Return the number of bytes readable on this file descriptor.'''
fcntl.ioctl(self.fd, termios.FIONREAD, self._iocbuf, True)
return self._iocbuf[0]
def __call__(self):
'''Indicate whether the number of readable bytes has met or
exceeded the threshold.'''
return self.readable() >= self.threshold
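# Usage sketch (added for illustration; not part of the original module):
# ``threshold`` pairs naturally with select/poll to batch event reads. The
# watched path and byte threshold below are invented, and the loop is
# deliberately simplified:
#
#     import select
#     w = watcher()
#     w.add('/etc', inotify.IN_MODIFY)
#     worth_reading = threshold(w.fileno(), 512)
#     p = select.poll()
#     p.register(w.fileno(), select.POLLIN)
#     while True:
#         p.poll()                     # wait until the fd is readable
#         if worth_reading():          # drain only once a batch has queued up
#             for evt in w.read(0):
#                 print evt.fullpath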
| apache-2.0 | 5,934,919,016,846,217,000 | 30.355224 | 79 | 0.607673 | false |
ChenJunor/hue | desktop/core/src/desktop/redaction/tests.py | 30 | 14341 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import random
import re
import tempfile
import threading
from desktop.redaction.engine import RedactionEngine, \
RedactionPolicy, \
RedactionRule, \
parse_redaction_policy_from_file, \
_convert_java_pattern_to_python
from desktop.redaction.logfilter import add_log_redaction_filter_to_logger
from nose.tools import assert_true, assert_equal, assert_not_equal, raises
MESSAGE = "This string is not redacted"
def get_path(filename):
return os.path.join(os.path.dirname(__file__), 'test_data', filename)
class MockLoggingHandler(logging.Handler):
def __init__(self, *args, **kwargs):
logging.Handler.__init__(self, *args, **kwargs)
self.records = []
def emit(self, record):
self.records.append(record)
def reset(self):
del self.records[:]
class TestRedactionRule(object):
def test_redaction_rule_works(self):
rule = RedactionRule('password=', 'password=".*"', 'password="???"')
test_strings = [
('message', 'message'),
('password="a password"', 'password="???"'),
('before password="a password" after', 'before password="???" after'),
]
for message, redacted_message in test_strings:
assert_equal(rule.redact(message), redacted_message)
def test_non_redacted_string_returns_same_string(self):
rule = RedactionRule('password=', 'password=".*"', 'password="???"')
message = 'message'
assert_true(rule.redact(message) is message)
def test_equality(self):
rule1 = RedactionRule('password=', 'password=".*"', 'password="???"')
rule2 = RedactionRule('password=', 'password=".*"', 'password="???"')
rule3 = RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
assert_equal(rule1, rule2)
assert_not_equal(rule1, rule3)
def test_parse_redaction_policy_from_file(self):
with tempfile.NamedTemporaryFile() as f:
json.dump({
'version': 1,
'rules': [
{
'description': 'redact passwords',
'trigger': 'password=',
'search': 'password=".*"',
'replace': 'password="???"',
},
{
'description': 'redact social security numbers',
'search': '\d{3}-\d{2}-\d{4}',
'replace': 'XXX-XX-XXXX',
},
]
}, f)
f.flush()
policy = parse_redaction_policy_from_file(f.name)
assert_equal(policy.rules, [
RedactionRule(u'password=', u'password=".*"', u'password="???"'),
RedactionRule(None, u'\d{3}-\d{2}-\d{4}', u'XXX-XX-XXXX'),
])
class TestRedactionEngine(object):
def test_redaction_works(self):
redaction_engine = RedactionEngine([
RedactionRule('password=', 'password=".*"', 'password="???"'),
RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
test_strings = [
('message', 'message'),
('password="a password"', 'password="???"'),
('before password="a password" after', 'before password="???" after'),
('an ssn=123-45-6789', 'an ssn=XXX-XX-XXXX'),
]
for message, redacted_message in test_strings:
assert_equal(redaction_engine.redact(message), redacted_message)
def test_equality(self):
engine1 = RedactionEngine([
RedactionRule('password=', 'password=".*"', 'password="???"'),
])
engine2 = RedactionEngine([
RedactionRule('password=', 'password=".*"', 'password="???"'),
])
engine3 = RedactionEngine([
RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
assert_equal(engine1, engine2)
assert_not_equal(engine1, engine3)
class TestRedactionLogFilter(object):
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger(cls.__name__)
cls.handler = MockLoggingHandler()
cls.logger.addHandler(cls.handler)
policy = RedactionPolicy([
RedactionRule('password=', 'password=".*"', 'password="???"'),
RedactionRule('ssn=', 'ssn=\d{3}-\d{2}-\d{4}', 'ssn=XXX-XX-XXXX'),
])
engine = RedactionEngine([policy])
add_log_redaction_filter_to_logger(engine, cls.logger)
@classmethod
def tearDownClass(cls):
cls.logger.handlers = []
def tearDown(self):
self.handler.reset()
def test_redaction_filter(self):
test_strings = [
{
'message': 'message',
'result_message': 'message',
'result_msg': 'message',
'result_args': (),
},
{
'message': 'message %s',
'args': ['an arg'],
'result_message': 'message an arg',
'result_msg': 'message %s',
'result_args': ('an arg',),
},
{
'message': 'password="a password"',
'result_message': 'password="???"',
},
{
'message': 'password="%s"',
'args': ['a password'],
'result_message': 'password="???"',
},
{
'message': 'password=%s',
'args': ['"a password"'],
'result_message': 'password="???"',
},
{
'message': 'before password="%s" after',
'args': ['a password'],
'result_message': 'before password="???" after',
},
{
'message': 'ssn=%s-%s-%s',
'args': ['123', '45', '6789'],
'result_message': 'ssn=XXX-XX-XXXX',
},
]
for test in test_strings:
self.logger.debug(test['message'], *test.get('args', ()))
for test, record in zip(test_strings, self.handler.records):
assert_equal(record.getMessage(), test['result_message'])
assert_equal(record.message, test['result_message'])
assert_equal(record.msg, test.get('result_msg', test['result_message']))
assert_equal(record.args, test.get('result_args'))
def test_convert_java_pattern_to_python(self):
assert_equal(_convert_java_pattern_to_python('1-2'), '1-2')
assert_equal(_convert_java_pattern_to_python('$1-$2'), '\\1-\\2')
assert_equal(_convert_java_pattern_to_python('\\$1-$2'), '$1-\\2')
assert_equal(_convert_java_pattern_to_python('\\$$1-$2'), '$\\1-\\2')
@raises(IOError)
def test_does_not_exist(self):
path = get_path('thisfiledoesnotexist.json')
parse_redaction_policy_from_file(path)
@raises(IOError)
def test_is_dir(self):
path = '/tmp'
parse_redaction_policy_from_file(path)
@raises(IOError)
def test_is_not_json(self):
path = get_path('not-json.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_version(self):
path = get_path('no-version.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_unknown_version(self):
path = get_path('unknown-version.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_alpha_version(self):
path = get_path('alpha-version.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_search(self):
path = get_path('no-search.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_replace(self):
path = get_path('no-replace.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_no_brace(self):
path = get_path('no-brace.json')
parse_redaction_policy_from_file(path)
@raises(re.error)
def test_bad_regex(self):
path = get_path('bad-regex.json')
parse_redaction_policy_from_file(path)
@raises(ValueError)
def test_extra_attr(self):
path = get_path('extra-attr.json')
parse_redaction_policy_from_file(path)
def test_empty_file(self):
path = get_path('empty.json')
policy = parse_redaction_policy_from_file(path)
assert_equal(MESSAGE, policy.redact(MESSAGE))
def test_empty_rules(self):
path = get_path('empty-rules.json')
policy = parse_redaction_policy_from_file(path)
assert_equal(MESSAGE, policy.redact(MESSAGE))
def test_basic_good1(self):
path = get_path('good-1.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("Hxllx, wxrld", policy.redact("Hello, world"))
def test_int_version(self):
path = get_path('verint.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("Hxllx, wxrld", policy.redact("Hello, world"))
def test_real_rules(self):
path = get_path('real-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("CC 1234-2345-3456-4576", "CC XXXX-XXXX-XXXX-XXXX"),
("CC 1234234534654576", "CC XXXXXXXXXXXXXXXX"),
("CC 1234,2345,3456,4576", "CC XXXX-XXXX-XXXX-XXXX"),
("SSN 123-45-6789", "SSN XXX-XX-XXXX"),
("SSN 123456789", "SSN XXXXXXXXX"),
("My password=Hello123", "My password=xxxxx"),
("Host www.cloudera.com", "Host HOSTNAME.REDACTED"),
("www.c1-foo.org rules!", "HOSTNAME.REDACTED rules!"),
("IP1 8.8.8.8", "IP1 0.0.0.0"),
("IP2 192.168.0.1", "IP2 0.0.0.0"),
("My email is [email protected]", "My email is [email protected]"),
("[email protected] is interesting", "[email protected] is interesting"),
("Multi 1234-2345-3456-4567\nLine 123-45-6789", "Multi XXXX-XXXX-XXXX-XXXX\nLine XXX-XX-XXXX"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_huge_rules(self):
path = get_path('huge-1.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("This string is not redadted", policy.redact(MESSAGE))
def test_back_refs(self):
path = get_path('replace-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("1234-2345-3456-4576", "XXXX-XXXX-XXXX-4576"),
("Words www.gmail.com is cool", "Words HOSTNAME.REDACTED.com is cool"),
("short.org", "HOSTNAME.REDACTED.org"),
("long.n4me.h-1.co.fr", "HOSTNAME.REDACTED.fr"),
("Ping 192.168.0.1", "Ping 0.192.1.168"),
("Magic word", "word: Magic word, word"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_ordering(self):
path = get_path('ordering-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("one", "four"),
("This one is a nice one", "This four is a nice four"),
("Please help me: ten", "Please help me: thirteen"),
("HappY abc", "HappY stu"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_case_sensitivity(self):
path = get_path('case-1.json')
policy = parse_redaction_policy_from_file(path)
messages = [
("Hello, world", "Hello, world"),
("Say aAa! aaa! AAAAAA!", "Say bbb! bbb! bbbbbb!"),
("I like dddogs. dDd", "I like dddogs. dDd"),
("Cccats. Dddogs", "Cccats. eEeogs"),
("Trigger fff gGg", "Trigger fff gGg"),
("Trigger fFf Ggg", "Trigger fFf Ggg"),
("Trigger fFf gGg", "Trigger fFf hHh"),
]
for message, redacted_message in messages:
assert_equal(redacted_message, policy.redact(message))
def test_multithreading(self):
path = get_path('numbers.json')
policy = parse_redaction_policy_from_file(path)
assert_equal("asdf####fdas### H#ll# w#rld", policy.redact("asdf1234fdas666 H3ll0 w0rld"))
errors = []
lock = threading.Lock()
regex = re.compile(r"[0-9]")
class TestThread(threading.Thread):
def run(self):
for i in xrange(500):
message = u''.join(random_utf8_char() for _ in xrange(128))
redacted_message = policy.redact(message)
if regex.search(redacted_message):
with lock:
errors.append((message, redacted_message))
break
threads = []
for i in xrange(10):
threads.append(TestThread())
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert_equal(errors, [])
def byte_range(first, last):
return list(range(first, last+1))
first_values = byte_range(0x00, 0x7F) + byte_range(0xC2, 0xF4)
trailing_values = byte_range(0x80, 0xBF)
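# random_utf8_char() below assembles one well-formed UTF-8 sequence byte by byte:
# first_values are the legal lead bytes (ASCII plus 0xC2-0xF4), and the special
# cases for 0xE0, 0xED, 0xF0 and 0xF4 constrain the second byte so the result
# never encodes an overlong form, a surrogate code point, or anything above
# U+10FFFF.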
def random_utf8_char():
first = random.choice(first_values)
if first <= 0x7F:
value = bytearray([first])
elif first <= 0xDF:
value = bytearray([first, random.choice(trailing_values)])
elif first == 0xE0:
value = bytearray([first, random.choice(byte_range(0xA0, 0xBF)), random.choice(trailing_values)])
elif first == 0xED:
value = bytearray([first, random.choice(byte_range(0x80, 0x9F)), random.choice(trailing_values)])
elif first <= 0xEF:
value = bytearray([first, random.choice(trailing_values), random.choice(trailing_values)])
elif first == 0xF0:
value = bytearray([first, random.choice(byte_range(0x90, 0xBF)), random.choice(trailing_values), random.choice(trailing_values)])
elif first <= 0xF3:
value = bytearray([first, random.choice(trailing_values), random.choice(trailing_values), random.choice(trailing_values)])
elif first == 0xF4:
value = bytearray([first, random.choice(byte_range(0x80, 0x8F)), random.choice(trailing_values), random.choice(trailing_values)])
return value.decode('utf8')
| apache-2.0 | -2,513,246,936,025,765,400 | 31.892202 | 137 | 0.613137 | false |
mtbc/openmicroscopy | components/tools/OmeroPy/src/omero/plugins/cecog.py | 11 | 6684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods for working with cecog
Copyright 2010 University of Dundee, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import os
import re
import sys
from omero.cli import BaseControl, CLI
import omero
import omero.constants
from omero.rtypes import rstring
class CecogControl(BaseControl):
"""CeCog integration plugin.
Provides actions for preparing data and otherwise integrating with Cecog. See
the Run_Cecog_4.1.py script.
"""
# [MetaMorph_PlateScanPackage]
# regex_subdirectories = re.compile('(?=[^_]).*?(?P<D>\d+).*?')
# regex_position = re.compile('P(?P<P>.+?)_')
# continuous_frames = 1
regex_token = re.compile(r'(?P<Token>.+)\.')
regex_time = re.compile(r'T(?P<T>\d+)')
regex_channel = re.compile(r'_C(?P<C>.+?)(_|$)')
regex_zslice = re.compile(r'_Z(?P<Z>\d+)')
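    # For a MetaMorph-style name such as "tubulin_P0037_T00005_Cgfp_Z1_S1.tiff"
    # (the example used in merge() below), these patterns pick out the dimensions,
    # e.g. regex_time -> T='00005', regex_channel -> C='gfp', regex_zslice -> Z='1'
    # and regex_token -> 'tubulin_P0037_T00005_Cgfp_Z1_S1'.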
def _configure(self, parser):
sub = parser.sub()
merge = parser.add(sub, self.merge, self.merge.__doc__)
merge.add_argument("path", help="Path to image files")
rois = parser.add(sub, self.rois, self.rois.__doc__)
rois.add_argument(
"-f", "--file", required=True, help="Details file to be parsed")
rois.add_argument(
"-i", "--image", required=True,
help="Image id which should have ids attached")
for x in (merge, rois):
x.add_login_arguments()
#
# Public methods
#
def merge(self, args):
"""Uses PIL to read multiple planes from a local folder.
Planes are combined and uploaded to OMERO as new images with additional T, C,
Z dimensions.
It should be run as a local script (not via scripting service) in order that
it has access to the local user's file system. Therefore EMAN2 or PIL needs to be
installed locally.
Example usage:
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/0037/
Since this dir does not contain folders, this will upload images in '0037'
into a Dataset called Demo_data in a Project called 'Data'.
$ bin/omero cecog merge /Applications/CecogPackage/Data/Demo_data/
Since this dir does contain folders, this will look for images in all
subdirectories of 'Demo_data' and upload images into a Dataset called
Demo_data in a Project called 'Data'.
Images will be combined in Z, C and T according to the \
MetaMorph_PlateScanPackage naming convention.
E.g. tubulin_P0037_T00005_Cgfp_Z1_S1.tiff is Point 37, Timepoint 5, Channel \
gfp, Z 1. S?
see \
/Applications/CecogPackage/CecogAnalyzer.app/Contents/Resources/resources/\
naming_schemes.conf
"""
"""
Processes the command args, makes project and dataset then calls
uploadDirAsImages() to process and
upload the images to OMERO.
"""
from omero.rtypes import unwrap
from omero.util.script_utils import uploadDirAsImages
path = args.path
client = self.ctx.conn(args)
queryService = client.sf.getQueryService()
updateService = client.sf.getUpdateService()
pixelsService = client.sf.getPixelsService()
# if we don't have any folders in the 'dir' E.g.
# CecogPackage/Data/Demo_data/0037/
# then 'Demo_data' becomes a dataset
subDirs = []
for f in os.listdir(path):
fullpath = path + f
# process folders in root dir:
if os.path.isdir(fullpath):
subDirs.append(fullpath)
# get the dataset name and project name from path
if len(subDirs) == 0:
p = path[:-1] # will remove the last folder
p = os.path.dirname(p)
        else:
            if os.path.basename(path) == "":
                p = path[:-1]  # remove slash
            else:
                p = path  # path did not end with a separator
datasetName = os.path.basename(p) # e.g. Demo_data
p = p[:-1]
p = os.path.dirname(p)
projectName = os.path.basename(p) # e.g. Data
self.ctx.err("Putting images in Project: %s Dataset: %s"
% (projectName, datasetName))
# create dataset
dataset = omero.model.DatasetI()
dataset.name = rstring(datasetName)
dataset = updateService.saveAndReturnObject(dataset)
# create project
project = omero.model.ProjectI()
project.name = rstring(projectName)
project = updateService.saveAndReturnObject(project)
# put dataset in project
link = omero.model.ProjectDatasetLinkI()
link.parent = omero.model.ProjectI(project.id.val, False)
link.child = omero.model.DatasetI(dataset.id.val, False)
updateService.saveAndReturnObject(link)
if len(subDirs) > 0:
for subDir in subDirs:
self.ctx.err("Processing images in %s" % subDir)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, subDir, dataset)
self.ctx.out("%s" % unwrap(rv))
# if there are no sub-directories, just put all the images in the dir
else:
self.ctx.err("Processing images in %s" % path)
rv = uploadDirAsImages(client.sf, queryService, updateService,
pixelsService, path, dataset)
self.ctx.out("%s" % unwrap(rv))
def rois(self, args):
"""Parses an object_details text file, as generated by CeCog Analyzer
and saves the data as ROIs on an Image in OMERO.
Text file is of the form:
frame objID classLabel className centerX centerY mean sd
1 10 6 lateana 1119 41 76.8253796095 \
54.9305640673
Example usage:
bin/omero cecog rois -f \
Data/Demo_output/analyzed/0037/statistics/P0037__object_details.txt -i 502
"""
"""
Processes the command args, parses the object_details.txt file and
creates ROIs on the image specified in OMERO
"""
from omero.util.script_utils import uploadCecogObjectDetails
filePath = args.file
imageId = args.image
if not os.path.exists(filePath):
            self.ctx.die(654, "Could not find the object_details file at %s"
% filePath)
client = self.ctx.conn(args)
updateService = client.sf.getUpdateService()
ids = uploadCecogObjectDetails(updateService, imageId, filePath)
self.ctx.out("Rois created: %s" % len(ids))
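# "register" is expected to be provided by the OMERO CLI plugin loader when this
# module is imported as a plugin; when the file is run directly instead, the
# NameError fallback below builds its own CLI instance and invokes it.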
try:
register("cecog", CecogControl, CecogControl.__doc__)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("cecog", CecogControl, CecogControl.__doc__)
cli.invoke(sys.argv[1:])
| gpl-2.0 | -4,093,312,626,652,441,600 | 33.632124 | 78 | 0.624776 | false |
yordan-desta/QgisIns | python/plugins/processing/gui/ScriptEditorDialog.py | 2 | 7417 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptDialog.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.modeler.ModelerUtils import ModelerUtils
__author__ = 'Alexander Bruy'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import codecs
import sys
import json
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qsci import *
from qgis.core import *
from qgis.utils import iface
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.HelpEditionDialog import HelpEditionDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.algs.r.RUtils import RUtils
from processing.script.ScriptAlgorithm import ScriptAlgorithm
from processing.script.ScriptUtils import ScriptUtils
from processing.ui.ui_DlgScriptEditor import Ui_DlgScriptEditor
import processing.resources_rc
class ScriptEditorDialog(QDialog, Ui_DlgScriptEditor):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
hasChanged = False
def __init__(self, algType, alg):
QDialog.__init__(self)
self.setupUi(self)
self.setWindowFlags(Qt.WindowMinimizeButtonHint |
Qt.WindowMaximizeButtonHint |
Qt.WindowCloseButtonHint)
# Set icons
self.btnSave.setIcon(
QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnSaveAs.setIcon(
QgsApplication.getThemeIcon('/mActionFileSaveAs.svg'))
self.btnEditHelp.setIcon(QIcon(':/processing/images/edithelp.png'))
self.btnRun.setIcon(QIcon(':/processing/images/runalgorithm.png'))
self.btnCut.setIcon(QgsApplication.getThemeIcon('/mActionEditCut.png'))
self.btnCopy.setIcon(
QgsApplication.getThemeIcon('/mActionEditCopy.png'))
self.btnPaste.setIcon(
QgsApplication.getThemeIcon('/mActionEditPaste.png'))
self.btnUndo.setIcon(QgsApplication.getThemeIcon('/mActionUndo.png'))
self.btnRedo.setIcon(QgsApplication.getThemeIcon('/mActionRedo.png'))
# Connect signals and slots
self.btnSave.clicked.connect(self.save)
self.btnSaveAs.clicked.connect(self.saveAs)
self.btnEditHelp.clicked.connect(self.editHelp)
self.btnRun.clicked.connect(self.runAlgorithm)
self.btnCut.clicked.connect(self.editor.cut)
self.btnCopy.clicked.connect(self.editor.copy)
self.btnPaste.clicked.connect(self.editor.paste)
self.btnUndo.clicked.connect(self.editor.undo)
self.btnRedo.clicked.connect(self.editor.redo)
self.editor.textChanged.connect(lambda: self.setHasChanged(True))
self.alg = alg
self.algType = algType
if self.alg is not None:
self.filename = self.alg.descriptionFile
self.editor.setText(self.alg.script)
else:
self.filename = None
self.update = False
self.help = None
self.setHasChanged(False)
self.editor.setLexerType(self.algType)
def editHelp(self):
if self.alg is None:
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
elif self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
else:
alg = self.alg
dlg = HelpEditionDialog(alg)
dlg.exec_()
        # We store the description strings in case they could not be saved yet
        # because no filename has been defined so far
if self.alg is None and dlg.descriptions:
self.help = dlg.descriptions
def save(self):
self.saveScript(False)
def saveAs(self):
self.saveScript(True)
def saveScript(self, saveAs):
if self.filename is None or saveAs:
if self.algType == self.SCRIPT_PYTHON:
scriptDir = ScriptUtils.scriptsFolder()
filterName = self.tr('Python scripts (*.py)')
elif self.algType == self.SCRIPT_R:
scriptDir = RUtils.RScriptsFolder()
filterName = self.tr('Processing R script (*.rsx)')
self.filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save script'), scriptDir,
filterName))
if self.filename:
if self.algType == self.SCRIPT_PYTHON \
and not self.filename.lower().endswith('.py'):
self.filename += '.py'
if self.algType == self.SCRIPT_R \
and not self.filename.lower().endswith('.rsx'):
self.filename += '.rsx'
text = unicode(self.editor.text())
if self.alg is not None:
self.alg.script = text
try:
with codecs.open(self.filename, 'w', encoding='utf-8') as fout:
fout.write(text)
except IOError:
QMessageBox.warning(self, self.tr('I/O error'),
self.tr('Unable to save edits. Reason:\n %s')
% unicode(sys.exc_info()[1]))
return
self.update = True
            # If help strings were defined before the script was saved for
            # the first time, save them to the .help file now
if self.help:
with open(self.filename + '.help', 'w') as f:
json.dump(self.help, f)
self.help = None
self.setHasChanged(False)
else:
self.filename = None
def setHasChanged(self, hasChanged):
self.hasChanged = hasChanged
self.btnSave.setEnabled(hasChanged)
def runAlgorithm(self):
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['script']
if self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['r']
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
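        # Remember the currently active map tool so it can be restored after
        # the algorithm dialog (which may install its own tool) is closed.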
dlg.show()
dlg.exec_()
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
| gpl-2.0 | -2,637,167,935,403,416,000 | 35.536946 | 79 | 0.569233 | false |
miki725/django-sub-query | sub_query/db/models/sql/compiler.py | 2 | 2389 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from django.contrib.gis.db.models.sql.compiler import * # noqa
class SubQueryGeoSQLCompiler(GeoSQLCompiler):
def __init__(self, *args, **kwargs):
super(SubQueryGeoSQLCompiler, self).__init__(*args, **kwargs)
self.is_subquery = False
def get_ordering(self):
if hasattr(self, '_get_ordering'):
values = self._get_ordering
else:
values = self._get_ordering = super(SubQueryGeoSQLCompiler, self).get_ordering()
ordering, o_params, ordering_group_by = values
if self.is_subquery:
ordering = []
return ordering, o_params, ordering_group_by
def pre_sql_setup(self):
if hasattr(self, '_pre_sql_setup'):
return self._pre_sql_setup
self._pre_sql_setup = super(SubQueryGeoSQLCompiler, self).pre_sql_setup()
return self._pre_sql_setup
def get_columns(self, with_aliases=False):
if hasattr(self, '_get_columns'):
return self._get_columns
self._get_columns = super(SubQueryGeoSQLCompiler, self).get_columns(with_aliases)
return self._get_columns
def as_sql(self, with_limits=True, with_col_aliases=False):
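        # When the ordering does not start with the DISTINCT fields, the
        # DISTINCT query is executed as an inner query and the ORDER BY is
        # applied in an outer SELECT * wrapper -- typically to satisfy
        # PostgreSQL's rule that SELECT DISTINCT ON expressions must match the
        # initial ORDER BY expressions. Roughly (illustrative shape only):
        #   SELECT * FROM ( <distinct query without ordering> ) "db_table" ORDER BY ...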
# these calls are required in order to get ordering columns
self.pre_sql_setup()
self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
self.is_subquery = False
if self.query.distinct and ordering:
distinct_ordering_pairs = list(zip(distinct_fields, ordering))
if not all(map(lambda i: i[1].startswith(i[0]), distinct_ordering_pairs)):
self.is_subquery = True
sql, params = super(SubQueryGeoSQLCompiler, self).as_sql(
with_limits=with_limits, with_col_aliases=with_col_aliases
)
if self.is_subquery:
sql = ' '.join(filter(None, [
'SELECT',
'*',
'FROM (',
'{}'.format(sql),
')',
'"{}"'.format(self.query.model._meta.db_table),
'ORDER BY',
'{}'.format(', '.join(ordering)),
] + o_params))
self.is_subquery = False
return sql, params
| mit | -3,160,998,684,231,973,400 | 32.180556 | 92 | 0.577648 | false |
luisgg/iteexe | exe/export/singlepage.py | 1 | 6261 | # ===========================================================================
# eXe
# Copyright 2004-2005, University of Auckland
# Copyright 2004-2008 eXe Project, http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
This class transforms an eXe node into a page on a single page website
"""
import logging
import re
from cgi import escape
from urllib import quote
from exe.webui.blockfactory import g_blockFactory
from exe.engine.error import Error
from exe.engine.path import Path
from exe.export.pages import Page, uniquifyNames
from exe.webui import common
from exe import globals as G
log = logging.getLogger(__name__)
# ===========================================================================
class SinglePage(Page):
"""
This class transforms an eXe node into a page on a single page website
"""
def save(self, filename, for_print=0):
"""
Save page to a file.
'outputDir' is the directory where the filenames will be saved
(a 'path' instance)
"""
outfile = open(filename, "wb")
outfile.write(self.render(self.node.package,for_print).encode('utf8'))
outfile.close()
def render(self, package, for_print=0):
"""
Returns an XHTML string rendering this page.
"""
html = self.renderHeader(package.title, for_print)
if for_print:
# include extra onload bit:
html += u'<body onload="print_page()">\n'
else:
html += u"<body>\n"
html += u"<div id=\"content\">\n"
html += u"<div id=\"header\">\n"
html += "<h1>"+escape(package.title)+"</h1>"
html += u"</div>\n"
html += u"<div id=\"main\">\n"
html += self.renderNode(package.root, 1)
html += u"</div>\n"
html += self.renderLicense()
html += self.renderFooter()
html += u"</div>\n"
html += u"</body></html>\n"
        # JR: Remove the equation attributes (exe_math_latex, exe_math_size)
aux = re.compile("exe_math_latex=\"[^\"]*\"")
html = aux.sub("", html)
aux = re.compile("exe_math_size=\"[^\"]*\"")
html = aux.sub("", html)
        # JR: Fix the path of the glossary links and the &amp;
html = html.replace("../../../../../mod/glossary", "../../../../mod/glossary")
html = html.replace("&concept", "&concept")
return html
def renderHeader(self, name, for_print=0):
"""
Returns an XHTML string for the header of this page.
"""
html = u"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
html += u'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 '
html += u'Transitional//EN" '
html += u'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
lenguaje = G.application.config.locale
html += u"<html lang=\"" + lenguaje + "\" xml:lang=\"" + lenguaje + "\" xmlns=\"http://www.w3.org/1999/xhtml\">\n"
html += u"<head>\n"
html += u"<style type=\"text/css\">\n"
html += u"@import url(base.css);\n"
html += u"@import url(content.css);\n"
html += u"</style>"
html += u"<title>"
html += name
html += "</title>\n"
html += u"<meta http-equiv=\"Content-Type\" content=\"text/html; "
html += u" charset=utf-8\" />\n";
html += u'<script type="text/javascript" src="common.js"></script>\n'
if for_print:
# include extra print-script for onload bit
html += u'<script type="text/javascript">\n'
html += u'function print_page() {\n'
html += u' window.print();\n'
html += u' window.close();\n'
html += u'}\n'
html += u'</script>\n'
html += u"</head>\n"
return html
    # JR: modified this function so it writes an hX heading for each node level
def renderNode(self, node, nivel):
"""
Returns an XHTML string for this node and recurse for the children
"""
html = ""
html += '<div class="node">\n'
html += ' <div class=\"nodeDecoration\">'
html += '<h' + str(nivel) + ' class=\"nodeTitle\">'
html += escape(node.titleLong)
html += '</h' + str(nivel) + '></div>\n'
style = self.node.package.style
for idevice in node.idevices:
html += u' <div class="%s" id="id%s">\n' % (idevice.klass,
idevice.id)
block = g_blockFactory.createBlock(None, idevice)
if not block:
log.critical("Unable to render iDevice.")
raise Error("Unable to render iDevice.")
if hasattr(idevice, "isQuiz"):
html += block.renderJavascriptForWeb()
html += self.processInternalLinks(block.renderView(style))
html += u' </div>\n' # iDevice div
html += '</div>\n' # node div
for child in node.children:
html += self.renderNode(child, nivel+1)
return html
def processInternalLinks(self, html):
"""
take care of any internal links which are in the form of:
href="exe-node:Home:Topic:etc#Anchor"
For this SinglePage Export, go ahead and keep the #Anchor portion,
but remove the 'exe-node:Home:Topic:etc', since it is all
exported into the same file.
"""
return common.removeInternalLinkNodes(html)
| gpl-2.0 | 4,817,701,529,972,917,000 | 36.716867 | 122 | 0.546079 | false |
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/health_icd10/__init__.py | 1 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# GNU Health: The Free Health and Hospital Information System
# Copyright (C) 2008-2016 Luis Falcon <[email protected]>
# Copyright (C) 2011-2016 GNU Solidario <[email protected]>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from health_icd10 import *
| gpl-3.0 | 4,977,416,208,658,131,000 | 44.166667 | 78 | 0.616236 | false |
weynsee/chompy | chompy/chom.py | 1 | 10122 | # Copyright 2009 Wayne See
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import server
import appuifw
import e32
import chm_filebrowser
import os
import e32dbm
CONF_FILE = u"E:\\Data\\chompy\\chompy.cfg"
INIT_FILE = u"E:\\Data\\chompy\\online.html"
LOCAL_FILE = u"E:\\Data\\chompy\\offline.html"
SEPARATOR = u"/"
INIT_HTML = u"""<html>
<body>
<script type="text/javascript">
location.replace("http://localhost:""" + unicode(server.PORT) + """/%s")
</script>
</body>
</html>
"""
ERROR_TEMPLATE = """<html>
<body>
%s
</body>
</html>
"""
ERR_READING = u"CHM File cannot be read"
ERR_NO_HHC = u"CHM File contains no HHC file"
if not os.path.exists("E:\\Data\\chompy"):
os.makedirs("E:\\Data\\chompy")
class Chompy:
def __init__(self):
self.app_lock = e32.Ao_lock()
self.fb = chm_filebrowser.Filebrowser()
self.load_recent()
self.hhc_callback = e32.ao_callgate(self.load_hhc_viewer)
def load_recent(self):
try:
db = e32dbm.open(CONF_FILE, "c")
recent = db["recent"]
if recent:
self.recent = recent.split(SEPARATOR)
else:
self.recent = []
db.close()
except:
self.recent = []
def save_recent(self):
db = e32dbm.open(CONF_FILE, "wf")
try:
db["recent"] = SEPARATOR.join(self.recent)
finally:
db.close()
def browse(self):
self.fb.show()
selected = self.fb.selected
if selected:
file = unicode(selected)
if file not in self.recent:
self.recent.append(file)
self.update_list(len(self.recent) - 1)
self.open(file)
else:
self.refresh()
def to_display(self, filename):
return unicode(os.path.basename(filename))
def update_list(self, selected_index=None):
if self.recent:
self.lb.set_list(self.get_list(), selected_index)
else:
self.lb.set_list(self.get_list())
def get_list(self):
if self.recent:
return map(self.to_display, self.recent)
else:
return [u"Select file"]
def lb_observe(self, index=None):
if index is None:
index = self.lb.current()
if not self.recent:
self.browse()
else:
self.open(self.recent[index])
def open(self, filename=None):
if filename is None:
filename = self.recent[self.lb.current()]
res = appuifw.popup_menu([u"Offline Mode", u"Online Mode"])
if res == 0:
self.open_offline(filename)
elif res == 1:
self.open_online(filename)
def open_online(self, filename):
server.start(filename, self.hhc_callback)
stall()
def open_offline(self, filename):
stall()
e32.ao_yield()
import pychmlib
try:
chm_file = pychmlib.chm.chm(filename)
except:
appuifw.note(ERR_READING, "error")
self.refresh()
return
try:
hhc_file = chm_file.get_hhc()
if hhc_file:
import hhc
hhc_obj = hhc.parse(hhc_file.get_content())
viewer = HHCViewer(filename, hhc_obj, chm_file.encoding)
viewer.set_as_offline(chm_file)
viewer.show()
self.quit()
else:
appuifw.note(ERR_NO_HHC, "error")
self.refresh()
return
finally:
chm_file.close()
def load_hhc_viewer(self, filename=None, contents=None, encoding=None, error=None):
if not error:
viewer = HHCViewer(filename, contents, encoding)
viewer.show()
server.stop() #if there is an error, no need to stop server
self.exit_screen()
else:
if error == server.ERR_INVALID_CHM:
appuifw.note(ERR_READING, "error")
elif error == server.ERR_NO_HHC:
appuifw.note(ERR_NO_HHC, "error")
self.refresh()
def remove(self):
index = self.lb.current()
del self.recent[index]
self.update_list(index)
def quit(self):
self.save_recent()
self.app_lock.signal()
def refresh(self):
menu_list = [(u"Browse for file", self.browse), (u"Exit", self.quit)]
if self.recent:
menu_list.insert(0, (u"Open", self.open))
menu_list.insert(2, (u"Remove", self.remove))
appuifw.app.menu = menu_list
appuifw.app.exit_key_handler = self.quit
appuifw.app.title = u"chompy"
appuifw.app.body = self.lb
def exit_screen(self):
appuifw.app.menu = []
appuifw.app.exit_key_handler = self.quit
appuifw.app.title = u"chompy"
text = appuifw.Text()
text.set(u"Application can now be safely closed.")
appuifw.app.body = text
def show(self):
self.lb = appuifw.Listbox(self.get_list(), self.lb_observe)
self.refresh()
self.app_lock.wait()
self.lb = None
self.hhc_callback = None
self.fb = None
appuifw.app.body = None
appuifw.app.exit_key_handler = None
class HHCViewer:
def __init__(self, filename, hhc_obj, encoding):
self.title = os.path.basename(filename)
self.chm_file = None
self.current_context = hhc_obj
self.encoding = encoding
self.app_lock = e32.Ao_lock()
def set_as_offline(self, chm_file):
self.chm_file = chm_file
def to_displayable_list(self):
entries = map(lambda x: x.name.decode(self.encoding), self.current_context.children)
if not self.current_context.is_root:
entries.insert(0, u"..")
return entries
def lb_observe(self, index=None):
if index is None:
index = self.lb.current()
if index == 0 and not self.current_context.is_root:
#go back up
selected = self.current_context.parent
else:
selected_index = index
if not self.current_context.is_root:
selected_index -= 1
selected = self.current_context.children[selected_index]
if selected.is_inner_node:
if selected.local:
res = appuifw.popup_menu([u"Load page", u"List contents"])
if res == 0:
self.load_in_viewer(selected.local)
elif res == 1:
self.load_directory(selected)
else:
self.load_directory(selected)
else:
self.load_in_viewer(selected.local)
def load_directory(self, entry):
self.current_context = entry
entries = self.to_displayable_list()
self.lb.set_list(entries)
def load_in_viewer(self, local):
if self.chm_file:
self.load_offline(local)
else:
self.load_online(local)
def load_online(self, local):
self.open_local_html(INIT_FILE, INIT_HTML % local)
def open_local_html(self, filename, content):
html_file = open(filename, "wb")
try:
html_file.write(content)
finally:
html_file.close()
browser_lock = e32.Ao_lock()
viewer = appuifw.Content_handler(browser_lock.signal)
viewer.open(filename)
browser_lock.wait()
def load_offline(self, local):
stall(u"Please wait while page is extracted from the archive...")
e32.ao_yield()
ui = self.chm_file.resolve_object("/"+local)
try:
if ui:
content = ui.get_content()
else:
content = ERROR_TEMPLATE % "Page cannot be found"
except:
content = ERROR_TEMPLATE % "Page could not be displayed"
try:
self.open_local_html(LOCAL_FILE, content)
self.refresh()
except:
self.refresh()
def quit(self):
appuifw.app.exit_key_handler = None
self.app_lock.signal()
def open(self):
self.lb_observe()
def refresh(self):
appuifw.app.menu = [(u"Open", self.open), (u"Exit", self.quit)]
appuifw.app.exit_key_handler = self.quit
appuifw.app.title = self.title
appuifw.app.body = self.lb
def show(self):
entries = self.to_displayable_list()
self.lb = appuifw.Listbox(entries, self.lb_observe)
self.refresh()
self.app_lock.wait()
self.lb = None
appuifw.app.body = None
def stall(msg = u"Please wait while CHM file is being read..."):
appuifw.app.menu = []
appuifw.app.title = u"Loading..."
text = appuifw.Text()
text.style = appuifw.STYLE_ITALIC
text.set(msg)
appuifw.app.body = text
appuifw.app.exit_key_handler = stop_quit
def stop_quit():
appuifw.note(u"Cannot exit until process has finished", "info")
if __name__ == '__main__':
Chompy().show() | apache-2.0 | 3,881,770,356,532,504,600 | 29.736677 | 92 | 0.535072 | false |
cubarco/tunasync | tunasync/jobs.py | 1 | 4053 | #!/usr/bin/env python2
# -*- coding:utf-8 -*-
import sh
import sys
from setproctitle import setproctitle
import signal
import Queue
import traceback
def run_job(sema, child_q, manager_q, provider, **settings):
aquired = False
setproctitle("tunasync-{}".format(provider.name))
def before_quit(*args):
provider.terminate()
if aquired:
print("{} release semaphore".format(provider.name))
sema.release()
sys.exit(0)
def sleep_wait(timeout):
try:
msg = child_q.get(timeout=timeout)
if msg == "terminate":
manager_q.put(("CONFIG_ACK", (provider.name, "QUIT")))
return True
except Queue.Empty:
return False
signal.signal(signal.SIGTERM, before_quit)
if provider.delay > 0:
if sleep_wait(provider.delay):
return
max_retry = settings.get("max_retry", 1)
def _real_run(idx=0, stage="job_hook", ctx=None):
"""\
4 stages:
0 -> job_hook, 1 -> set_retry, 2 -> exec_hook, 3 -> exec
"""
assert(ctx is not None)
if stage == "exec":
# exec_job
try:
provider.run(ctx=ctx)
provider.wait()
except sh.ErrorReturnCode:
status = "fail"
else:
status = "success"
return status
elif stage == "set_retry":
# enter stage 3 with retry
for retry in range(max_retry):
status = "syncing"
manager_q.put(("UPDATE", (provider.name, status, ctx)))
print("start syncing {}, retry: {}".format(provider.name, retry))
status = _real_run(idx=0, stage="exec_hook", ctx=ctx)
if status == "success":
break
return status
# job_hooks
elif stage == "job_hook":
if idx == len(provider.hooks):
return _real_run(idx=idx, stage="set_retry", ctx=ctx)
hook = provider.hooks[idx]
hook_before, hook_after = hook.before_job, hook.after_job
status = "pre-syncing"
elif stage == "exec_hook":
if idx == len(provider.hooks):
return _real_run(idx=idx, stage="exec", ctx=ctx)
hook = provider.hooks[idx]
hook_before, hook_after = hook.before_exec, hook.after_exec
status = "syncing"
try:
# print("%s run before_%s, %d" % (provider.name, stage, idx))
hook_before(provider=provider, ctx=ctx)
status = _real_run(idx=idx+1, stage=stage, ctx=ctx)
except Exception:
traceback.print_exc()
status = "fail"
finally:
# print("%s run after_%s, %d" % (provider.name, stage, idx))
# job may break when syncing
if status != "success":
status = "fail"
try:
hook_after(provider=provider, status=status, ctx=ctx)
except Exception:
traceback.print_exc()
return status
while 1:
try:
sema.acquire(True)
except:
break
aquired = True
ctx = {} # put context info in it
ctx['current_dir'] = provider.local_dir
ctx['mirror_name'] = provider.name
status = "pre-syncing"
manager_q.put(("UPDATE", (provider.name, status, ctx)))
try:
status = _real_run(idx=0, stage="job_hook", ctx=ctx)
except Exception:
traceback.print_exc()
status = "fail"
finally:
sema.release()
aquired = False
print("syncing {} finished, sleep {} minutes for the next turn".format(
provider.name, provider.interval
))
manager_q.put(("UPDATE", (provider.name, status, ctx)))
if sleep_wait(timeout=provider.interval * 60):
break
# vim: ts=4 sw=4 sts=4 expandtab
| gpl-3.0 | -5,656,886,585,572,669,000 | 29.022222 | 81 | 0.515174 | false |
SickGear/SickGear | lib/hachoir_py2/parser/archive/cab.py | 2 | 11532 | """
Microsoft Cabinet (CAB) archive.
Author: Victor Stinner, Robert Xiao
Creation date: 31 january 2007
- Microsoft Cabinet SDK
http://msdn2.microsoft.com/en-us/library/ms974336.aspx
"""
from __future__ import absolute_import
from hachoir_py2.parser import Parser
from hachoir_py2.field import (FieldSet, Enum,
CString, String,
UInt8, UInt16, UInt32, Bit, Bits, PaddingBits, NullBits,
DateTimeMSDOS32, RawBytes, CustomFragment)
from hachoir_py2.core.text_handler import textHandler, hexadecimal, filesizeHandler
from hachoir_py2.core.endian import LITTLE_ENDIAN
from hachoir_py2.core.tools import paddingSize
from hachoir_py2.stream import StringInputStream
from hachoir_py2.parser.archive.lzx import LZXStream, lzx_decompress
from hachoir_py2.parser.archive.zlib import DeflateBlock
MAX_NB_FOLDER = 30
COMPRESSION_NONE = 0
COMPRESSION_NAME = {
0: "Uncompressed",
1: "Deflate",
2: "Quantum",
3: "LZX",
}
class Folder(FieldSet):
def createFields(self):
yield UInt32(self, "offset", "Offset to data (from file start)")
yield UInt16(self, "data_blocks", "Number of data blocks which are in this cabinet")
yield Enum(Bits(self, "compr_method", 4, "Compression method"), COMPRESSION_NAME)
if self["compr_method"].value in [2, 3]: # Quantum or LZX use compression level
yield PaddingBits(self, "padding[]", 4)
yield Bits(self, "compr_level", 5, "Compression level")
yield PaddingBits(self, "padding[]", 3)
else:
yield PaddingBits(self, "padding[]", 12)
if self["../flags/has_reserved"].value and self["../reserved_folder_size"].value:
yield RawBytes(self, "reserved_folder", self["../reserved_folder_size"].value, "Per-folder reserved area")
def createDescription(self):
text = "Folder: compression %s" % self["compr_method"].display
if self["compr_method"].value in [2, 3]: # Quantum or LZX use compression level
text += " (level %u: window size %u)" % (self["compr_level"].value, 2 ** self["compr_level"].value)
return text
class CabFileAttributes(FieldSet):
def createFields(self):
yield Bit(self, "readonly")
yield Bit(self, "hidden")
yield Bit(self, "system")
yield Bits(self, "reserved[]", 2)
yield Bit(self, "archive", "Has the file been modified since the last backup?")
yield Bit(self, "exec", "Run file after extraction?")
yield Bit(self, "name_is_utf", "Is the filename using UTF-8?")
yield Bits(self, "reserved[]", 8)
class File(FieldSet):
def createFields(self):
yield filesizeHandler(UInt32(self, "filesize", "Uncompressed file size"))
yield UInt32(self, "folder_offset", "File offset in uncompressed folder")
yield Enum(UInt16(self, "folder_index", "Containing folder ID (index)"), {
0xFFFD: "Folder continued from previous cabinet (real folder ID = 0)",
0xFFFE: "Folder continued to next cabinet (real folder ID = %i)" % (self["../nb_folder"].value - 1),
0xFFFF: "Folder spanning previous, current and next cabinets (real folder ID = 0)"})
yield DateTimeMSDOS32(self, "timestamp")
yield CabFileAttributes(self, "attributes")
if self["attributes/name_is_utf"].value:
yield CString(self, "filename", charset="UTF-8")
else:
yield CString(self, "filename", charset="ASCII")
def createDescription(self):
return "File %s (%s)" % (
self["filename"].display, self["filesize"].display)
class Flags(FieldSet):
static_size = 16
def createFields(self):
yield Bit(self, "has_previous")
yield Bit(self, "has_next")
yield Bit(self, "has_reserved")
yield NullBits(self, "padding", 13)
class DataBlock(FieldSet):
def __init__(self, *args, **kwargs):
FieldSet.__init__(self, *args, **kwargs)
size = (self["size"].value + 8) * 8 # +8 for header values
if self["/flags/has_reserved"].value:
size += self["/reserved_data_size"].value * 8
self._size = size
def createFields(self):
yield textHandler(UInt32(self, "crc32"), hexadecimal)
yield UInt16(self, "size")
yield UInt16(self, "uncompressed_size", "If this is 0, this block is continued in a subsequent cabinet")
if self["/flags/has_reserved"].value and self["/reserved_data_size"].value:
yield RawBytes(self, "reserved_data", self["/reserved_data_size"].value, "Per-datablock reserved area")
compr_method = self.parent.folder["compr_method"].value
if compr_method == 0: # Uncompressed
yield RawBytes(self, "data", self["size"].value, "Folder Data")
self.parent.uncompressed_data += self["data"].value
elif compr_method == 1: # MSZIP
yield String(self, "mszip_signature", 2, "MSZIP Signature (CK)")
yield DeflateBlock(self, "deflate_block", self.parent.uncompressed_data)
padding = paddingSize(self.current_size, 8)
if padding:
yield PaddingBits(self, "padding[]", padding)
self.parent.uncompressed_data = self["deflate_block"].uncomp_data
elif compr_method == 2: # Quantum
yield RawBytes(self, "compr_data", self["size"].value, "Compressed Folder Data")
elif compr_method == 3: # LZX
group = getattr(self.parent.folder, "lzx_group", None)
field = CustomFragment(self, "data", self["size"].value * 8, LZXStream, "LZX data fragment", group)
if group is None:
field.group.args["compr_level"] = self.parent.folder["compr_level"].value
self.parent.folder.lzx_group = field.group
yield field
class FolderParser(Parser):
endian = LITTLE_ENDIAN
def createFields(self):
for file in sorted(self.files, key=lambda x: x["folder_offset"].value):
padding = self.seekByte(file["folder_offset"].value)
if padding:
yield padding
yield RawBytes(self, "file[]", file["filesize"].value, file.description)
class FolderData(FieldSet):
def __init__(self, parent, name, folder, files, *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
def createInputStream(cis, source=None, **args):
stream = cis(source=source)
tags = args.setdefault("tags", [])
tags.extend(stream.tags)
tags.append(("class", FolderParser))
tags.append(("args", {'files': files}))
for unused in self:
pass
if folder["compr_method"].value == 3: # LZX
self.uncompressed_data = lzx_decompress(self["block[0]/data"].getSubIStream(),
folder["compr_level"].value)
return StringInputStream(self.uncompressed_data, source=source, **args)
self.setSubIStream(createInputStream)
self.files = files
self.folder = folder # Folder fieldset
def createFields(self):
self.uncompressed_data = ""
for index in xrange(self.folder["data_blocks"].value):
block = DataBlock(self, "block[]")
for i in block:
pass
yield block
class CabFile(Parser):
endian = LITTLE_ENDIAN
MAGIC = "MSCF"
PARSER_TAGS = {
"id": "cab",
"category": "archive",
"file_ext": ("cab",),
"mime": (u"application/vnd.ms-cab-compressed",),
"magic": ((MAGIC, 0),),
"min_size": 1 * 8, # header + file entry
"description": "Microsoft Cabinet archive"
}
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid magic"
if self["major_version"].value != 1 or self["minor_version"].value != 3:
return "Unknown version (%i.%i)" % (self["major_version"].value, self["minor_version"].value)
if not (1 <= self["nb_folder"].value <= MAX_NB_FOLDER):
return "Invalid number of folder (%s)" % self["nb_folder"].value
return True
def createFields(self):
yield String(self, "magic", 4, "Magic (MSCF)", charset="ASCII")
yield textHandler(UInt32(self, "hdr_checksum", "Header checksum (0 if not used)"), hexadecimal)
yield filesizeHandler(UInt32(self, "filesize", "Cabinet file size"))
yield textHandler(UInt32(self, "fld_checksum", "Folders checksum (0 if not used)"), hexadecimal)
yield UInt32(self, "off_file", "Offset of first file")
yield textHandler(UInt32(self, "files_checksum", "Files checksum (0 if not used)"), hexadecimal)
yield UInt8(self, "minor_version", "Minor version (should be 3)")
yield UInt8(self, "major_version", "Major version (should be 1)")
yield UInt16(self, "nb_folder", "Number of folders")
yield UInt16(self, "nb_files", "Number of files")
yield Flags(self, "flags")
yield UInt16(self, "setid")
yield UInt16(self, "cabinet_serial", "Zero-based cabinet number")
if self["flags/has_reserved"].value:
yield UInt16(self, "reserved_header_size", "Size of per-cabinet reserved area")
yield UInt8(self, "reserved_folder_size", "Size of per-folder reserved area")
yield UInt8(self, "reserved_data_size", "Size of per-datablock reserved area")
if self["reserved_header_size"].value:
yield RawBytes(self, "reserved_header", self["reserved_header_size"].value, "Per-cabinet reserved area")
if self["flags/has_previous"].value:
yield CString(self, "previous_cabinet", "File name of previous cabinet", charset="ASCII")
yield CString(self, "previous_disk", "Description of disk/media on which previous cabinet resides",
charset="ASCII")
if self["flags/has_next"].value:
yield CString(self, "next_cabinet", "File name of next cabinet", charset="ASCII")
yield CString(self, "next_disk", "Description of disk/media on which next cabinet resides", charset="ASCII")
folders = []
files = []
for index in xrange(self["nb_folder"].value):
folder = Folder(self, "folder[]")
yield folder
folders.append(folder)
for index in xrange(self["nb_files"].value):
file = File(self, "file[]")
yield file
files.append(file)
folders = sorted(enumerate(folders), key=lambda x: x[1]["offset"].value)
for i in xrange(len(folders)):
index, folder = folders[i]
padding = self.seekByte(folder["offset"].value)
if padding:
yield padding
            # Collect only the files stored in this folder; use a separate list
            # so the full file list is not clobbered for the remaining folders.
            folder_files = []
            for file in files:
                if file["folder_index"].value == index:
                    folder_files.append(file)
if i + 1 == len(folders):
size = (self.size // 8) - folder["offset"].value
else:
size = (folders[i + 1][1]["offset"].value) - folder["offset"].value
            yield FolderData(self, "folder_data[%i]" % index, folder, folder_files, size=size * 8)
end = self.seekBit(self.size, "endraw")
if end:
yield end
def createContentSize(self):
return self["filesize"].value * 8
| gpl-3.0 | -1,445,820,675,758,761,000 | 43.697674 | 120 | 0.601283 | false |
topliceanu/learn | python/python_koans/python2/koans/about_lists.py | 1 | 3314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_creating_lists(self):
empty_list = list()
self.assertEqual(list, type(empty_list))
self.assertEqual(0, len(empty_list))
def test_list_literals(self):
nums = list()
self.assertEqual([], nums, 'empty lists should equal')
nums[0:] = [1]
self.assertEqual([1], nums)
nums[1:] = [2]
self.assertEqual([1, 2], nums)
nums.append(333)
self.assertEqual([1, 2, 333], nums)
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual('peanut', noms[0])
self.assertEqual('jelly', noms[3])
self.assertEqual('jelly', noms[-1])
self.assertEqual('butter', noms[-3])
def test_slicing_lists(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['peanut'], noms[0:1])
self.assertEqual(['peanut', 'butter'], noms[0:2])
self.assertEqual([], noms[2:2])
self.assertEqual(['and', 'jelly'], noms[2:20])
self.assertEqual([], noms[4:0])
self.assertEqual([], noms[4:100])
self.assertEqual([], noms[5:0])
def test_slicing_to_the_edge(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(['and', 'jelly'], noms[2:])
self.assertEqual(['peanut', 'butter'], noms[:2])
def test_lists_and_ranges(self):
self.assertEqual(list, type(range(5)))
self.assertEqual([0, 1, 2, 3, 4], range(5))
self.assertEqual([5, 6, 7, 8], range(5, 9))
def test_ranges_with_steps(self):
self.assertEqual([0, 2, 4, 6], range(0, 8, 2))
self.assertEqual([1, 4, 7], range(1, 8, 3))
self.assertEqual([5, 1, -3], range(5, -7, -4))
self.assertEqual([5, 1, -3, -7], range(5, -8, -4))
def test_insertions(self):
knight = ['you', 'shall', 'pass']
knight.insert(2, 'not')
self.assertEqual(['you', 'shall', 'not', 'pass'], knight)
knight.insert(0, 'Arthur')
self.assertEqual(['Arthur', 'you', 'shall', 'not', 'pass'], knight)
def test_popping_lists(self):
stack = [10, 20, 30, 40]
stack.append('last')
self.assertEqual([10, 20, 30, 40, 'last'], stack)
popped_value = stack.pop()
self.assertEqual('last', popped_value)
self.assertEqual([10, 20, 30, 40], stack)
popped_value = stack.pop(1)
self.assertEqual(20, popped_value)
self.assertEqual([10, 30, 40], stack)
# Notice that there is a "pop" but no "push" in python?
# Part of the Python philosophy is that there ideally should be one and
# only one way of doing anything. A 'push' is the same as an 'append'.
# To learn more about this try typing "import this" from the python
# console... ;)
def test_use_deques_for_making_queues(self):
from collections import deque
queue = deque([1, 2])
queue.append('last')
self.assertEqual([1, 2, 'last'], list(queue))
popped_value = queue.popleft()
self.assertEqual(1, popped_value)
self.assertEqual([2, 'last'], list(queue))
| mit | -3,316,874,178,908,327,000 | 30.264151 | 79 | 0.564273 | false |
ghevcoul/pycraft | pycraft/window.py | 1 | 10596 | # python imports
import math
# 3rd party imports
import pyglet.clock
import pyglet.graphics
import pyglet.window
from pyglet.gl import * # noqa
from pyglet.window import key, mouse
from pycraft.util import sectorize, cube_vertices, normalize
from pycraft.objects.block import get_block
from pycraft.configuration import ConfigurationLoader
# TICKS_PER_SEC = 60
# Convenience list of num keys.
NUMERIC_KEYS = [
key._1, key._2, key._3, key._4, key._5,
key._6, key._7, key._8, key._9, key._0
]
class Window(pyglet.window.Window):
def __init__(self, ticks_ps, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
self.set_world(None)
self.set_player(None)
self.ticks_per_second = ticks_ps
# The crosshairs at the center of the screen.
self.reticle = None
# The label that is displayed in the top left of the canvas.
self.game_info_label = pyglet.text.Label(
'', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
self.current_item_label = pyglet.text.Label(
'', font_name='Arial', font_size=18,
x=self.width - 10, y=10, anchor_x='right', anchor_y='bottom',
color=(0, 0, 0, 255))
# Whether or not the window exclusively captures the mouse.
self.set_exclusive_mouse(False)
# This call schedules the `update()` method to be called
# TICKS_PER_SEC. This is the main game event loop.
# pyglet.clock.schedule_interval(self.update, 1.0 / TICKS_PER_SEC)
pyglet.clock.schedule_interval(self.update, 1.0 / self.ticks_per_second)
config_loader = ConfigurationLoader()
self.config_data = config_loader.load_configuration_file()
def set_world(self, world):
self.world = world
def set_player(self, player):
self.player = player
def set_exclusive_mouse(self, exclusive):
"""If `exclusive` is True, the game will capture the mouse, if False the
game will ignore the mouse.
"""
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def on_mouse_press(self, x, y, button, modifiers):
"""Called when a mouse button is pressed. See pyglet docs for button
        and modifier mappings.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
button : int
Number representing mouse button that was clicked. 1 = left button,
4 = right button.
modifiers : int
Number representing any modifying keys that were pressed when the
mouse button was clicked.
"""
if self.exclusive:
vector = self.player.get_sight_vector()
block, previous = self.world.hit_test(self.player.position, vector)
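            # hit_test() returns the block under the crosshair and the empty
            # cell crossed just before it along the sight line; new blocks are
            # placed into that adjacent cell below.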
if (button == mouse.RIGHT) or \
((button == mouse.LEFT) and (modifiers & key.MOD_CTRL)):
# ON OSX, control + left click = right click.
player_x, player_y, player_z = normalize(self.player.position)
if previous and self.player.block and \
previous != (player_x, player_y, player_z) and \
previous != (player_x, player_y - 1, player_z):
# make sure the block isn't in the players head or feet
self.world.add_block(previous, get_block(self.player.block))
self.player.adjust_inventory(self.player.block)
elif button == pyglet.window.mouse.LEFT and block:
texture = self.world.objects[block]
if texture.hit_and_destroy():
self.world.remove_block(block)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
"""Called when the player moves the mouse.
Parameters
----------
x, y : int
The coordinates of the mouse click. Always center of the screen if
the mouse is captured.
dx, dy : float
The movement of the mouse.
"""
if self.exclusive:
m = 0.15
x, y = self.player.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.player.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
"""Called when the player presses a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == getattr(key, self.config_data['controls']['forward']):
self.player.strafe_forward()
elif symbol == getattr(key, self.config_data['controls']['backward']):
self.player.strafe_backward()
elif symbol == getattr(key, self.config_data['controls']['right']):
self.player.strafe_right()
elif symbol == getattr(key, self.config_data['controls']['left']):
self.player.strafe_left()
elif symbol == getattr(key, self.config_data['controls']['jump']):
self.player.jump()
elif symbol == getattr(key, self.config_data['controls']['down']):
self.player.strafe_down()
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == getattr(key, self.config_data['controls']['fly']):
self.player.fly()
elif symbol in NUMERIC_KEYS:
self.player.switch_inventory(symbol - NUMERIC_KEYS[0])
def on_key_release(self, symbol, modifiers):
"""Called when the player releases a key. See pyglet docs for key
mappings.
Parameters
----------
symbol : int
Number representing the key that was pressed.
modifiers : int
Number representing any modifying keys that were pressed.
"""
if symbol == getattr(key, self.config_data['controls']['forward']):
self.player.strafe_backward()
elif symbol == getattr(key, self.config_data['controls']['backward']):
self.player.strafe_forward()
elif symbol == getattr(key, self.config_data['controls']['left']):
self.player.strafe_right()
elif symbol == getattr(key, self.config_data['controls']['right']):
self.player.strafe_left()
elif symbol == getattr(key, self.config_data['controls']['jump']):
self.player.strafe_down()
elif symbol == getattr(key, self.config_data['controls']['down']):
self.player.strafe_up()
def on_resize(self, width, height):
"""Called when the window is resized to a new `width` and `height`."""
# label
self.game_info_label.y = height - 10
self.current_item_label.x = width - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width // 2, self.height // 2
n = 10
self.reticle = pyglet.graphics.vertex_list(
4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def on_draw(self):
"""Called by pyglet to draw the canvas."""
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.world.start_shader()
self.world.batch.draw()
self.world.stop_shader()
self.draw_focused_block()
self.set_2d()
self.draw_labels()
self.draw_reticle()
def set_3d(self):
"""Configure OpenGL to draw in 3d."""
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.player.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.player.position
glTranslatef(-x, -y, -z)
def set_2d(self):
"""Configure OpenGL to draw in 2d."""
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def update(self, dt):
"""This method is scheduled to be called repeatedly by the pyglet
clock.
Parameters
----------
dt : float
The change in time since the last call.
"""
self.world.process_queue(self.ticks_per_second)
sector = sectorize(self.player.position)
if sector != self.world.sector:
self.world.change_sectors(self.world.sector, sector)
if self.world.sector is None:
self.world.process_entire_queue()
self.world.sector = sector
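        # Advance the physics in m smaller sub-steps and cap dt, which reduces
        # the chance that a long frame lets the player tunnel through blocks.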
m = 8
dt = min(dt, 0.2)
for _ in range(m):
self.player.update(dt / m, self.world.objects)
def draw_focused_block(self):
"""Draw black edges around the block that is currently under the
crosshairs.
"""
vector = self.player.get_sight_vector()
block = self.world.hit_test(self.player.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_labels(self):
"""Draw the label in the top left of the screen."""
x, y, z = self.player.position
self.game_info_label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.world._shown), len(self.world.objects))
self.game_info_label.draw()
self.current_item_label.text = self.player.block if self.player.block else "No items in inventory"
self.current_item_label.draw()
def draw_reticle(self):
"""Draw the crosshairs in the center of the screen."""
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
| mit | -7,912,588,687,094,050,000 | 37.530909 | 106 | 0.572763 | false |
jcfrank/myrepo | tests/test_git_config.py | 90 | 1229 | import os
import unittest
import git_config
def fixture(*paths):
"""Return a path relative to test/fixtures.
"""
return os.path.join(os.path.dirname(__file__), 'fixtures', *paths)
class GitConfigUnitTest(unittest.TestCase):
"""Tests the GitConfig class.
"""
def setUp(self):
"""Create a GitConfig object using the test.gitconfig fixture.
"""
config_fixture = fixture('test.gitconfig')
self.config = git_config.GitConfig(config_fixture)
def test_GetString_with_empty_config_values(self):
"""
Test config entries with no value.
[section]
empty
"""
val = self.config.GetString('section.empty')
self.assertEqual(val, None)
def test_GetString_with_true_value(self):
"""
Test config entries with a string value.
[section]
nonempty = true
"""
val = self.config.GetString('section.nonempty')
self.assertEqual(val, 'true')
def test_GetString_from_missing_file(self):
"""
Test missing config file
"""
config_fixture = fixture('not.present.gitconfig')
config = git_config.GitConfig(config_fixture)
val = config.GetString('empty')
self.assertEqual(val, None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -993,697,815,883,633,200 | 22.634615 | 68 | 0.656631 | false |
glorizen/nupic | examples/opf/clients/hotgym/prediction/one_gym/run.py | 21 | 5172 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
import nupic_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
"NOTE: You must run ./swarm.py before this, because model parameters\n"
"are required to run NuPIC.\n"
)
GYM_NAME = "rec-center-hourly" # or use "rec-center-every-15m-large"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
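# The specs below track 1-step-ahead prediction error (AAE and altMAPE) over a
# sliding window of 1000 records, for both the model's multi-step predictions
# and a trivial baseline.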
_METRIC_SPECS = (
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'aae', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
MetricSpec(field='kw_energy_consumption', metric='trivial',
inferenceElement='prediction',
params={'errorMetric': 'altMAPE', 'window': 1000, 'steps': 1}),
)
def createModel(modelParams):
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "kw_energy_consumption"})
return model
def getModelParamsFromName(gymName):
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
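# Illustrative example (values assumed): for gymName "rec-center-hourly" the
# import above resolves to model_params/rec_center_hourly_model_params.py and
# its MODEL_PARAMS dict is returned.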
def runIoThroughNupic(inputData, model, gymName, plot):
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_output.NuPICPlotOutput([gymName])
else:
output = nupic_output.NuPICFileOutput([gymName])
metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
counter = 0
for row in csvReader:
counter += 1
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
result.metrics = metricsManager.update(result)
if counter % 100 == 0:
print "Read %i lines..." % counter
print ("After %i records, 1-step altMAPE=%f" % (counter,
result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='altMAPE':steps=1:window=1000:"
"field=kw_energy_consumption"]))
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
output.write([timestamp], [consumption], [prediction])
inputFile.close()
output.close()
def runModel(gymName, plot=False):
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot)
| agpl-3.0 | -7,198,310,259,295,470,000 | 33.711409 | 78 | 0.6657 | false |
eternity-group/eternity | qa/rpc-tests/listtransactions.py | 1 | 10134 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, COIN
from io import BytesIO
def txFromHex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
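# txFromHex is used below to turn raw transaction hex back into a CTransaction
# so fields such as vin[0].nSequence can be edited before re-signing.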
class ListTransactionsTest(BitcoinTestFramework):
def setup_nodes(self):
#This test requires mocktime
enable_mocktime()
return start_nodes(4, self.options.tmpdir)
def run_test(self):
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
# mine a block, confirmations should change:
self.nodes[0].generate(1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid},
{"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
assert_array_result(self.nodes[1].listtransactions(),
{"txid":txid},
{"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"send"},
{"amount":Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid":txid, "category":"receive"},
{"amount":Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = { self.nodes[0].getnewaddress() : 0.11,
self.nodes[1].getnewaddress() : 0.22,
self.nodes[0].getaccountaddress("from1") : 0.33,
self.nodes[1].getaccountaddress("toself") : 0.44 }
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.11")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.11")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.22")},
{"txid":txid} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.33")},
{"txid":txid} )
assert_array_result(self.nodes[0].listtransactions(),
{"category":"receive","amount":Decimal("0.33")},
{"txid":txid, "account" : "from1"} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"send","amount":Decimal("-0.44")},
{"txid":txid, "account" : ""} )
assert_array_result(self.nodes[1].listtransactions(),
{"category":"receive","amount":Decimal("0.44")},
{"txid":txid, "account" : "toself"} )
multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
assert_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
{"category":"receive","amount":Decimal("0.1")},
{"txid":txid, "account" : "watchonly"} )
# rbf is disabled in Eternity Core
# self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert(not is_opt_in(self.nodes[0], txid_1))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
# Create tx2 using createrawtransaction
inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert(not is_opt_in(self.nodes[1], txid_2))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = txFromHex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = bytes_to_hex_str(tx3_modified.serialize())
tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert(is_opt_in(self.nodes[0], txid_3))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert(not is_opt_in(self.nodes[1], txid_4))
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = bytes_to_hex_str(tx3_b.serialize())
tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
assert(is_opt_in(self.nodes[0], txid_3b))
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
sync_mempools(self.nodes)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert(txid_3b not in self.nodes[0].getrawmempool())
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
| mit | 5,482,839,542,988,151,000 | 49.41791 | 113 | 0.579238 | false |
wkfwkf/statsmodels | examples/run_all.py | 34 | 1740 | """run all examples to make sure we don't get an exception
Note:
If an example contaings plt.show(), then all plot windows have to be closed
manually, at least in my setup.
uncomment plt.show() to show all plot windows
"""
from __future__ import print_function
from statsmodels.compat import input
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', # 'example_rpy.py',
'example_ols.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
# time series
'tsa/ex_arma2.py', 'tsa/ex_dates.py']
if __name__ == '__main__':
#temporarily disable show
import matplotlib.pyplot as plt
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
msg = """Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """
cont = input(msg)
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print('\n\nExecuting example file', run_all_f)
print('-----------------------' + '-' * len(run_all_f))
exec(open(run_all_f).read())
except:
# f might be overwritten in the executed file
print('**********************' + '*' * len(run_all_f))
print('ERROR in example file', run_all_f)
print('**********************' + '*' * len(run_all_f))
if stop_on_error:
raise
# reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
| bsd-3-clause | 9,140,511,130,762,936,000 | 30.636364 | 75 | 0.53046 | false |
kavasoglu/ocl_web | ocl_web/apps/collections/views.py | 1 | 33279 | """
OCL Collection views
"""
import logging
import re
import requests
import simplejson as json
from apps.core.utils import SearchStringFormatter
from apps.core.views import UserOrOrgMixin
from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import (HttpResponseRedirect, Http404)
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView, View
from django.views.generic.edit import FormView
from libs.ocl import OclApi, OclSearch, OclConstants
from .forms import (CollectionCreateForm, CollectionEditForm,
CollectionDeleteForm, CollectionVersionAddForm, CollectionVersionsEditForm)
logger = logging.getLogger('oclweb')
class CollectionsBaseView(UserOrOrgMixin):
def get_args(self):
super(CollectionsBaseView, self).get_args()
self.collection_id = self.kwargs.get('collection')
self.collection_version_id = self.kwargs.get('collection_version')
def get_collection_data(self, owner_type, owner_id, collection_id, field_name,
collection_version_id=None, search_params=None):
searcher = OclSearch(search_type=field_name,
search_scope=OclConstants.SEARCH_SCOPE_RESTRICTED,
params=search_params)
api = OclApi(self.request, debug=True, facets=True)
if collection_version_id:
search_response = api.get(
owner_type, owner_id, 'collections', collection_id,
collection_version_id, field_name,
params=searcher.search_params)
else:
search_response = api.get(
owner_type, owner_id, 'collections', collection_id, field_name,
params=searcher.search_params)
if search_response.status_code == 404:
raise Http404
elif search_response.status_code != 200:
search_response.raise_for_status()
# Process the results
searcher.process_search_results(
search_type=searcher.search_type, search_response=search_response,
search_params=search_params)
return searcher
def get_collection_versions(self, owner_type, owner_id, collection_id, search_params=None):
# Perform the search
searcher = OclSearch(search_type=OclConstants.RESOURCE_NAME_COLLECTION_VERSIONS,
search_scope=OclConstants.SEARCH_SCOPE_RESTRICTED,
params=search_params)
api = OclApi(self.request, debug=True, facets=False)
search_response = api.get(owner_type, owner_id, 'collections', collection_id, 'versions',
params=searcher.search_params)
if search_response.status_code == 404:
raise Http404
elif search_response.status_code != 200:
search_response.raise_for_status()
# Process the results
searcher.process_search_results(
search_type=searcher.search_type, search_response=search_response,
search_params=search_params)
return searcher
class CollectionReferencesView(CollectionsBaseView, TemplateView):
""" collection concept view. """
template_name = "collections/collection_references.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionReferencesView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
versions = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params={'limit': '0'})
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id, 'references',
collection_version_id=self.collection_version_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'References'
context['collection'] = collection
context['references'] = searcher.search_results
context['results'] = searcher.search_results
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['search_query'] = searcher.get_query()
context['search_filters'] = searcher.search_filter_list
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = self.request.GET.get('search_sort', 'ASC')
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
context['collection_versions'] = versions.search_results
return context
class CollectionMappingsView(CollectionsBaseView, TemplateView):
""" collection concept view. """
template_name = "collections/collection_mappings.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionMappingsView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# to fetch all , set limit to 0
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
versions = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params={'limit': '0'})
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id, OclConstants.RESOURCE_NAME_MAPPINGS,
collection_version_id=self.collection_version_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'Mappings'
context['collection'] = collection
context['collection_version'] = self.collection_version_id
context['results'] = searcher.search_results
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['search_query'] = searcher.get_query()
context['search_filters'] = searcher.search_filter_list
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = searcher.get_sort()
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
context['collection_versions'] = versions.search_results
return context
def get(self, request, *args, **kwargs):
if request.is_ajax():
self.get_args()
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id,
OclConstants.RESOURCE_NAME_MAPPINGS,
collection_version_id=self.collection_version_id,
search_params=self.request.GET
)
response = {
'items': searcher.search_results,
'per_page': searcher.num_per_page,
'total': searcher.num_found,
}
return HttpResponse(
json.dumps(response),
content_type="application/json"
)
        return super(CollectionMappingsView, self).get(request, *args, **kwargs)
class CollectionConceptsView(CollectionsBaseView, TemplateView):
""" collection concept view. """
template_name = "collections/collection_concepts.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionConceptsView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
# to fetch all , set limit to 0
versions = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params={'limit': '0'})
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id, OclConstants.RESOURCE_NAME_CONCEPTS,
collection_version_id=self.collection_version_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'Concepts'
context['collection'] = collection
context['collection_version'] = self.collection_version_id
context['results'] = searcher.search_results
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['search_query'] = self.search_string if hasattr(self, 'search_string') else ''
context['search_filters'] = searcher.search_filter_list
context['search_sort_options'] = searcher.get_sort_options()
context['search_sort'] = searcher.get_sort()
context['search_facets_json'] = searcher.search_facets
context['search_filters_debug'] = str(searcher.search_filter_list)
context['collection_versions'] = versions.search_results
return context
def get(self, request, *args, **kwargs):
self.search_string = request.GET.get('q', '')
SearchStringFormatter.add_wildcard(request)
if request.is_ajax():
self.get_args()
# Load the concepts in this collection, applying search parameters
searcher = self.get_collection_data(
self.owner_type, self.owner_id, self.collection_id,
OclConstants.RESOURCE_NAME_CONCEPTS,
collection_version_id=self.collection_version_id,
search_params=self.request.GET
)
response = {
'items': searcher.search_results,
'per_page': searcher.num_per_page,
'total': searcher.num_found,
}
return HttpResponse(
json.dumps(response),
content_type="application/json"
)
        return super(CollectionConceptsView, self).get(request, *args, **kwargs)
class CollectionVersionsView(CollectionsBaseView, TemplateView):
""" collection About view. """
template_name = "collections/collection_versions.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionVersionsView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# Load the collection versions
params = self.request.GET.copy()
params['verbose'] = 'true'
params['limit'] = '10'
searcher = self.get_collection_versions(
self.owner_type, self.owner_id, self.collection_id,
search_params=params)
search_results_paginator = Paginator(range(searcher.num_found), searcher.num_per_page)
search_results_current_page = search_results_paginator.page(searcher.current_page)
for collection_version in searcher.search_results:
if '_ocl_processing' in collection_version and collection_version['_ocl_processing']:
collection_version['is_processing'] = 'True'
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['current_page'] = search_results_current_page
context['pagination_url'] = self.request.get_full_path()
context['selected_tab'] = 'Versions'
context['collection'] = collection
context['collection_versions'] = searcher.search_results
return context
def get(self, request, *args, **kwargs):
self.get_args()
if request.is_ajax():
api = OclApi(self.request, debug=True)
result = api.get(self.owner_type, self.owner_id, 'collections',
kwargs.get('collection'), 'versions', params={'limit': '0'})
return HttpResponse(json.dumps(result.json()), content_type="application/json")
        return super(CollectionVersionsView, self).get(request, *args, **kwargs)
class CollectionAboutView(CollectionsBaseView, TemplateView):
""" Collection About view. """
template_name = "collections/collection_about.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionAboutView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
about = None
if ('extras' in collection and isinstance(collection['extras'], dict) and
'about' in collection['extras']):
about = collection['extras'].get('about')
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['selected_tab'] = 'About'
context['collection'] = collection
context['about'] = about
return context
class CollectionDetailView(CollectionsBaseView, TemplateView):
""" Collection detail views """
template_name = "collections/collection_details.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionDetailView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
if results.status_code != 200:
if results.status_code == 404:
raise Http404
else:
results.raise_for_status()
collection = results.json()
context['kwargs'] = self.kwargs
context['collection'] = collection
context['selected_tab'] = 'Details'
return context
class CollectionCreateView(CollectionsBaseView, FormView):
"""
Create new Collection, either for an org or a user.
"""
form_class = CollectionCreateForm
template_name = "collections/collection_create.html"
def get_initial(self):
""" Load some useful data, not really for form display but internal use """
self.get_args()
data = {
'org_id': self.org_id,
'user_id': self.user_id,
'from_org': self.from_org,
'from_user': self.from_user,
'request': self.request,
}
return data
def get_context_data(self, *args, **kwargs):
context = super(CollectionCreateView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
org = ocl_user = None
if self.from_org:
org = api.get('orgs', self.org_id).json()
else:
ocl_user = api.get('users', self.user_id).json()
# Set the context
context['org'] = org
context['ocl_user'] = ocl_user
context['from_user'] = self.from_user
context['from_org'] = self.from_org
return context
def form_valid(self, form):
"""
        Collection input is valid; update the API backend.
"""
self.get_args()
data = form.cleaned_data
short_code = data.pop('short_code')
data['id'] = short_code
        if re.compile(r'^[a-zA-Z0-9\-]+$').match(short_code):
api = OclApi(self.request, debug=True)
result = api.post(self.owner_type, self.owner_id, 'collections', **data)
if not result.status_code == requests.codes.created:
emsg = result.json().get('detail', None)
if not emsg:
for msg in result.json().get('__all__'):
messages.add_message(self.request, messages.ERROR, msg)
else:
messages.add_message(self.request, messages.ERROR, emsg)
return HttpResponseRedirect(self.request.path)
messages.add_message(self.request, messages.INFO, _('Collection created'))
if self.from_org:
return HttpResponseRedirect(reverse("collection-home",
kwargs={"org": self.org_id,
'collection': short_code}))
else:
return HttpResponseRedirect(reverse("collection-home",
kwargs={"user": self.user_id,
'collection': short_code}))
else:
            validator_template = 'Short Code \'%s\' is not valid. Allowed characters are: letters (a-z, A-Z), digits (0-9) and hyphen (-).'
messages.add_message(self.request, messages.ERROR, validator_template % short_code)
return HttpResponseRedirect(self.request.path)
class CollectionAddReferenceView(CollectionsBaseView, TemplateView):
template_name = "collections/collection_add_reference.html"
def get_context_data(self, *args, **kwargs):
context = super(CollectionAddReferenceView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['collection'] = collection
return context
def get_success_url(self):
""" Return URL for redirecting browser """
if self.from_org:
return reverse('collection-references',
kwargs={'org': self.org_id, 'collection':self.collection_id})
else:
return reverse(
'collection-references',
kwargs={"user": self.request.user.username, 'collection':self.collection_id})
def post(self, request, *args, **kwargs):
self.get_args()
data = json.loads(request.body)
api = OclApi(self.request, debug=True)
result = api.put(
self.owner_type,
self.owner_id,
'collections',
self.collection_id,
'references',
data=data
)
errors = result.json() if result.status_code == requests.codes.bad else []
return HttpResponse(
json.dumps({
'success_url': self.get_success_url(),
'errors': errors
}),
content_type="application/json"
)
class CollectionReferencesDeleteView(CollectionsBaseView, TemplateView):
def delete(self, request, *args, **kwargs):
self.get_args()
references = request.GET.get('references').split(',')
api = OclApi(self.request, debug=True)
data = {'references': references}
res = api.delete(self.owner_type, self.owner_id, 'collections',
self.collection_id, 'references', **data)
return HttpResponse(res.content, status=200)
class CollectionDeleteView(CollectionsBaseView, FormView):
"""
View for deleting Collection.
"""
template_name = "collections/collection_delete.html"
form_class = CollectionDeleteForm
def get_context_data(self, *args, **kwargs):
context = super(CollectionDeleteView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
results = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id)
collection = results.json()
# Set the context
context['kwargs'] = self.kwargs
context['url_params'] = self.request.GET
context['collection'] = collection
return context
def get_success_url(self):
""" Return URL for redirecting browser """
if self.collection_version_id:
if self.from_org:
return reverse('collection-details',
kwargs={'org': self.org_id,
'collection': self.collection_id})
else:
return reverse('collection-details',
kwargs={'user': self.user_id,
'collection': self.collection_id})
else:
if self.from_org:
return reverse('org-collections',
kwargs={'org': self.org_id})
else:
return reverse('users:detail',
kwargs={"username": self.request.user.username})
def form_valid(self, form, *args, **kwargs):
""" Use validated form data to delete the collection"""
self.get_args()
api = OclApi(self.request, debug=True)
if self.collection_version_id:
result = api.delete(self.owner_type, self.owner_id, 'collections',
self.collection_id, self.collection_version_id, **kwargs)
else:
result = api.delete(
self.owner_type, self.owner_id, 'collections', self.collection_id, **kwargs)
if result.status_code != 204:
emsg = result.json().get('detail', 'Error')
messages.add_message(self.request, messages.ERROR, emsg)
return HttpResponseRedirect(self.request.path)
else:
messages.add_message(self.request, messages.INFO, _('Collection Deleted'))
return HttpResponseRedirect(self.get_success_url())
class CollectionEditView(CollectionsBaseView, FormView):
""" Edit collection, either for an org or a user. """
template_name = "collections/collection_edit.html"
def get_form_class(self):
""" Trick to load initial data """
self.get_args()
api = OclApi(self.request, debug=True)
self.collection = api.get(self.owner_type, self.owner_id, 'collections',
self.collection_id).json()
return CollectionEditForm
def get_initial(self):
""" Load some useful data, not really for form display but internal use """
data = {
'org_id': self.org_id,
'user_id': self.user_id,
'from_org': self.from_org,
'from_user': self.from_user,
'request': self.request,
}
data.update(self.collection)
# convert supported locales to string
supported_locale_list = self.collection.get('supported_locales')
if supported_locale_list is None:
data['supported_locales'] = ''
else:
data['supported_locales'] = ','.join(supported_locale_list)
return data
def get_context_data(self, *args, **kwargs):
""" Get collection details for the edit form """
context = super(CollectionEditView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
org = ocl_user = None
if self.from_org:
org = api.get('orgs', self.org_id).json()
else:
ocl_user = api.get('users', self.user_id).json()
# Set the context
context['kwargs'] = self.kwargs
context['org'] = org
context['ocl_user'] = ocl_user
context['from_user'] = self.from_user
context['from_org'] = self.from_org
context['collection'] = self.collection
return context
def form_valid(self, form):
""" If Collection input is valid, then update API backend. """
self.get_args()
# Submit updated collection data to the API
data = form.cleaned_data
api = OclApi(self.request, debug=True)
result = api.update_collection(self.owner_type, self.owner_id, self.collection_id, data)
messages.add_message(self.request, messages.INFO, _('Collection updated'))
if self.from_org:
return HttpResponseRedirect(reverse('collection-details',
kwargs={'org': self.org_id,
'collection': self.collection_id}))
else:
return HttpResponseRedirect(reverse('collection-details',
kwargs={'user': self.user_id,
'collection': self.collection_id}))
class CollectionVersionsNewView(CollectionsBaseView, UserOrOrgMixin, FormView):
form_class = CollectionVersionAddForm
template_name = "collections/collection_versions_new.html"
def get_initial(self):
super(CollectionVersionsNewView, self).get_initial()
self.get_args()
api = OclApi(self.request, debug=True)
# collection_version = None
if self.from_org:
collection_version = api.get('orgs', self.org_id, 'collections', self.collection_id,
'versions', params={'limit': 1}).json()
else:
collection_version = api.get('users', self.user_id, 'collections', self.collection_id,
'versions', params={'limit': 1}).json()
data = {
'request': self.request,
'from_user': self.from_user,
'from_org': self.from_org,
'user_id': self.user_id,
'org_id': self.org_id,
'owner_type': self.owner_type,
'owner_id': self.owner_id,
'collection_id': self.collection_id,
'previous_version': collection_version[0]['id'],
'released': False
}
return data
def get_context_data(self, *args, **kwargs):
context = super(CollectionVersionsNewView, self).get_context_data(*args, **kwargs)
self.get_args()
api = OclApi(self.request, debug=True)
# collection = None
if self.from_org:
collection = api.get('orgs', self.org_id, 'collections', self.collection_id).json()
else:
collection = api.get('users', self.user_id, 'collections', self.collection_id).json()
# Set the context
context['kwargs'] = self.kwargs
context['collection'] = collection
return context
def form_valid(self, form):
self.get_args()
# Submit the new collection version
data = form.cleaned_data
api = OclApi(self.request, debug=True)
result = api.create_collection_version(self.owner_type, self.owner_id,
self.collection_id, data)
if result.status_code == requests.codes.created:
messages.add_message(self.request, messages.INFO, _('Collection version created!'))
if self.from_org:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'org': self.org_id,
'collection': self.collection_id}))
else:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'user': self.user_id,
'collection': self.collection_id}))
else:
error_msg = result.json().get('detail', 'Error')
messages.add_message(self.request, messages.ERROR, error_msg)
return HttpResponseRedirect(self.request.path)
class CollectionVersionEditView(LoginRequiredMixin, UserOrOrgMixin, FormView):
""" View to edit collection version """
form_class = CollectionVersionsEditForm
template_name = "collections/collection_versions_edit.html"
def get_form_class(self):
""" Trick to load initial form data """
self.get_args()
api = OclApi(self.request, debug=True)
self.collection_version = api.get(self.owner_type, self.owner_id, 'collections', self.collection_id,
self.collection_version_id).json()
return CollectionVersionsEditForm
def get_initial(self):
""" Load initial form data """
data = {
'org_id': self.org_id,
'user_id': self.user_id,
'from_org': self.from_org,
'from_user': self.from_user,
'collection_id': self.collection_id,
'collection_version_id': self.collection_version_id,
'request': self.request,
}
data.update(self.collection_version)
return data
def get_context_data(self, *args, **kwargs):
""" Load context data needed for the view """
context = super(CollectionVersionEditView, self).get_context_data(*args, **kwargs)
context['kwargs'] = self.kwargs
context['collection_version'] = self.collection_version
return context
def form_valid(self, form):
""" If form data is valid, then update API backend. """
self.get_args()
# Submit updated collection version description to the API
data = {
'description':form.cleaned_data.get('description')
}
api = OclApi(self.request, debug=True)
result = api.update_resource_version(self.owner_type, self.owner_id, self.collection_id,
self.collection_version_id, 'collections', data)
# Check if successful
if result.status_code == requests.codes.ok:
messages.add_message(self.request, messages.INFO, _('Collection version updated'))
if self.from_org:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'org': self.org_id,
'collection': self.collection_id}))
else:
return HttpResponseRedirect(reverse('collection-versions',
kwargs={'user': self.user_id,
'collection': self.collection_id}))
else:
emsg = result.text
messages.add_message(self.request, messages.ERROR, emsg)
return HttpResponseRedirect(self.request.path)
class CollectionVersionEditJsonView(CollectionsBaseView, TemplateView):
def put(self, request, *args, **kwargs):
self.get_args()
api = OclApi(self.request, debug=True)
data = json.loads(request.body)
res = api.update_resource_version(self.owner_type,
self.owner_id,
self.collection_id,
self.collection_version_id,
'collections',
data)
return HttpResponse(res.content, status=200)
class CollectionVersionDeleteView(CollectionsBaseView, View):
""" collection version delete view"""
def delete(self, request, *args, **kwargs):
self.get_args()
api = OclApi(self.request, debug=True)
if request.is_ajax():
result = api.delete(
self.owner_type,
self.owner_id,
'collections',
self.collection_id,
self.collection_version_id,
**kwargs
)
return HttpResponse(
json.dumps({}),
content_type="application/json"
)
        return super(CollectionVersionDeleteView, self).delete(request, *args, **kwargs)
| mpl-2.0 | 3,701,399,419,970,683,000 | 39.289346 | 140 | 0.588329 | false |
SoCo/SoCo | soco/events_asyncio.py | 1 | 20247 | """Classes to handle Sonos UPnP Events and Subscriptions using asyncio.
The `Subscription` class from this module will be used in
:py:mod:`soco.services` if `config.EVENTS_MODULE` is set
to point to this module.
Example:
Run this code, and change your volume, tracks etc::
import logging
logging.basicConfig()
import soco
import asyncio
from pprint import pprint
from soco import events_asyncio
soco.config.EVENTS_MODULE = events_asyncio
def print_event(event):
try:
pprint(event.variables)
except Exception as e:
print("There was an error in print_event:", e)
def _get_device():
device = soco.discover().pop().group.coordinator
print(device.player_name)
return device
async def main():
# pick a device at random and use it to get
# the group coordinator
loop = asyncio.get_event_loop()
device = await loop.run_in_executor(None, _get_device)
sub = await device.renderingControl.subscribe()
sub2 = await device.avTransport.subscribe()
sub.callback = print_event
sub2.callback = print_event
async def before_shutdown():
await sub.unsubscribe()
await sub2.unsubscribe()
await events_asyncio.event_listener.async_stop()
await asyncio.sleep(1)
print("Renewing subscription..")
await sub.renew()
await asyncio.sleep(100)
await before_shutdown()
if __name__ == "__main__":
asyncio.run(main())
"""
import logging
import socket
import sys
import time
import asyncio
try:
from aiohttp import ClientSession, web
except ImportError as error:
print(
"""ImportError: {}:
Use of the SoCo events_asyncio module requires the 'aiohttp'
package and its dependencies to be installed. aiohttp is not
installed with SoCo by default due to potential issues installing
the dependencies 'multidict' and 'yarl' on some platforms.
See: https://github.com/SoCo/SoCo/issues/819""".format(
error
)
)
sys.exit(1)
# Event is imported for compatibility with events.py
# pylint: disable=unused-import
from .events_base import Event # noqa: F401
from .events_base import ( # noqa: E402
get_listen_ip,
parse_event_xml,
EventNotifyHandlerBase,
EventListenerBase,
SubscriptionBase,
SubscriptionsMap,
)
from .exceptions import SoCoException # noqa: E402
log = logging.getLogger(__name__) # pylint: disable=C0103
class EventNotifyHandler(EventNotifyHandlerBase):
"""Handles HTTP ``NOTIFY`` Verbs sent to the listener server.
Inherits from `soco.events_base.EventNotifyHandlerBase`.
"""
def __init__(self):
super().__init__()
# The SubscriptionsMapAio instance created when this module is
# imported. This is referenced by
# soco.events_base.EventNotifyHandlerBase.
self.subscriptions_map = subscriptions_map
async def notify(self, request):
"""Serve a ``NOTIFY`` request by calling `handle_notification`
with the headers and content.
"""
content = await request.text()
seq = request.headers["seq"] # Event sequence number
sid = request.headers["sid"] # Event Subscription Identifier
# find the relevant service from the sid
# pylint: disable=no-member
subscription = self.subscriptions_map.get_subscription(sid)
# It might have been removed by another thread
if subscription:
timestamp = time.time()
service = subscription.service
self.log_event(seq, service.service_id, timestamp)
log.debug("Event content: %s", content)
if "x-sonos-http" in content:
# parse_event_xml will generate I/O if
# x-sonos-http is in the content
variables = await asyncio.get_event_loop().run_in_executor(
None, parse_event_xml, content
)
else:
variables = parse_event_xml(content)
# Build the Event object
event = Event(sid, seq, service, timestamp, variables)
# pass the event details on to the service so it can update
# its cache.
# pylint: disable=protected-access
service._update_cache_on_event(event)
# Pass the event on for handling
subscription.send_event(event)
else:
log.debug("No service registered for %s", sid)
return web.Response(text="OK", status=200)
# pylint: disable=no-self-use, missing-docstring
def log_event(self, seq, service_id, timestamp):
log.debug("Event %s received for %s service at %s", seq, service_id, timestamp)
class EventListener(EventListenerBase): # pylint: disable=too-many-instance-attributes
"""The Event Listener.
Runs an http server which is an endpoint for ``NOTIFY``
requests from Sonos devices. Inherits from
`soco.events_base.EventListenerBase`.
"""
def __init__(self):
super().__init__()
self.sock = None
self.ip_address = None
self.port = None
self.runner = None
self.site = None
self.session = None
self.start_lock = None
def start(self, any_zone):
"""A stub since the first subscribe calls async_start."""
return
def listen(self, ip_address):
"""A stub since since async_listen is used."""
return
async def async_start(self, any_zone):
"""Start the event listener listening on the local machine under the lock.
Args:
any_zone (SoCo): Any Sonos device on the network. It does not
matter which device. It is used only to find a local IP
address reachable by the Sonos net.
"""
if not self.start_lock:
self.start_lock = asyncio.Lock()
async with self.start_lock:
if self.is_running:
return
# Use configured IP address if there is one, else detect
# automatically.
ip_address = get_listen_ip(any_zone.ip_address)
if not ip_address:
log.exception("Could not start Event Listener: check network.")
# Otherwise, no point trying to start server
return
port = await self.async_listen(ip_address)
if not port:
return
self.address = (ip_address, port)
self.session = ClientSession(raise_for_status=True)
self.is_running = True
log.debug("Event Listener started")
async def async_listen(self, ip_address):
"""Start the event listener listening on the local machine at
port 1400 (default). If this port is unavailable, the
listener will attempt to listen on the next available port,
within a range of 100.
Make sure that your firewall allows connections to this port.
This method is called by `soco.events_base.EventListenerBase.start`
Handling of requests is delegated to an instance of the
`EventNotifyHandler` class.
Args:
ip_address (str): The local network interface on which the server
should start listening.
Returns:
int: The port on which the server is listening.
Note:
The port on which the event listener listens is configurable.
See `config.EVENT_LISTENER_PORT`
"""
for port_number in range(
self.requested_port_number, self.requested_port_number + 100
):
try:
if port_number > self.requested_port_number:
log.debug("Trying next port (%d)", port_number)
# pylint: disable=no-member
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((ip_address, port_number))
sock.listen(200)
self.sock = sock
self.port = port_number
break
# pylint: disable=invalid-name
except socket.error as e:
log.warning("Could not bind to %s:%s: %s", ip_address, port_number, e)
continue
if not self.port:
return None
self.ip_address = ip_address
await self._async_start()
return self.port
async def _async_start(self):
"""Start the site."""
handler = EventNotifyHandler()
app = web.Application()
app.add_routes([web.route("notify", "", handler.notify)])
self.runner = web.AppRunner(app)
await self.runner.setup()
self.site = web.SockSite(self.runner, self.sock)
await self.site.start()
log.debug("Event listener running on %s", (self.ip_address, self.port))
async def async_stop(self):
"""Stop the listener."""
if self.site:
await self.site.stop()
self.site = None
if self.runner:
await self.runner.cleanup()
self.runner = None
if self.session:
await self.session.close()
self.session = None
if self.sock:
self.sock.close()
self.sock = None
self.port = None
self.ip_address = None
# pylint: disable=unused-argument
def stop_listening(self, address):
"""Stop the listener."""
asyncio.ensure_future(self.async_stop())
class Subscription(SubscriptionBase):
"""A class representing the subscription to a UPnP event.
Inherits from `soco.events_base.SubscriptionBase`.
"""
def __init__(self, service, callback=None):
"""
Args:
service (Service): The SoCo `Service` to which the subscription
should be made.
            callback (function, optional): A function to be called whenever an
                `Event` is received. If set and callable, it will be invoked
                with the `Event` as its only argument and the subscription's
                event queue will not be used.
"""
super().__init__(service, None)
#: :py:obj:`function`: callback function to be called whenever an
#: `Event` is received. If it is set and is callable, the callback
#: function will be called with the `Event` as the only parameter and
#: the Subscription's event queue won't be used.
self.callback = callback
# The SubscriptionsMapAio instance created when this module is
# imported. This is referenced by soco.events_base.SubscriptionBase.
self.subscriptions_map = subscriptions_map
# The EventListener instance created when this module is imported.
# This is referenced by soco.events_base.SubscriptionBase.
self.event_listener = event_listener
# Used to keep track of the auto_renew loop
self._auto_renew_task = None
# pylint: disable=arguments-differ
def subscribe(self, requested_timeout=None, auto_renew=False, strict=True):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
This method calls `events_base.SubscriptionBase.subscribe`.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
self.subscriptions_map.subscribing()
future = asyncio.Future()
subscribe = super().subscribe
async def _async_wrap_subscribe():
try:
if not self.event_listener.is_running:
await self.event_listener.async_start(self.service.soco)
await subscribe(requested_timeout, auto_renew)
future.set_result(self)
except SoCoException as ex:
future.set_exception(ex)
except Exception as exc: # pylint: disable=broad-except
self._cancel_subscription(exc)
if strict:
future.set_exception(exc)
else:
self._log_exception(exc)
future.set_result(self)
finally:
self.subscriptions_map.finished_subscribing()
asyncio.ensure_future(_async_wrap_subscribe())
return future
def _log_exception(self, exc):
"""Log an exception during subscription."""
msg = (
"An Exception occurred: {}.".format(exc)
+ " Subscription to {},".format(
self.service.base_url + self.service.event_subscription_url
)
+ " sid: {} has been cancelled".format(self.sid)
)
log.exception(msg)
async def renew(
self, requested_timeout=None, is_autorenew=False, strict=True
): # pylint: disable=invalid-overridden-method
"""renew(requested_timeout=None)
Renew the event subscription.
You should not try to renew a subscription which has been
unsubscribed, or once it has expired.
This method calls `events_base.SubscriptionBase.renew`.
Args:
requested_timeout (int, optional): The period for which a renewal
request should be made. If None (the default), use the timeout
requested on subscription.
is_autorenew (bool, optional): Whether this is an autorenewal.
Default `False`.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
try:
return await super().renew(requested_timeout, is_autorenew)
except Exception as exc: # pylint: disable=broad-except
self._cancel_subscription(exc)
if self.auto_renew_fail is not None and hasattr(
self.auto_renew_fail, "__call__"
):
# pylint: disable=not-callable
self.auto_renew_fail(exc)
else:
self._log_exception(exc)
if strict:
raise
return self
async def unsubscribe(
self, strict=True
): # pylint: disable=invalid-overridden-method
"""unsubscribe()
Unsubscribe from the service's events.
        Once unsubscribed, a Subscription instance should not be reused.
This method calls `events_base.SubscriptionBase.unsubscribe`.
Args:
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
`Subscription`: The Subscription instance.
"""
try:
unsub = super().unsubscribe()
if unsub is None:
return
await unsub
except Exception as exc: # pylint: disable=broad-except
if strict:
raise
self._log_exception(exc)
return self
def _auto_renew_start(self, interval):
"""Starts the auto_renew loop."""
self._auto_renew_task = asyncio.get_event_loop().call_later(
interval, self._auto_renew_run, interval
)
def _auto_renew_run(self, interval):
asyncio.ensure_future(self.renew(is_autorenew=True, strict=False))
self._auto_renew_start(interval)
def _auto_renew_cancel(self):
"""Cancels the auto_renew loop"""
if self._auto_renew_task:
self._auto_renew_task.cancel()
self._auto_renew_task = None
# pylint: disable=no-self-use, too-many-branches, too-many-arguments
def _request(self, method, url, headers, success, unconditional=None):
"""Sends an HTTP request.
Args:
method (str): 'SUBSCRIBE' or 'UNSUBSCRIBE'.
url (str): The full endpoint to which the request is being sent.
headers (dict): A dict of headers, each key and each value being
of type `str`.
success (function): A function to be called if the
request succeeds. The function will be called with a dict
of response headers as its only parameter.
unconditional (function): An optional function to be called after
the request is complete, regardless of its success. Takes
no parameters.
"""
async def _async_make_request():
response = await self.event_listener.session.request(
method, url, headers=headers
)
if response.ok:
success(response.headers)
if unconditional:
unconditional()
return _async_make_request()
class nullcontext: # pylint: disable=invalid-name
"""Context manager that does no additional processing.
Backport from python 3.7+ for older pythons.
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
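# Minimal usage sketch: `with nullcontext(): ...` behaves like a no-op lock,
# which is how SubscriptionsMapAio uses it for subscriptions_lock below.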
class SubscriptionsMapAio(SubscriptionsMap):
"""Maintains a mapping of sids to `soco.events_asyncio.Subscription`
instances. Registers each subscription to be unsubscribed at exit.
Inherits from `soco.events_base.SubscriptionsMap`.
"""
def __init__(self):
super().__init__()
# A counter of calls to Subscription.subscribe
# that have started but not completed. This is
# to prevent the event listener from being stopped prematurely
self._pending = 0
self.subscriptions_lock = nullcontext()
def register(self, subscription):
"""Register a subscription by updating local mapping of sid to
subscription and registering it to be unsubscribed at exit.
Args:
subscription(`soco.events_asyncio.Subscription`): the subscription
to be registered.
"""
# Add the subscription to the local dict of subscriptions so it
# can be looked up by sid
self.subscriptions[subscription.sid] = subscription
def subscribing(self):
"""Called when the `Subscription.subscribe` method
commences execution.
"""
# Increment the counter
self._pending += 1
def finished_subscribing(self):
"""Called when the `Subscription.subscribe` method
completes execution.
"""
# Decrement the counter
self._pending -= 1
@property
def count(self):
"""
`int`: The number of active or pending subscriptions.
"""
return len(self.subscriptions) + self._pending
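# Illustrative note (editor's addition, not part of the original source): the
# expected call pattern is subscribing() immediately before Subscription.subscribe
# starts and finished_subscribing() once it completes, so that `count` stays above
# zero while a subscribe call is still in flight and the event listener is not
# stopped prematurely.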
subscriptions_map = SubscriptionsMapAio() # pylint: disable=C0103
event_listener = EventListener() # pylint: disable=C0103
| mit | 8,036,526,208,551,836,000 | 34.273519 | 87 | 0.597027 | false |
alshedivat/tensorflow | tensorflow/contrib/gan/python/eval/python/eval_utils_impl.py | 73 | 5394 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility file for visualizing generated images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
__all__ = [
"image_grid",
"image_reshaper",
]
# TODO(joelshor): Make this a special case of `image_reshaper`.
def image_grid(input_tensor, grid_shape, image_shape=(32, 32), num_channels=3):
"""Arrange a minibatch of images into a grid to form a single image.
Args:
input_tensor: Tensor. Minibatch of images to format, either 4D
([batch size, height, width, num_channels]) or flattened
([batch size, height * width * num_channels]).
grid_shape: Sequence of int. The shape of the image grid,
formatted as [grid_height, grid_width].
image_shape: Sequence of int. The shape of a single image,
formatted as [image_height, image_width].
num_channels: int. The number of channels in an image.
Returns:
Tensor representing a single image in which the input images have been
arranged into a grid.
Raises:
ValueError: The grid shape and minibatch size don't match, or the image
shape and number of channels are incompatible with the input tensor.
"""
if grid_shape[0] * grid_shape[1] != int(input_tensor.shape[0]):
raise ValueError("Grid shape %s incompatible with minibatch size %i." %
(grid_shape, int(input_tensor.shape[0])))
if len(input_tensor.shape) == 2:
num_features = image_shape[0] * image_shape[1] * num_channels
if int(input_tensor.shape[1]) != num_features:
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
elif len(input_tensor.shape) == 4:
if (int(input_tensor.shape[1]) != image_shape[0] or
int(input_tensor.shape[2]) != image_shape[1] or
int(input_tensor.shape[3]) != num_channels):
raise ValueError("Image shape and number of channels incompatible with "
"input tensor.")
else:
raise ValueError("Unrecognized input tensor format.")
height, width = grid_shape[0] * image_shape[0], grid_shape[1] * image_shape[1]
input_tensor = array_ops.reshape(
input_tensor, tuple(grid_shape) + tuple(image_shape) + (num_channels,))
input_tensor = array_ops.transpose(input_tensor, [0, 1, 3, 2, 4])
input_tensor = array_ops.reshape(
input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
input_tensor = array_ops.transpose(input_tensor, [0, 2, 1, 3])
input_tensor = array_ops.reshape(
input_tensor, [1, height, width, num_channels])
return input_tensor
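# Illustrative sketch (editor's addition, not part of the original file): a typical
# use of `image_grid` is to tile a small minibatch into one summary image, e.g. six
# 32x32 RGB images arranged into 2 rows of 3 columns, giving a [1, 64, 96, 3] tensor.
def _example_image_grid():
  """Illustrative only: tiles a zero-filled batch of six 32x32 RGB images."""
  batch = array_ops.zeros([6, 32, 32, 3])
  return image_grid(batch, grid_shape=(2, 3), image_shape=(32, 32),
                    num_channels=3)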
def _validate_images(images):
for img in images:
img.shape.assert_has_rank(3)
img.shape.assert_is_fully_defined()
if img.shape[-1] not in (1, 3):
raise ValueError("image_reshaper only supports 1 or 3 channel images.")
# TODO(joelshor): Move the dimension logic from Python to Tensorflow.
def image_reshaper(images, num_cols=None):
"""A reshaped summary image.
Returns an image that will contain all elements in the list and will be
laid out in a nearly-square tiling pattern (e.g. 11 images will lead to a
3x4 tiled image).
Args:
images: Image data to summarize. Can be an RGB or grayscale image, a list of
such images, or a set of RGB images concatenated along the depth
dimension. The shape of each image is assumed to be [batch_size,
height, width, depth].
num_cols: (Optional) If provided, this is the number of columns in the final
output image grid. Otherwise, the number of columns is determined by
the number of images.
Returns:
A summary image matching the input with automatic tiling if needed.
Output shape is [1, height, width, channels].
"""
if isinstance(images, ops.Tensor):
images = array_ops.unstack(images)
_validate_images(images)
num_images = len(images)
num_columns = (num_cols if num_cols else
int(math.ceil(math.sqrt(num_images))))
num_rows = int(math.ceil(float(num_images) / num_columns))
rows = [images[x:x+num_columns] for x in range(0, num_images, num_columns)]
# Add empty image tiles if the last row is incomplete.
num_short = num_rows * num_columns - num_images
assert num_short >= 0 and num_short < num_columns
if num_short > 0:
rows[-1].extend([array_ops.zeros_like(images[-1])] * num_short)
# Convert each row from a list of tensors to a single tensor.
rows = [array_ops.concat(row, 1) for row in rows]
# Stack rows vertically.
img = array_ops.concat(rows, 0)
return array_ops.expand_dims(img, 0)
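# Illustrative sketch (editor's addition, not part of the original file): passing a
# batch of eleven 8x8 grayscale images yields a 3x4 tiling (the last tile is left
# blank), i.e. a [1, 24, 32, 1] tensor.
def _example_image_reshaper():
  """Illustrative only: tiles a zero-filled batch of eleven 8x8 images."""
  batch = array_ops.zeros([11, 8, 8, 1])
  return image_reshaper(batch)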
| apache-2.0 | -8,027,765,234,457,368,000 | 39.253731 | 80 | 0.677234 | false |
rohitwaghchaure/New_Theme_Erp | erpnext/accounts/report/gross_profit/gross_profit.py | 10 | 4692 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from erpnext.stock.utils import get_buying_amount, get_sales_bom_buying_amount
def execute(filters=None):
if not filters: filters = {}
stock_ledger_entries = get_stock_ledger_entries(filters)
source = get_source_data(filters)
item_sales_bom = get_item_sales_bom()
columns = ["Delivery Note/Sales Invoice::120", "Link::30", "Posting Date:Date", "Posting Time",
"Item Code:Link/Item", "Item Name", "Description", "Warehouse:Link/Warehouse",
"Qty:Float", "Selling Rate:Currency", "Avg. Buying Rate:Currency",
"Selling Amount:Currency", "Buying Amount:Currency",
"Gross Profit:Currency", "Gross Profit %:Percent", "Project:Link/Project"]
data = []
for row in source:
selling_amount = flt(row.base_amount)
item_sales_bom_map = item_sales_bom.get(row.parenttype, {}).get(row.name, frappe._dict())
if item_sales_bom_map.get(row.item_code):
buying_amount = get_sales_bom_buying_amount(row.item_code, row.warehouse,
row.parenttype, row.name, row.item_row, stock_ledger_entries, item_sales_bom_map)
else:
buying_amount = get_buying_amount(row.parenttype, row.name, row.item_row,
stock_ledger_entries.get((row.item_code, row.warehouse), []))
buying_amount = buying_amount > 0 and buying_amount or 0
gross_profit = selling_amount - buying_amount
if selling_amount:
gross_profit_percent = (gross_profit / selling_amount) * 100.0
else:
gross_profit_percent = 0.0
icon = """<a href="%s"><i class="icon icon-share" style="cursor: pointer;"></i></a>""" \
% ("/".join(["#Form", row.parenttype, row.name]),)
data.append([row.name, icon, row.posting_date, row.posting_time, row.item_code, row.item_name,
row.description, row.warehouse, row.qty, row.base_rate,
row.qty and (buying_amount / row.qty) or 0, row.base_amount, buying_amount,
gross_profit, gross_profit_percent, row.project])
return columns, data
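# Illustrative note (editor's addition, not part of the original report): `filters`
# is a plain dict and only the keys read below matter, e.g.
# {"company": "Example Company", "from_date": "2014-01-01", "to_date": "2014-12-31"};
# execute() returns the (columns, data) pair expected by frappe's query report runner.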
def get_stock_ledger_entries(filters):
query = """select item_code, voucher_type, voucher_no,
voucher_detail_no, posting_date, posting_time, stock_value,
warehouse, actual_qty as qty
from `tabStock Ledger Entry`"""
if filters.get("company"):
query += """ where company=%(company)s"""
query += " order by item_code desc, warehouse desc, posting_date desc, posting_time desc, name desc"
res = frappe.db.sql(query, filters, as_dict=True)
out = {}
for r in res:
if (r.item_code, r.warehouse) not in out:
out[(r.item_code, r.warehouse)] = []
out[(r.item_code, r.warehouse)].append(r)
return out
def get_item_sales_bom():
item_sales_bom = {}
for d in frappe.db.sql("""select parenttype, parent, parent_item,
item_code, warehouse, -1*qty as total_qty, parent_detail_docname
from `tabPacked Item` where docstatus=1""", as_dict=True):
item_sales_bom.setdefault(d.parenttype, frappe._dict()).setdefault(d.parent,
frappe._dict()).setdefault(d.parent_item, []).append(d)
return item_sales_bom
def get_source_data(filters):
conditions = ""
if filters.get("company"):
conditions += " and company=%(company)s"
if filters.get("from_date"):
conditions += " and posting_date>=%(from_date)s"
if filters.get("to_date"):
conditions += " and posting_date<=%(to_date)s"
delivery_note_items = frappe.db.sql("""select item.parenttype, dn.name,
dn.posting_date, dn.posting_time, dn.project_name,
item.item_code, item.item_name, item.description, item.warehouse,
item.qty, item.base_rate, item.base_amount, item.name as "item_row",
timestamp(dn.posting_date, dn.posting_time) as posting_datetime
from `tabDelivery Note` dn, `tabDelivery Note Item` item
where item.parent = dn.name and dn.docstatus = 1 %s
order by dn.posting_date desc, dn.posting_time desc""" % (conditions,), filters, as_dict=1)
sales_invoice_items = frappe.db.sql("""select item.parenttype, si.name,
si.posting_date, si.posting_time, si.project_name,
item.item_code, item.item_name, item.description, item.warehouse,
item.qty, item.base_rate, item.base_amount, item.name as "item_row",
timestamp(si.posting_date, si.posting_time) as posting_datetime
from `tabSales Invoice` si, `tabSales Invoice Item` item
where item.parent = si.name and si.docstatus = 1 %s
and si.update_stock = 1
order by si.posting_date desc, si.posting_time desc""" % (conditions,), filters, as_dict=1)
source = delivery_note_items + sales_invoice_items
if len(source) > len(delivery_note_items):
source.sort(key=lambda d: d.posting_datetime, reverse=True)
return source | agpl-3.0 | -8,837,677,750,861,082,000 | 39.456897 | 101 | 0.70162 | false |
thnee/ansible | lib/ansible/module_utils/splitter.py | 197 | 9433 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def _get_quote_state(token, quote_char):
'''
the goal of this block is to determine if the quoted string
is unterminated in which case it needs to be put back together
'''
# the char before the current one, used to see if
# the current character is escaped
prev_char = None
for idx, cur_char in enumerate(token):
if idx > 0:
prev_char = token[idx - 1]
if cur_char in '"\'' and prev_char != '\\':
if quote_char:
if cur_char == quote_char:
quote_char = None
else:
quote_char = cur_char
return quote_char
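# Worked example (editor's addition, not in the original source):
# _get_quote_state('c="foo', None) returns '"' because the double quote is opened
# but never closed within the token; calling _get_quote_state('bar"', '"') on the
# follow-up token returns None once that quote is closed again.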
def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
'''
this function counts the number of opening/closing blocks for a
given opening/closing type and adjusts the current depth for that
block based on the difference
'''
num_open = token.count(open_token)
num_close = token.count(close_token)
if num_open != num_close:
cur_depth += (num_open - num_close)
if cur_depth < 0:
cur_depth = 0
return cur_depth
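# Worked example (editor's addition, not in the original source):
# _count_jinja2_blocks('{{ foo', 0, '{{', '}}') sees one opener and no closer, so the
# depth rises from 0 to 1; a later token containing the matching '}}' brings it back
# to 0, and the depth is clamped so it never goes negative.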
def split_args(args):
'''
Splits args on whitespace, but intelligently reassembles
those that may have been split over a jinja2 block or quotes.
When used in a remote module, we won't ever have to be concerned about
jinja2 blocks, however this function is/will be used in the
core portions as well before the args are templated.
example input: a=b c="foo bar"
example output: ['a=b', 'c="foo bar"']
Basically this is a variation shlex that has some more intelligence for
how Ansible needs to use it.
'''
# the list of params parsed out of the arg string
    # this is going to be the result value when we are done
params = []
# here we encode the args, so we have a uniform charset to
# work with, and split on white space
args = args.strip()
try:
args = args.encode('utf-8')
do_decode = True
except UnicodeDecodeError:
do_decode = False
items = args.split('\n')
# iterate over the tokens, and reassemble any that may have been
# split on a space inside a jinja2 block.
# ex if tokens are "{{", "foo", "}}" these go together
# These variables are used
# to keep track of the state of the parsing, since blocks and quotes
# may be nested within each other.
quote_char = None
inside_quotes = False
print_depth = 0 # used to count nested jinja2 {{ }} blocks
block_depth = 0 # used to count nested jinja2 {% %} blocks
comment_depth = 0 # used to count nested jinja2 {# #} blocks
# now we loop over each split chunk, coalescing tokens if the white space
# split occurred within quotes or a jinja2 block of some kind
for itemidx, item in enumerate(items):
# we split on spaces and newlines separately, so that we
# can tell which character we split on for reassembly
# inside quotation characters
tokens = item.strip().split(' ')
line_continuation = False
for idx, token in enumerate(tokens):
# if we hit a line continuation character, but
# we're not inside quotes, ignore it and continue
# on to the next token while setting a flag
if token == '\\' and not inside_quotes:
line_continuation = True
continue
# store the previous quoting state for checking later
was_inside_quotes = inside_quotes
quote_char = _get_quote_state(token, quote_char)
inside_quotes = quote_char is not None
# multiple conditions may append a token to the list of params,
# so we keep track with this flag to make sure it only happens once
# append means add to the end of the list, don't append means concatenate
# it to the end of the last token
appended = False
# if we're inside quotes now, but weren't before, append the token
# to the end of the list, since we'll tack on more to it later
# otherwise, if we're inside any jinja2 block, inside quotes, or we were
# inside quotes (but aren't now) concat this token to the last param
if inside_quotes and not was_inside_quotes:
params.append(token)
appended = True
elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
if idx == 0 and not inside_quotes and was_inside_quotes:
params[-1] = "%s%s" % (params[-1], token)
elif len(tokens) > 1:
spacer = ''
if idx > 0:
spacer = ' '
params[-1] = "%s%s%s" % (params[-1], spacer, token)
else:
spacer = ''
if not params[-1].endswith('\n') and idx == 0:
spacer = '\n'
params[-1] = "%s%s%s" % (params[-1], spacer, token)
appended = True
# if the number of paired block tags is not the same, the depth has changed, so we calculate that here
# and may append the current token to the params (if we haven't previously done so)
prev_print_depth = print_depth
print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
if print_depth != prev_print_depth and not appended:
params.append(token)
appended = True
prev_block_depth = block_depth
block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
if block_depth != prev_block_depth and not appended:
params.append(token)
appended = True
prev_comment_depth = comment_depth
comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
if comment_depth != prev_comment_depth and not appended:
params.append(token)
appended = True
# finally, if we're at zero depth for all blocks and not inside quotes, and have not
# yet appended anything to the list of params, we do so now
if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
params.append(token)
# if this was the last token in the list, and we have more than
# one item (meaning we split on newlines), add a newline back here
# to preserve the original structure
if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
if not params[-1].endswith('\n') or item == '':
params[-1] += '\n'
# always clear the line continuation flag
line_continuation = False
# If we're done and things are not at zero depth or we're still inside quotes,
# raise an error to indicate that the args were unbalanced
if print_depth or block_depth or comment_depth or inside_quotes:
raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
# finally, we decode each param back to the unicode it was in the arg string
if do_decode:
params = [x.decode('utf-8') for x in params]
return params
def is_quoted(data):
return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if is_quoted(data):
return data[1:-1]
return data
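# Illustrative sketch (editor's addition, not part of the original module), restating
# the example from the split_args docstring above:
#     split_args('a=b c="foo bar"')   # -> ['a=b', 'c="foo bar"']
#     is_quoted('"foo bar"')          # -> True
#     unquote('"foo bar"')            # -> 'foo bar'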
| gpl-3.0 | 5,943,040,259,923,911,000 | 42.671296 | 120 | 0.629174 | false |
mcaleavya/bcc | examples/networking/vlan_filter/data-plane-tracing.py | 4 | 7701 | #!/usr/bin/python
from __future__ import print_function
from bcc import BPF
import sys
import socket
import os
import argparse
import time
import netifaces as ni
from sys import argv
from kafka import KafkaProducer
from kafka.errors import KafkaError
from datetime import datetime
#args
def usage():
print("USAGE: %s [-i <if_name>]" % argv[0])
print("")
print("Try '%s -h' for more options." % argv[0])
exit()
#help
def help():
print("USAGE: %s [-i <if_name>][-k <kafka_server_name:kafka_port>]" % argv[0])
print("")
print("optional arguments:")
print(" -h print this help")
print(" -i if_name select interface if_name. Default is eth0")
print(" -k kafka_server_name select kafka server name. Default is save to file")
print(" If -k option is not specified data will be saved to file.")
print("")
print("examples:")
print(" data-plane-tracing # bind socket to eth0")
print(" data-plane-tracing -i eno2 -k vc.manage.overcloud:9092 # bind socket to eno2 and send data to kafka server in iovisor-topic.")
exit()
#arguments
interface="eth0"
kafkaserver=''
#check provided arguments
if len(argv) == 2:
if str(argv[1]) == '-h':
help()
else:
usage()
if len(argv) == 3:
if str(argv[1]) == '-i':
interface = argv[2]
elif str(argv[1]) == '-k':
kafkaserver = argv[2]
else:
usage()
if len(argv) == 5:
if str(argv[1]) == '-i':
interface = argv[2]
kafkaserver = argv[4]
elif str(argv[1]) == '-k':
kafkaserver = argv[2]
interface = argv[4]
else:
usage()
if len(argv) > 5:
usage()
print ("binding socket to '%s'" % interface)
#initialize BPF - load source code from http-parse-simple.c
bpf = BPF(src_file = "data-plane-tracing.c", debug = 0)
#load eBPF program http_filter of type SOCKET_FILTER into the kernel eBPF vm
#more info about eBPF program types http://man7.org/linux/man-pages/man2/bpf.2.html
function_vlan_filter = bpf.load_func("vlan_filter", BPF.SOCKET_FILTER)
#create raw socket, bind it to eth0
#attach bpf program to socket created
BPF.attach_raw_socket(function_vlan_filter, interface)
#get file descriptor of the socket previously created inside BPF.attach_raw_socket
socket_fd = function_vlan_filter.sock
#create python socket object, from the file descriptor
sock = socket.fromfd(socket_fd,socket.PF_PACKET,socket.SOCK_RAW,socket.IPPROTO_IP)
#set it as blocking socket
sock.setblocking(True)
#get interface ip address. In case ip is not set then just add 127.0.0.1.
ni.ifaddresses(interface)
try:
ip = ni.ifaddresses(interface)[ni.AF_INET][0]['addr']
except:
ip = '127.0.0.1'
print("| Timestamp | Host Name | Host IP | IP Version | Source Host IP | Dest Host IP | Source Host Port | Dest Host Port | VNI | Source VM MAC | Dest VM MAC | VLAN ID | Source VM IP | Dest VM IP | Protocol | Source VM Port | Dest VM Port | Packet Length |")
while 1:
#retrieve raw packet from socket
packet_str = os.read(socket_fd, 2048)
#convert packet into bytearray
packet_bytearray = bytearray(packet_str)
#ethernet header length
ETH_HLEN = 14
#VXLAN header length
VXLAN_HLEN = 8
#VLAN header length
VLAN_HLEN = 4
#Inner TCP/UDP header length
TCP_HLEN = 20
UDP_HLEN = 8
#calculate packet total length
total_length = packet_bytearray[ETH_HLEN + 2] #load MSB
total_length = total_length << 8 #shift MSB
total_length = total_length + packet_bytearray[ETH_HLEN+3] #add LSB
#calculate ip header length
ip_header_length = packet_bytearray[ETH_HLEN] #load Byte
ip_header_length = ip_header_length & 0x0F #mask bits 0..3
ip_header_length = ip_header_length << 2 #shift to obtain length
#calculate payload offset
payload_offset = ETH_HLEN + ip_header_length + UDP_HLEN + VXLAN_HLEN
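    # Worked example (editor's addition, not in the original script): for a typical
    # IPv4 header with no options ip_header_length is 20, so payload_offset is
    # 14 (Ethernet) + 20 (IP) + 8 (UDP) + 8 (VXLAN) = 50, which is why the inner
    # Ethernet frame's MAC addresses are read from bytes 50-61 below.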
#parsing ip version from ip packet header
ipversion = str(bin(packet_bytearray[14])[2:5])
#parsing source ip address, destination ip address from ip packet header
src_host_ip = str(packet_bytearray[26]) + "." + str(packet_bytearray[27]) + "." + str(packet_bytearray[28]) + "." + str(packet_bytearray[29])
dest_host_ip = str(packet_bytearray[30]) + "." + str(packet_bytearray[31]) + "." + str(packet_bytearray[32]) + "." + str(packet_bytearray[33])
#parsing source port and destination port
src_host_port = packet_bytearray[34] << 8 | packet_bytearray[35]
dest_host_port = packet_bytearray[36] << 8 | packet_bytearray[37]
#parsing VNI from VXLAN header
    VNI = str((packet_bytearray[46] << 16) + (packet_bytearray[47] << 8) + packet_bytearray[48])
#parsing source mac address and destination mac address
mac_add = [packet_bytearray[50], packet_bytearray[51], packet_bytearray[52], packet_bytearray[53], packet_bytearray[54], packet_bytearray[55]]
src_vm_mac = ":".join(map(lambda b: format(b, "02x"), mac_add))
mac_add = [packet_bytearray[56], packet_bytearray[57], packet_bytearray[58], packet_bytearray[59], packet_bytearray[60], packet_bytearray[61]]
dest_vm_mac = ":".join(map(lambda b: format(b, "02x"), mac_add))
#parsing VLANID from VLAN header
VLANID=""
    VLANID = str(((packet_bytearray[64] & 0x0F) << 8) + packet_bytearray[65])
#parsing source vm ip address, destination vm ip address from encapsulated ip packet header
src_vm_ip = str(packet_bytearray[80]) + "." + str(packet_bytearray[81]) + "." + str(packet_bytearray[82]) + "." + str(packet_bytearray[83])
dest_vm_ip = str(packet_bytearray[84]) + "." + str(packet_bytearray[85]) + "." + str(packet_bytearray[86]) + "." + str(packet_bytearray[87])
#parsing source port and destination port
if (packet_bytearray[77]==6 or packet_bytearray[77]==17):
        src_vm_port = packet_bytearray[88] << 8 | packet_bytearray[89]
dest_vm_port = packet_bytearray[90] << 8 | packet_bytearray[91]
elif (packet_bytearray[77]==1):
src_vm_port = -1
dest_vm_port = -1
type = str(packet_bytearray[88])
else:
continue
timestamp = str(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S.%f'))
#send data to remote server via Kafka Messaging Bus
if kafkaserver:
MESSAGE = (timestamp, socket.gethostname(),ip, str(int(ipversion, 2)), str(src_host_ip), str(dest_host_ip), str(src_host_port), str(dest_host_port), str(int(VNI)), str(src_vm_mac), str(dest_vm_mac), str(int(VLANID)), src_vm_ip, dest_vm_ip, str(packet_bytearray[77]), str(src_vm_port), str(dest_vm_port), str(total_length))
print (MESSAGE)
MESSAGE = ','.join(MESSAGE)
MESSAGE = MESSAGE.encode()
producer = KafkaProducer(bootstrap_servers=[kafkaserver])
producer.send('iovisor-topic', key=b'iovisor', value=MESSAGE)
#save data to files
else:
MESSAGE = timestamp+","+socket.gethostname()+","+ip+","+str(int(ipversion, 2))+","+src_host_ip+","+dest_host_ip+","+str(src_host_port)+","+str(dest_host_port)+","+str(int(VNI))+","+str(src_vm_mac)+","+str(dest_vm_mac)+","+str(int(VLANID))+","+src_vm_ip+","+dest_vm_ip+","+str(packet_bytearray[77])+","+str(src_vm_port)+","+str(dest_vm_port)+","+str(total_length)
print (MESSAGE)
#save data to a file on hour basis
filename = "./vlan-data-"+time.strftime("%Y-%m-%d-%H")+"-00"
with open(filename, "a") as f:
f.write("%s\n" % MESSAGE)
| apache-2.0 | 1,250,353,873,064,427,800 | 38.901554 | 370 | 0.626542 | false |
massmutual/scikit-learn | sklearn/utils/estimator_checks.py | 1 | 54609 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM, BaseSVC
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "RandomTreesEmbedding", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
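# Illustrative sketch (editor's addition, not part of the original module): a typical
# invocation passes the estimator class itself, e.g.
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)
# which runs check_parameters_default_constructible plus every check yielded by
# _yield_all_checks for that estimator.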
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check by fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check by fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with only 1 sample (single-element y)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes is 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
if (n_classes is 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
# This is a very small dataset, default n_iter are likely to prevent
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
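# Worked example of the manual reweighting above (illustrative only): with
# y = [1, 1, 1, -1, -1] we have n_samples = 5 and n_classes = 2, so
#     class_weight[1]  = 5 / (3 * 2) ~= 0.833
#     class_weight[-1] = 5 / (2 * 2) == 1.25
# which is exactly the weighting class_weight='balanced' is expected to derive,
# hence the coef_ comparison.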
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause | -1,967,102,036,242,952,700 | 35.381746 | 81 | 0.620484 | false |
spauka/therm_flask | scripts/BlueFors Scripts/TC_monitor.py | 1 | 7020 | import os, os.path
import sys
import re
import time, datetime
import csv, json
import urllib.request, urllib.error
PATH = "C:\\BlueFors logs"
FOLDER_PATTERN = r"([0-9]{2})-([0-9]{2})-([0-9]{2})"
FRIDGE = 'BlueFors_QT1'
SENSORS = ((1, "Fifty_K"),
(2, "Four_K"),
(3, "Magnet"),
(5, "Still"),
(6, "MC"),
(9, "Probe"))
# Finds the latest folder
def find_newest_folder():
newest = (0,0,0) # y, m, d
newest_folder = ""
# Look through all the folders in the path and find the newest
for filename in os.listdir(PATH):
match = re.findall(FOLDER_PATTERN, filename)
if not match:
continue
date = tuple(int(x) for x in match[0])
if date > newest:
newest_folder = filename
newest = date
return newest_folder
# Parse the file and returns the next set of sensor values
# Select time from the last sensor read (Normally MC)
def parse_file(folder, channels, seek=None, oldest=datetime.datetime.min):
ch1 = channels[-1][0] # get the number of the last valid channel
path = os.path.join(PATH, folder, "CH%d T %s.log"%(ch1, folder))
try:
fhandle = open(path, 'rU')
except FileNotFoundError:
return (None, seek, True)
if seek:
fhandle.seek(seek[-1]) # Seek the first channel
else:
seek = [0]*len(channels) # Initialize list with [channels] zeros
while True:
line = fhandle.readline().strip() # Read the next line of the last channel file
iseof = (fhandle.tell() == os.fstat(fhandle.fileno()).st_size)
if not line:
return (None, seek, iseof)
data = line.split(',')
# Read out the next date
try:
date = datetime.datetime.strptime(data[0]+" "+data[1], "%d-%m-%y %H:%M:%S")
except (IndexError, ValueError):
# Couldn't extract time, skip line
return (None, seek, iseof)
if date < oldest:
continue
else:
# Read in all the previous sensors
data = {'Time': date}
for i, channel in enumerate(channels):
try:
s_path = os.path.join(PATH, folder, "CH%d T %s.log"%(channel[0], folder))
s_fhandle = open(s_path, 'rU')
s_fhandle.seek(seek[i])
line = s_fhandle.readline().strip(' \n\r\x00')
seek[i] = s_fhandle.tell()
line = line.split(",")
if line and len(line) == 3:
s_date = datetime.datetime.strptime(line[0]+" "+line[1], "%d-%m-%y %H:%M:%S")
temp = float(line[2])
# Check that the time is not too far in the past, if it is try to fast forward
s_eof = False
while date - s_date > datetime.timedelta(seconds=90):
line = s_fhandle.readline().strip()
seek[i] = s_fhandle.tell()
line = line.split(",")
if line and len(line) == 3:
s_date = datetime.datetime.strptime(line[0]+" "+line[1], "%d-%m-%y %H:%M:%S")
temp = float(line[2])
else:
# If we hit the end of the file and we are still in the past, move to the next sensor
s_eof = True
break
if s_eof:
# We hit the end of the file in the past. Move on to next sensor.
print("Skipping sensor: %s"%(channel[1]))
continue
# Check that this record is not more than 1.5 minutes out from the first one
if abs(s_date - date) > datetime.timedelta(seconds=90):
data[channels[i][1]] = float('NaN')
elif temp > 400000 or temp <= 0:
data[channels[i][1]] = float('NaN')
else:
data[channels[i][1]] = float(line[2])
else:
data[channels[i][1]] = float('NaN')
except FileNotFoundError:
data[channels[i][1]] = float('NaN')
return (data, seek, iseof)
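# Minimal usage sketch (illustrative only; assumes the BlueFors log layout
# described above):
#     folder = find_newest_folder()
#     seek, eof = None, False
#     while not eof:
#         record, seek, eof = parse_file(folder, SENSORS, seek=seek)
#         if record:
#             print(record['Time'], record.get('MC'))
# Each record maps the sensor names in SENSORS to temperatures (NaN when a
# reading is missing or stale) plus the 'Time' of the newest sample.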
def find_oldest():
URL = 'https://qphys1114.research.ext.sydney.edu.au/therm_flask/%s/data/?current' % FRIDGE
try:
line = urllib.request.urlopen(URL).read().decode('utf-8')
except urllib.error.HTTPError:
return datetime.datetime.min
line = json.loads(line)
if "Time" not in line:
return datetime.datetime.min
else:
date = datetime.datetime.strptime(line["Time"], "%a %b %d %H:%M:%S %Y")
print(date)
return date
def post(data):
URL = 'https://qphys1114.research.ext.sydney.edu.au/therm_flask/%s/data/' % FRIDGE
data['Time'] = data['Time'].timestamp()
print("Updating at %r" % (data['Time'],))
while True:
try:
request = urllib.request.urlopen(URL, urllib.parse.urlencode(data).encode('utf-8'))
response = request.read().decode('utf-8')
request.close()
return response
except urllib.error.URLError:
print("URLOpen Error")
continue
if __name__ == '__main__':
if len(sys.argv) == 2:
if sys.argv[1] in ('--help', '-h'):
            print("Usage: %s [--parse-all]" % sys.argv[0])
exit(0)
if sys.argv[1] == '--parse-all':
oldest = find_oldest()
print("Parsing all data from date %r" % oldest)
for filename in os.listdir(PATH):
if re.findall(FOLDER_PATTERN, filename):
print("Parsing file: %s" % filename)
seek = None
eof = False
while not eof:
data, seek, eof = parse_file(filename, SENSORS, seek=seek, oldest=oldest)
if seek == 0:
print(post({'Time': data['Time']-datetime.timedelta(0, 1)}))
if data:
print('%r, %s' % (data['Time'], post(data)))
print('Done')
#time.sleep(10)
exit(0)
oldest = find_oldest()
cfile = find_newest_folder()
seek = None
# Post a blank
print(post({'Time': oldest}))
while True:
time.sleep(1)
filename = find_newest_folder()
if filename != cfile:
seek = None
cfile = filename
            print("Starting new folder: %s" % filename)
eof = False
while not eof:
data, seek, eof = parse_file(cfile, SENSORS, seek=seek, oldest=oldest)
if data:
oldest = data['Time']
print(post(data))
| mit | 7,036,134,591,124,994,000 | 38.661017 | 117 | 0.488462 | false |
dannyboi104/SickRage | lib/github/tests/Issue214.py | 39 | 3361 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github
import Framework
class Issue214(Framework.TestCase): # https://github.com/jacquev6/PyGithub/issues/214
def setUp(self):
Framework.TestCase.setUp(self)
self.repo = self.g.get_user().get_repo("PyGithub")
self.issue = self.repo.get_issue(1)
def testAssignees(self):
self.assertTrue(self.repo.has_in_assignees('farrd'))
self.assertFalse(self.repo.has_in_assignees('fake'))
def testCollaborators(self):
self.assertTrue(self.repo.has_in_collaborators('farrd'))
self.assertFalse(self.repo.has_in_collaborators('fake'))
self.assertFalse(self.repo.has_in_collaborators('marcmenges'))
self.repo.add_to_collaborators('marcmenges')
self.assertTrue(self.repo.has_in_collaborators('marcmenges'))
self.repo.remove_from_collaborators('marcmenges')
self.assertFalse(self.repo.has_in_collaborators('marcmenges'))
def testEditIssue(self):
self.assertEqual(self.issue.assignee, None)
self.issue.edit(assignee='farrd')
self.assertEqual(self.issue.assignee.login, 'farrd')
self.issue.edit(assignee=None)
self.assertEqual(self.issue.assignee, None)
def testCreateIssue(self):
issue = self.repo.create_issue("Issue created by PyGithub", assignee='farrd')
self.assertEqual(issue.assignee.login, 'farrd')
def testGetIssues(self):
issues = self.repo.get_issues(assignee='farrd')
for issue in issues:
self.assertEqual(issue.assignee.login, 'farrd')
| gpl-3.0 | -2,778,031,219,372,326,400 | 47.710145 | 86 | 0.525439 | false |
NihilistBrew/Dust | typeclasses/objects.py | 10 | 8575 | """
Object
The Object is the "naked" base class for things in the game world.
Note that the default Character, Room and Exit does not inherit from
this Object, but from their respective default implementations in the
evennia library. If you want to use this class as a parent to change
the other types, you can do so by adding this as a multiple
inheritance.
"""
from evennia import DefaultObject
class Object(DefaultObject):
"""
This is the root typeclass object, implementing an in-game Evennia
game object, such as having a location, being able to be
manipulated or looked at, etc. If you create a new typeclass, it
must always inherit from this object (or any of the other objects
in this file, since they all actually inherit from BaseObject, as
seen in src.object.objects).
The BaseObject class implements several hooks tying into the game
engine. By re-implementing these hooks you can control the
system. You should never need to re-implement special Python
methods, such as __init__ and especially never __getattribute__ and
__setattr__ since these are used heavily by the typeclass system
of Evennia and messing with them might well break things for you.
* Base properties defined/available on all Objects
key (string) - name of object
name (string)- same as key
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
     dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
player (Player) - controlling player (if any, only set together with
sessid below)
sessid (int, read-only) - session id (if any, only set together with
player above). Use `sessions` handler to get the
Sessions directly.
location (Object) - current location. Is None if this is a room
home (Object) - safety start-location
sessions (list of Sessions, read-only) - returns all sessions connected
to this object
has_player (bool, read-only)- will only return *connected* players
contents (list of Objects, read-only) - returns all objects inside this
object (including exits)
exits (list of Objects, read-only) - returns all exits from this
object, if any
destination (Object) - only set if this object is an exit.
is_superuser (bool, read-only) - True/False if this user is a superuser
* Handlers available
locks - lock-handler: use locks.add() to add new lock strings
     db - attribute-handler: store/retrieve database attributes on this
                             object using self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not create
a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
sessions - sessions-handler. Get Sessions connected to this
object with sessions.get()
* Helper methods (see src.objects.objects.py for full headers)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None, ignore_errors=False, player=False)
execute_cmd(raw_string)
msg(text=None, **kwargs)
msg_contents(message, exclude=None, from_obj=None, **kwargs)
move_to(destination, quiet=False, emit_to_obj=None, use_destination=True)
copy(new_key=None)
delete()
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False)
check_permstring(permstring)
* Hooks (these are class methods, so args should start with self):
basetype_setup() - only called once, used for behind-the-scenes
setup. Normally not modified.
basetype_posthook_setup() - customization in basetype, after the object
has been created; Normally not modified.
at_object_creation() - only called once, when object is first created.
Object customizations go here.
at_object_delete() - called just before deleting an object. If returning
False, deletion is aborted. Note that all objects
inside a deleted object are automatically moved
to their <home>, they don't need to be removed here.
at_init() - called whenever typeclass is cached from memory,
at least once every server restart/reload
at_cmdset_get(**kwargs) - this is called just before the command handler
requests a cmdset from this object. The kwargs are
not normally used unless the cmdset is created
dynamically (see e.g. Exits).
at_pre_puppet(player)- (player-controlled objects only) called just
before puppeting
at_post_puppet() - (player-controlled objects only) called just
after completing connection player<->object
at_pre_unpuppet() - (player-controlled objects only) called just
before un-puppeting
at_post_unpuppet(player) - (player-controlled objects only) called just
after disconnecting player<->object link
at_server_reload() - called before server is reloaded
at_server_shutdown() - called just before server is fully shut down
at_access(result, accessing_obj, access_type) - called with the result
of a lock access check on this object. Return value
does not affect check result.
at_before_move(destination) - called just before moving object
to the destination. If returns False, move is cancelled.
announce_move_from(destination) - called in old location, just
before move, if obj.move_to() has quiet=False
announce_move_to(source_location) - called in new location, just
after move, if obj.move_to() has quiet=False
at_after_move(source_location) - always called after a move has
been successfully performed.
at_object_leave(obj, target_location) - called when an object leaves
this object in any fashion
at_object_receive(obj, source_location) - called when this object receives
another object
at_traverse(traversing_object, source_loc) - (exit-objects only)
handles all moving across the exit, including
calling the other exit hooks. Use super() to retain
the default functionality.
at_after_traverse(traversing_object, source_location) - (exit-objects only)
called just after a traversal has happened.
at_failed_traverse(traversing_object) - (exit-objects only) called if
traversal fails and property err_traverse is not defined.
at_msg_receive(self, msg, from_obj=None, **kwargs) - called when a message
(via self.msg()) is sent to this obj.
If returns false, aborts send.
at_msg_send(self, msg, to_obj=None, **kwargs) - called when this objects
sends a message to someone via self.msg().
return_appearance(looker) - describes this object. Used by "look"
command by default
at_desc(looker=None) - called by 'look' whenever the
appearance is requested.
at_get(getter) - called after object has been picked up.
Does not stop pickup.
at_drop(dropper) - called when this object has been dropped.
at_say(speaker, message) - by default, called if an object inside this
object speaks
"""
pass
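# Illustrative sketch (not part of the stock typeclass): the hooks documented
# above are customised simply by subclassing, e.g. a hypothetical object that
# tracks its own state and appearance:
#
#     class Lantern(Object):
#         def at_object_creation(self):
#             # called once, when the object is first created
#             self.db.is_lit = False
#         def return_appearance(self, looker):
#             desc = super(Lantern, self).return_appearance(looker)
#             return desc + ("\nIt is lit." if self.db.is_lit else "\nIt is dark.")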
| mit | -1,748,344,436,981,123,600 | 51.932099 | 81 | 0.621108 | false |
eayun/ovirt-engine | packaging/setup/ovirt_engine_setup/util.py | 5 | 8581 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utils."""
import gettext
import grp
import pwd
import re
from otopi import util
from otopi import plugin
from otopi import constants as otopicons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-engine-setup')
@util.export
def editConfigContent(
content,
params,
keep_existing=False,
changed_lines=None,
comment_re='[#]*\s*',
param_re='\w+',
new_comment_tpl='{spaces}# {original}',
separator_re='\s*=\s*',
new_line_tpl='{spaces}{param} = {value}',
added_params=None,
):
"""Return edited content of a config file.
Keyword arguments:
content - a list of strings, the content prior to calling us
params - a dict of params/values that should be in the output
If the value for a param is None, param is deleted
keep_existing - if True, existing params are not changed, only missing
ones are added.
changed_lines - an output parameter, a list of dictionaries with
added and removed lines.
comment_re - a regular expression that a comment marker prefixed
to param should match. If a commented param line is found,
a new line will be added after it.
param_re - a regular expression that should match params
new_comment_tpl - a template for a comment. {original} will be replaced
with this template, {spaces} will be replaced with
original whitespace prefix.
separator_re - a regular expression that the separator between
param and value should match
new_line_tpl - a template for a new line. {param} will be replaced
with param, {value} with value.
added_params - an output parameter, a list of params that were added
in the end because they were not found in content.
    Params that appear uncommented in the input are commented out, and new
    lines with the updated values are added after the commented lines. For
    params that appear only commented in the input, the comments are copied
    as-is, and new lines are added after the comments. Params that do not
    appear in the input are added at the end.
"""
params = params.copy()
pattern = r"""
^
(?P<spaces>\s*)
(?P<comment>{comment_re})
(?P<original>
(?P<param>{param_re})
(?P<separator>{separator_re})
(?P<value>.*)
)
$
""".format(
comment_re=comment_re,
param_re=param_re,
separator_re=separator_re,
)
re_obj = re.compile(flags=re.VERBOSE, pattern=pattern)
# Find params which are uncommented in the input.
uncommented = set()
for line in content:
f = re_obj.match(line)
if (
f is not None and
f.group('param') in params and
not f.group('comment')
):
uncommented.add(f.group('param'))
if changed_lines is None:
changed_lines = []
if added_params is None:
added_params = []
newcontent = []
processed = set()
for line in content:
f = re_obj.match(line)
if (
f is not None and
f.group('param') in params and
not (
f.group('param') in uncommented and
f.group('comment')
)
# If param in uncommented and current line is comment,
# we do not need to process it - we process the uncommented
# line when we see it
):
if (
not f.group('comment') and
(
str(f.group('value')) == str(params[f.group('param')]) or
keep_existing
)
):
# value is not changed, or we do not care. do nothing
processed.add(f.group('param'))
else:
if (
f.group('param') in uncommented and
not f.group('comment')
):
# Add current line, commented, before new line
currentline = new_comment_tpl.format(
spaces=f.group('spaces'),
original=f.group('original'),
)
changed_lines.append(
{
'added': currentline,
'removed': line,
}
)
newcontent.append(currentline)
else:
# Only possible option here is that current line is
# a comment and param is not in uncommented. Keep it.
# Other two options are in "if"s above.
# The last option - param is not in uncommented
# and current line is not a comment - is not possible.
newcontent.append(line)
newline = new_line_tpl.format(
spaces=f.group('spaces'),
param=f.group('param'),
value=params[f.group('param')],
)
changed_lines.append(
{
'added': newline,
}
)
processed.add(f.group('param'))
line = newline
newcontent.append(line)
# Add remaining params at the end
for param, value in params.items():
if param not in processed:
newline = new_line_tpl.format(
spaces='',
param=param,
value=value,
)
newcontent.append(newline)
changed_lines.append(
{
'added': newline,
}
)
added_params.append(param)
return newcontent
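# Illustrative usage sketch (not part of the original module; param names are
# made up): update one param that only appears commented out and keep another
# that already has the desired value:
#
#     content = ['# ENGINE_PORT = 8080', 'ENGINE_DEBUG = False']
#     changed, added = [], []
#     newcontent = editConfigContent(
#         content,
#         {'ENGINE_PORT': 8443, 'ENGINE_DEBUG': 'False'},
#         changed_lines=changed,
#         added_params=added,
#     )
#     # 'ENGINE_PORT = 8443' is inserted after the commented line, ENGINE_DEBUG
#     # is left untouched, and changed/added describe the modifications.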
@util.export
def getUid(user):
return pwd.getpwnam(user)[2]
@util.export
def getGid(group):
return grp.getgrnam(group)[2]
@util.export
def parsePort(port):
try:
port = int(port)
except ValueError:
raise ValueError(
_('Invalid port {number}').format(
number=port,
)
)
if port < 0 or port > 0xffff:
raise ValueError(
_('Invalid number {number}').format(
number=port,
)
)
return port
@util.export
def addExitCode(environment, code, priority=plugin.Stages.PRIORITY_DEFAULT):
environment[
otopicons.BaseEnv.EXIT_CODE
].append(
{
'code': code,
'priority': priority,
}
)
@util.export
def getPackageManager(logger=None):
"""Return a tuple with the package manager printable name string, the mini
implementation class and the sink base class, for the preferred package
manager available in the system.
    The only parameter accepted by this function is a logger instance, which
    can be omitted (or None) if the user does not want logs.
"""
try:
from otopi import minidnf
minidnf.MiniDNF()
if logger is not None:
logger.debug('Using DNF as package manager')
return 'DNF', minidnf.MiniDNF, minidnf.MiniDNFSinkBase
except (ImportError, RuntimeError):
try:
from otopi import miniyum
# yum does not raises validation exceptions in constructor,
# then its not worth instantiating it to test.
if logger is not None:
logger.debug('Using Yum as package manager')
return 'Yum', miniyum.MiniYum, miniyum.MiniYumSinkBase
except ImportError:
raise RuntimeError(
_(
'No supported package manager found in your system'
)
)
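# Illustrative usage sketch (not part of the original module): callers unpack
# the returned triple and instantiate the preferred manager, e.g.
#     pm_name, MiniPkgMgr, PkgMgrSink = getPackageManager(logger=None)
#     packager = MiniPkgMgr()
# where pm_name is either 'DNF' or 'Yum' and PkgMgrSink is the matching sink
# base class used for package-manager event callbacks.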
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 | -2,239,322,857,381,195,000 | 31.138577 | 78 | 0.547139 | false |
ivanvladimir/gensim | gensim/test/test_doc2vec.py | 1 | 16473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
from __future__ import with_statement
import logging
import unittest
import os
import tempfile
from six.moves import zip as izip
from collections import namedtuple
import numpy as np
from gensim import utils, matutils
from gensim.models import doc2vec
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class DocsLeeCorpus(object):
def __init__(self, string_tags=False):
self.string_tags = string_tags
def _tag(self, i):
return i if not self.string_tags else '_*%d' % i
def __iter__(self):
with open(datapath('lee_background.cor')) as f:
for i, line in enumerate(f):
yield doc2vec.TaggedDocument(utils.simple_preprocess(line), [self._tag(i)])
list_corpus = list(DocsLeeCorpus())
raw_sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
def testfile():
# temporary data will be stored to this file
return os.path.join(tempfile.gettempdir(), 'gensim_doc2vec.tst')
class TestDoc2VecModel(unittest.TestCase):
def test_persistence(self):
"""Test storing/loading the entire model."""
model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
model.save(testfile())
self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
def test_load_mmap(self):
"""Test storing/loading the entire model."""
model = doc2vec.Doc2Vec(sentences, min_count=1)
# test storing the internal arrays into separate files
model.save(testfile(), sep_limit=0)
self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
# make sure mmaping the arrays back works, too
self.models_equal(model, doc2vec.Doc2Vec.load(testfile(), mmap='r'))
def test_int_doctags(self):
"""Test doc2vec doctag alternatives"""
corpus = DocsLeeCorpus()
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertEqual(len(model.docvecs.doctag_syn0), 300)
self.assertEqual(model.docvecs[0].shape, (300,))
self.assertRaises(KeyError, model.__getitem__, '_*0')
def test_string_doctags(self):
"""Test doc2vec doctag alternatives"""
corpus = list(DocsLeeCorpus(True))
# force duplicated tags
corpus = corpus[0:10] + corpus
model = doc2vec.Doc2Vec(min_count=1)
model.build_vocab(corpus)
self.assertEqual(len(model.docvecs.doctag_syn0), 300)
self.assertEqual(model.docvecs[0].shape, (300,))
self.assertEqual(model.docvecs['_*0'].shape, (300,))
self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0]))
self.assertTrue(max(d.offset for d in model.docvecs.doctags.values()) < len(model.docvecs.doctags))
self.assertTrue(max(model.docvecs._int_index(str_key) for str_key in model.docvecs.doctags.keys()) < len(model.docvecs.doctag_syn0))
def test_empty_errors(self):
# no input => "RuntimeError: you must first build vocabulary before training the model"
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, [])
# input not empty, but rather completely filtered out
self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000)
def model_sanity(self, model):
"""Any non-trivial model on DocsLeeCorpus can pass these sanity checks"""
fire1 = 0 # doc 0 sydney fires
fire2 = 8 # doc 8 sydney fires
tennis1 = 6 # doc 6 tennis
# inferred vector should be top10 close to bulk-trained one
doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
sims_to_infer = model.docvecs.most_similar([doc0_inferred], topn=len(model.docvecs))
f_rank = [docid for docid, sim in sims_to_infer].index(fire1)
        self.assertLess(f_rank, 10)
# fire2 should be top30 close to fire1
sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs))
f2_rank = [docid for docid, sim in sims].index(fire2)
self.assertLess(f2_rank, 30)
# same sims should appear in lookup by vec as by index
doc0_vec = model.docvecs[fire1]
sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21)
sims2 = [(id, sim) for id, sim in sims2 if id != fire1] # ignore the doc itself
sims = sims[:20]
self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0]) # same doc ids
self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1])) # close-enough dists
# tennis doc should be out-of-place among fire news
self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1)
# fire docs should be closer than fire-tennis
self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1))
def test_training(self):
"""Test doc2vec training."""
corpus = DocsLeeCorpus()
model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20)
model.build_vocab(corpus)
self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100))
model.train(corpus)
self.model_sanity(model)
# build vocab and train in one step; must be the same as above
model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20)
self.models_equal(model, model2)
def test_dbow_hs(self):
"""Test DBOW doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, iter=20)
self.model_sanity(model)
def test_dmm_hs(self):
"""Test DM/mean doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1, negative=0,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dms_hs(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1, negative=0,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dmc_hs(self):
"""Test DM/concatenate doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=1, negative=0,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dbow_neg(self):
"""Test DBOW doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=0, negative=10, min_count=2, iter=20)
self.model_sanity(model)
def test_dmm_neg(self):
"""Test DM/mean doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0, negative=10,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dms_neg(self):
"""Test DM/sum doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=0, negative=10,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_dmc_neg(self):
"""Test DM/concatenate doc2vec training."""
model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=0, negative=10,
alpha=0.05, min_count=2, iter=20)
self.model_sanity(model)
def test_parallel(self):
"""Test doc2vec parallel training."""
if doc2vec.FAST_VERSION < 0: # don't test the plain NumPy version for parallelism (too slow)
return
corpus = utils.RepeatCorpus(DocsLeeCorpus(), 10000)
for workers in [2, 4]:
model = doc2vec.Doc2Vec(corpus, workers=workers)
self.model_sanity(model)
def test_deterministic_hs(self):
"""Test doc2vec results identical with identical RNG seed."""
# hs
model = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_neg(self):
"""Test doc2vec results identical with identical RNG seed."""
# neg
model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
self.models_equal(model, model2)
def test_deterministic_dmc(self):
"""Test doc2vec results identical with identical RNG seed."""
# bigger, dmc
model = doc2vec.Doc2Vec(DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4, hs=1, negative=3,
seed=42, workers=1)
model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4, hs=1, negative=3,
seed=42, workers=1)
self.models_equal(model, model2)
def test_mixed_tag_types(self):
"""Ensure alternating int/string tags don't share indexes in doctag_syn0"""
mixed_tag_corpus = [doc2vec.TaggedDocument(words, [i, words[0]]) for i, words in enumerate(raw_sentences)]
model = doc2vec.Doc2Vec()
model.build_vocab(mixed_tag_corpus)
expected_length = len(sentences) + len(model.docvecs.doctags) # 9 sentences, 7 unique first tokens
print(model.docvecs.doctags)
print(model.docvecs.count)
self.assertEquals(len(model.docvecs.doctag_syn0), expected_length)
def models_equal(self, model, model2):
# check words/hidden-weights
self.assertEqual(len(model.vocab), len(model2.vocab))
self.assertTrue(np.allclose(model.syn0, model2.syn0))
if model.hs:
self.assertTrue(np.allclose(model.syn1, model2.syn1))
if model.negative:
self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
# check docvecs
self.assertEqual(len(model.docvecs.doctags), len(model2.docvecs.doctags))
self.assertEqual(len(model.docvecs.offset2doctag), len(model2.docvecs.offset2doctag))
self.assertTrue(np.allclose(model.docvecs.doctag_syn0, model2.docvecs.doctag_syn0))
#endclass TestDoc2VecModel
if not hasattr(TestDoc2VecModel, 'assertLess'):
# workaround for python 2.6
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
setattr(TestDoc2VecModel, 'assertLess', assertLess)
# following code is useful for reproducing paragraph-vectors paper sentiment experiments
class ConcatenatedDoc2Vec(object):
"""
Concatenation of multiple models for reproducing the Paragraph Vectors paper.
Models must have exactly-matching vocabulary and document IDs. (Models should
be trained separately; this wrapper just returns concatenated results.)
"""
def __init__(self, models):
self.models = models
if hasattr(models[0], 'docvecs'):
self.docvecs = ConcatenatedDocvecs([model.docvecs for model in models])
def __getitem__(self, token):
return np.concatenate([model[token] for model in self.models])
def infer_vector(self, document, alpha=0.1, min_alpha=0.0001, steps=5):
return np.concatenate([model.infer_vector(document, alpha, min_alpha, steps) for model in self.models])
def train(self, ignored):
pass # train subcomponents individually
class ConcatenatedDocvecs(object):
def __init__(self, models):
self.models = models
def __getitem__(self, token):
return np.concatenate([model[token] for model in self.models])
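# Illustrative usage sketch (not part of the test suite): pairing a DBOW and a
# DM model, as in the Paragraph Vectors paper, might look like
#     m1 = doc2vec.Doc2Vec(list_corpus, dm=0, size=100, negative=5, min_count=2)
#     m2 = doc2vec.Doc2Vec(list_corpus, dm=1, size=100, negative=5, min_count=2)
#     combined = ConcatenatedDoc2Vec([m1, m2])
#     vec = combined.infer_vector(['some', 'unseen', 'words'])  # length 200
#     docvec0 = combined.docvecs[0]                             # length 200
# Both models must share vocabulary and document tags; only their vectors are
# concatenated.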
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
def read_su_sentiment_rotten_tomatoes(dirname, lowercase=True):
"""
Read and return documents from the Stanford Sentiment Treebank
corpus (Rotten Tomatoes reviews), from http://nlp.Stanford.edu/sentiment/
Initialize the corpus from a given directory, where
http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip
has been expanded. It's not too big, so compose entirely into memory.
"""
logging.info("loading corpus from %s" % dirname)
# many mangled chars in sentences (datasetSentences.txt)
chars_sst_mangled = ['à', 'á', 'â', 'ã', 'æ', 'ç', 'è', 'é', 'í',
'í', 'ï', 'ñ', 'ó', 'ô', 'ö', 'û', 'ü']
sentence_fixups = [(char.encode('utf-8').decode('latin1'), char) for char in chars_sst_mangled]
# more junk, and the replace necessary for sentence-phrase consistency
sentence_fixups.extend([
('Â', ''),
('\xa0', ' '),
('-LRB-', '('),
('-RRB-', ')'),
])
# only this junk in phrases (dictionary.txt)
phrase_fixups = [('\xa0', ' ')]
# sentence_id and split are only positive for the full sentences
# read sentences to temp {sentence -> (id,split) dict, to correlate with dictionary.txt
info_by_sentence = {}
with open(os.path.join(dirname, 'datasetSentences.txt'), 'r') as sentences:
with open(os.path.join(dirname, 'datasetSplit.txt'), 'r') as splits:
next(sentences) # legend
next(splits) # legend
for sentence_line, split_line in izip(sentences, splits):
(id, text) = sentence_line.split('\t')
id = int(id)
text = text.rstrip()
for junk, fix in sentence_fixups:
text = text.replace(junk, fix)
(id2, split_i) = split_line.split(',')
assert id == int(id2)
if text not in info_by_sentence: # discard duplicates
info_by_sentence[text] = (id, int(split_i))
# read all phrase text
phrases = [None] * 239232 # known size of phrases
with open(os.path.join(dirname, 'dictionary.txt'), 'r') as phrase_lines:
for line in phrase_lines:
(text, id) = line.split('|')
for junk, fix in phrase_fixups:
text = text.replace(junk, fix)
phrases[int(id)] = text.rstrip() # for 1st pass just string
SentimentPhrase = namedtuple('SentimentPhrase', SentimentDocument._fields + ('sentence_id',))
# add sentiment labels, correlate with sentences
with open(os.path.join(dirname, 'sentiment_labels.txt'), 'r') as sentiments:
next(sentiments) # legend
for line in sentiments:
(id, sentiment) = line.split('|')
id = int(id)
sentiment = float(sentiment)
text = phrases[id]
words = text.split()
if lowercase:
words = [word.lower() for word in words]
(sentence_id, split_i) = info_by_sentence.get(text, (None, 0))
split = [None, 'train', 'test', 'dev'][split_i]
phrases[id] = SentimentPhrase(words, [id], split, sentiment, sentence_id)
assert len([phrase for phrase in phrases if phrase.sentence_id is not None]) == len(info_by_sentence) # all
# counts don't match 8544, 2210, 1101 because 13 TRAIN and 1 DEV sentences are duplicates
assert len([phrase for phrase in phrases if phrase.split == 'train']) == 8531 # 'train'
assert len([phrase for phrase in phrases if phrase.split == 'test']) == 2210 # 'test'
assert len([phrase for phrase in phrases if phrase.split == 'dev']) == 1100 # 'dev'
logging.info("loaded corpus with %i sentences and %i phrases from %s",
len(info_by_sentence), len(phrases), dirname)
return phrases
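# Illustrative usage sketch (not part of the test suite): after expanding
# stanfordSentimentTreebank.zip locally, the phrases can be split back into the
# official train/dev/test sentences, e.g.
#     docs = read_su_sentiment_rotten_tomatoes('/path/to/stanfordSentimentTreebank')
#     train = [d for d in docs if d.split == 'train']
#     test = [d for d in docs if d.split == 'test']
# Each element is a SentimentPhrase with words, tags, split, sentiment (0.0-1.0)
# and sentence_id (None for sub-sentence phrases).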
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
logging.info("using optimization %s", doc2vec.FAST_VERSION)
unittest.main()
| gpl-3.0 | 4,964,461,634,763,161,000 | 41.084399 | 140 | 0.627043 | false |
40223149/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/__init__.py | 603 | 6082 | ## pygame - Python Game Library
## Copyright (C) 2000-2001 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
'''Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
class MissingModule:
def __init__(self, name, info='', urgent=0):
self.name = name
self.info = str(info)
self.urgent = urgent
if urgent:
self.warn()
def __getattr__(self, var):
if not self.urgent:
self.warn()
self.urgent = 1
MissingPygameModule = "%s module not available" % self.name
raise NotImplementedError(MissingPygameModule)
def __nonzero__(self):
return 0
def warn(self):
if self.urgent: type = 'import'
else: type = 'use'
message = '%s %s: %s' % (type, self.name, self.info)
try:
import warnings
if self.urgent: level = 4
else: level = 3
warnings.warn(message, RuntimeWarning, level)
except ImportError:
print(message)
#we need to import like this, each at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)
#first, the "required" modules
#from pygame.array import * #brython fix me
from pygame.base import *
from pygame.constants import *
from pygame.version import *
from pygame.rect import Rect
import pygame.color
Color = pygame.color.Color
__version__ = ver
#added by earney
from . import time
from . import display
from . import constants
from . import event
from . import font
from . import mixer
from . import sprite
from .surface import Surface
from . import image
from . import mouse
from . import transform
#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
'''
try: import pygame.cdrom
except (ImportError,IOError), msg:cdrom=MissingModule("cdrom", msg, 1)
try: import pygame.cursors
except (ImportError,IOError), msg:cursors=MissingModule("cursors", msg, 1)
try: import pygame.display
except (ImportError,IOError), msg:display=MissingModule("display", msg, 1)
try: import pygame.draw
except (ImportError,IOError), msg:draw=MissingModule("draw", msg, 1)
try: import pygame.event
except (ImportError,IOError), msg:event=MissingModule("event", msg, 1)
try: import pygame.image
except (ImportError,IOError), msg:image=MissingModule("image", msg, 1)
try: import pygame.joystick
except (ImportError,IOError), msg:joystick=MissingModule("joystick", msg, 1)
try: import pygame.key
except (ImportError,IOError), msg:key=MissingModule("key", msg, 1)
try: import pygame.mouse
except (ImportError,IOError), msg:mouse=MissingModule("mouse", msg, 1)
try: import pygame.sprite
except (ImportError,IOError), msg:sprite=MissingModule("sprite", msg, 1)
try: from pygame.surface import Surface
except (ImportError,IOError):Surface = lambda:Missing_Function
try: from pygame.overlay import Overlay
except (ImportError,IOError):Overlay = lambda:Missing_Function
try: import pygame.time
except (ImportError,IOError), msg:time=MissingModule("time", msg, 1)
try: import pygame.transform
except (ImportError,IOError), msg:transform=MissingModule("transform", msg, 1)
#lastly, the "optional" pygame modules
try:
import pygame.font
import pygame.sysfont
pygame.font.SysFont = pygame.sysfont.SysFont
pygame.font.get_fonts = pygame.sysfont.get_fonts
pygame.font.match_font = pygame.sysfont.match_font
except (ImportError,IOError), msg:font=MissingModule("font", msg, 0)
try: import pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)
#try: import pygame.movie
#except (ImportError,IOError), msg:movie=MissingModule("movie", msg, 0)
#try: import pygame.movieext
#except (ImportError,IOError), msg:movieext=MissingModule("movieext", msg, 0)
try: import pygame.surfarray
except (ImportError,IOError), msg:surfarray=MissingModule("surfarray", msg, 0)
try: import pygame.sndarray
except (ImportError,IOError), msg:sndarray=MissingModule("sndarray", msg, 0)
#try: import pygame.fastevent
#except (ImportError,IOError), msg:fastevent=MissingModule("fastevent", msg, 0)
#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import pygame.imageext; del pygame.imageext
except (ImportError,IOError):pass
try: import pygame.mixer_music; del pygame.mixer_music
except (ImportError,IOError):pass
def packager_imports():
"""
Some additional things that py2app/py2exe will want to see
"""
import OpenGL.GL
'''
#make Rects pickleable
import copyreg
def __rect_constructor(x,y,w,h):
return Rect(x,y,w,h)
def __rect_reduce(r):
assert type(r) == Rect
return __rect_constructor, (r.x, r.y, r.w, r.h)
copyreg.pickle(Rect, __rect_reduce, __rect_constructor)
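# Illustrative check (not executed here): after the copyreg registration above a
# Rect survives a pickle round-trip, e.g.
#     import pickle
#     r = Rect(10, 20, 30, 40)
#     r2 = pickle.loads(pickle.dumps(r))  # r2 has the same x, y, w, h as r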
#cleanup namespace
del pygame, os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
| gpl-3.0 | -2,677,844,959,535,766,000 | 30.512953 | 79 | 0.719336 | false |
mm112287/2015cda-24 | static/Brython3.1.1-20150328-091302/Lib/codecs.py | 739 | 35436 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
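# Usage sketch (illustrative only, not part of the registry machinery itself):
# lookup() returns one of these CodecInfo records and its pieces can be used
# directly.
def _example_codec_info():
    info = lookup("utf-8")               # CodecInfo for the builtin utf-8 codec
    data, consumed = info.encode("abc")  # (b'abc', 3)
    return info.name, data, consumed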
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
Create a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
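# Usage sketch for the incremental-decoder API above (illustrative only):
# a UTF-8 sequence split in the middle of a multi-byte character still decodes
# correctly because undecoded bytes are buffered between calls.
def _example_incremental_decode():
    decoder = getincrementaldecoder("utf-8")()    # helper defined later in this module
    text = decoder.decode(b"caf\xc3")             # second byte of '\xc3\xa9' not seen yet
    text += decoder.decode(b"\xa9", final=True)
    return text                                   # 'caf\xe9'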
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
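# Usage sketch for StreamWriter (illustrative only): text written through the
# wrapper is encoded and forwarded to the underlying binary stream.
def _example_stream_writer():
    import io
    raw = io.BytesIO()
    writer = getwriter("utf-8")(raw)              # StreamWriter factory, defined later
    writer.write("line one\n")
    writer.writelines(["line ", "two\n"])
    return raw.getvalue()                         # b"line one\nline two\n"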
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars < 0:
if size < 0:
if self.charbuffer:
break
elif len(self.charbuffer) >= size:
break
else:
if len(self.charbuffer) >= chars:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to find the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
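# Usage sketch for StreamReader (illustrative only): bytes read from the
# underlying stream are decoded and can be consumed line by line or iterated.
def _example_stream_reader():
    import io
    reader = getreader("utf-8")(io.BytesIO(b"first\nsecond\n"))
    return list(reader)                           # ['first\n', 'second\n']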
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then return encoded data to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
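# Usage sketch for open() above (illustrative only; the path is hypothetical,
# adjust it for a real run):
def _example_open(path="/tmp/codecs-open-example.txt"):
    f = open(path, "w", encoding="utf-8")         # returns a StreamReaderWriter
    f.write("some text\n")
    f.close()
    f = open(path, "r", encoding="utf-8")
    data = f.read()
    f.close()
    return data                                   # "some text\n"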
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
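# Usage sketch for EncodedFile() above (illustrative only): latin-1 data
# written by the caller is transparently recoded to utf-8 in the backing file.
def _example_encoded_file():
    import io
    backing = io.BytesIO()
    wrapped = EncodedFile(backing, data_encoding="latin-1", file_encoding="utf-8")
    wrapped.write(b"caf\xe9")                     # latin-1 bytes from the caller
    return backing.getvalue()                     # b"caf\xc3\xa9" stored as utf-8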
### Helpers for codec lookup
def getencoder(encoding):
""" Lookup up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
""" Lookup up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalEncoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
""" Lookup up the codec for the given encoding and return
its IncrementalDecoder class or factory function.
Raises a LookupError in case the encoding cannot be found
or the codecs doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
""" Lookup up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
""" Lookup up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
Encodes the input strings from the iterator using a IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
Decodes the input strings from the iterator using a IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
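# Usage sketch for the two generator helpers above (illustrative only):
def _example_iterencode_iterdecode():
    encoded = list(iterencode(["alpha", "beta"], "utf-8"))   # [b'alpha', b'beta']
    decoded = list(iterdecode(encoded, "utf-8"))             # ['alpha', 'beta']
    return encoded, decoded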
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
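# Worked example for the two charmap helpers above (illustrative only):
#   make_identity_dict(range(3))                 -> {0: 0, 1: 1, 2: 2}
#   make_encoding_map({0x41: 0x0391,             # 'A' -> GREEK CAPITAL ALPHA
#                      0x42: 0x0391})            # 'B' -> same target, second time
#       -> {0x0391: None}                        # duplicated target becomes undefined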
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handlers are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| gpl-3.0 | 6,426,372,559,448,843,000 | 31.243858 | 80 | 0.61288 | false |
georgeha/mandelbrot | mandelbrot_core/mandelbrot_pilot_cores.py | 1 | 7097 | __author__ = "George Chantzialexiou"
__copyright__ = "Copyright 2012-2013, The Pilot Project"
__license__ = "MIT"
""" A Mandelbrot Fractal Generator Using Pilot Job
This is an example of a Mandelbrot fractal generator
using the capabilities of the Pilot Job API.
It requires the Python Imaging Library (PIL), which can be easily
installed with 'easy_install PIL'. It also requires the
mandel_lines.py program to generate the parts of the fractal.
The parameters are the following:
imgX, imgY: the dimensions of the mandelbrot image, e.g. 1024, 1024
xBeg, xEnd: the x-axis portion of the (sub-)image to calculate
yBeg, yEnd: the y-axis portion of the (sub-)image to calculate
This module takes the parameters of the Mandelbrot fractal and decomposes
the image into n different parts, where n is the number of cores on
the system. It then runs the Mandelbrot generator code (mandel_lines.py)
for every part. mandel_lines.py creates n images, which are then composed
into one image: the whole fractal. One Compute Unit is created for every
part of the image.
You can run this code from the command line:
python mandelbrot_pilot.py imgX imgY xBeg xEnd yBeg yEnd
"""
import os, sys, radical.pilot
from PIL import Image
import multiprocessing # this library is used to find the number of the cores.
# DBURL defines the MongoDB server URL and has the format mongodb://host:port.
# For the installation of a MongoDB server, refer to http://docs.mongodb.org.
DBURL = ("RADICAL_PILOT_DBURL")
if DBURL is None:
print "ERROR: RADICAL_PILOT_DBURL (MongoDB server URL) is not defined."
sys.exit(1)
#------------------------------------------------------------------------------
#
def pilot_state_cb(pilot, state):
"""pilot_state_change_cb() is a callback function. It gets called very
time a ComputePilot changes its state.
"""
if state == radical.pilot.states.FAILED:
print "Compute Pilot '%s' failed, exiting ..." % pilot.uid
sys.exit(1)
elif state == radical.pilot.states.ACTIVE:
print "Compute Pilot '%s' became active!" % (pilot.uid)
#------------------------------------------------------------------------------
#
def unit_state_change_cb(unit, state):
"""unit_state_change_cb() is a callback function. It gets called very
time a ComputeUnit changes its state.
"""
if state == radical.pilot.states.FAILED:
print "Compute Unit '%s' failed ..." % unit.uid
sys.exit(1)
elif state == radical.pilot.states.DONE:
print "Compute Unit '%s' finished with output:" % (unit.uid)
print unit.stdout
#------------------------------------------------------------------------------
#
def main():
try:
# reading the input from user:
args = sys.argv[1:]
if len(args) < 6:
print "Usage: python %s imgX imgY xBeg xEnd yBeg yEnd filename" % __file__
sys.exit(-1)
imgX = int(sys.argv[1])
imgY = int(sys.argv[2])
xBeg = int(sys.argv[3])
xEnd = int(sys.argv[4])
yBeg = int(sys.argv[5])
yEnd = int(sys.argv[6])
# end of reading input from the user
# Uncomment the following three lines if you want to run remotely:
#c = radical.pilot.Context('ssh')
#c.user_id = 'user_id'
#session.add_context(c)
#DBURL = "mongodb://localhost:27017" # this is the default database_url if you run the mongodb on localhost
# here we create a new radical session
DBURL = "mongodb://localhost:27017"
try:
session = radical.pilot.Session(database_url = DBURL)
except Exception, e:
print "An error with mongodb has occured: %s" % (str(e))
return (-1)
# Add a Pilot Manager. Pilot managers manage one or more ComputePilots.
print "Initiliazing Pilot Manager..."
pmgr = radical.pilot.PilotManager(session=session)
# Register our callback with our Pilot Manager. This callback will get
# called every time any of the pilots managed by the PilotManager
# change their state
pmgr.register_callback(pilot_state_cb)
# this describes the requirements and the paramers
pdesc = radical.pilot.ComputePilotDescription()
pdesc.resource = "localhost" # we are running on localhost
pdesc.runtime = 10 # minutes
pdesc.cores = multiprocessing.cpu_count() # we use all the cores we have
pdesc.cleanup = True # delete all the files that are created automatically and we don't need anymore when the job is done
print "Submitting Compute Pilot to PilotManager"
pilot = pmgr.submit_pilots(pdesc)
# Combine the ComputePilot, the ComputeUnits and a scheduler via
# a UnitManager object.
print "Initializing Unit Manager"
umgr = radical.pilot.UnitManager(
session=session,
scheduler=radical.pilot.SCHED_DIRECT_SUBMISSION)
# Register our callback with the UnitManager. This callback will get
# called every time any of the units managed by the UnitManager
# change their state.
print 'Registering the callbacks so we can keep an eye on the CUs'
umgr.register_callback(unit_state_change_cb)
print "Registering Compute Pilot with Unit Manager"
umgr.add_pilots(pilot)
output_data_list = []
mylist = []
for i in range(1,pdesc.cores+1):
output_data_list.append('mandel_%d.gif' % i)
# -------- BEGIN USER DEFINED CU DESCRIPTION --------- #
cudesc = radical.pilot.ComputeUnitDescription()
cudesc.environment = {"mandelx": "%d" % imgX, "mandely": "%d" % imgY, "xBeg": "%d" % xBeg,
"xEnd": "%d" % xEnd, "yBeg": "%d" % yBeg, "yEnd": "%d" % yEnd, "cores": "%d" % pdesc.cores, "iter": "%d" % i }
cudesc.executable = "python"
cudesc.arguments = ['mandel_lines.py','$mandelx','$mandely','$xBeg','$xEnd','$yBeg','$yEnd','$cores','$iter']
cudesc.input_data = ['mandel_lines.py']
cudesc.output_data = output_data_list[i-1]
mylist.append(cudesc)
# -------- END USER DEFINED CU DESCRIPTION --------- #
print 'Submitting the CU to the Unit Manager...'
mylist_units = umgr.submit_units(mylist)
# wait for all units to finish
umgr.wait_units()
print "All Compute Units completed successfully! Now.."
# stitch together the final image
fullimage = Image.new("RGB", (xEnd-xBeg, yEnd-yBeg))
print "Stitching together the whole fractal to : mandelbrot_full.gif"
for i in range(1,pdesc.cores+1):
partimage = Image.open('mandel_%d.gif' % i)
box_top = (xBeg, int((yEnd*(i-1))/pdesc.cores), xEnd ,int((yEnd*(i+1))/pdesc.cores))
mandel_part = partimage.crop(box_top)
fullimage.paste(mandel_part, box_top)
fullimage.save("mandelbrot_full.gif", "GIF")
print 'Image is now saved in the working directory..'
session.close()
print "Session closed, exiting now ..."
sys.exit(0)
except Exception as e:
print "AN ERROR OCCURRED: %s" % ((str(e)))
return(-1)
#------------------------------------------------------------------------------
#
if __name__ == "__main__":
sys.exit(main())
#
#------------------------------------------------------------------------------
| mit | -8,474,553,532,810,824,000 | 32.635071 | 125 | 0.649288 | false |
nexiles/odoo | addons/stock/procurement.py | 227 | 22183 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from openerp import SUPERUSER_ID
from dateutil.relativedelta import relativedelta
from datetime import datetime
from psycopg2 import OperationalError
import openerp
class procurement_group(osv.osv):
_inherit = 'procurement.group'
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner')
}
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
result = super(procurement_rule, self)._get_action(cr, uid, context=context)
return result + [('move', _('Move From Another Location'))]
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids):
res += [x.id for x in route.pull_ids]
return res
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'),
'location_src_id': fields.many2one('stock.location', 'Source Location',
help="Source location is action=move"),
'route_id': fields.many2one('stock.location.route', 'Route',
help="If route_id is False, the rule is global"),
'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True,
help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type',
help="Picking Type determines the way the picking should be shown in the view, reports, ..."),
'delay': fields.integer('Number of Days'),
'partner_address_id': fields.many2one('res.partner', 'Partner Address'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too'),
'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'),
'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"),
}
_defaults = {
'procure_method': 'make_to_stock',
'propagate': True,
'delay': 0,
}
class procurement_order(osv.osv):
_inherit = "procurement.order"
_columns = {
'location_id': fields.many2one('stock.location', 'Procurement Location'), # not required because task may create procurements that aren't linked to a location with sale_service
'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"),
'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"),
'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'),
}
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'move' and procurement.move_ids:
self.pool.get('stock.move').action_cancel(cr, uid, [m.id for m in procurement.move_ids], context=context)
def cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
ctx = context.copy()
#set the context for the propagation of the procurement cancelation
ctx['cancel_procurement'] = True
for procurement in self.browse(cr, uid, to_cancel_ids, context=ctx):
self.propagate_cancel(cr, uid, procurement, context=ctx)
return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx)
def _find_parent_locations(self, cr, uid, procurement, context=None):
location = procurement.location_id
res = [location.id]
while location.location_id:
location = location.location_id
res.append(location.id)
return res
def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
return {'value': {'location_id': warehouse.lot_stock_id.id}}
return {}
def _search_suitable_rule(self, cr, uid, procurement, domain, context=None):
'''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior'''
pull_obj = self.pool.get('procurement.rule')
warehouse_route_ids = []
if procurement.warehouse_id:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids]
procurement_route_ids = [x.id for x in procurement.route_ids]
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or []
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
return res
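    # Illustrative summary of the resolution order implemented above (not part
    # of the original code): rules attached to the procurement's own routes win,
    # then routes set on the product or its category, then routes of the
    # warehouse, and finally global rules with no route at all; within each
    # group the rule with the lowest route_sequence/sequence is preferred.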
def _find_suitable_rule(self, cr, uid, procurement, context=None):
rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context)
if not rule_id:
#a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A'
all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context)
rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context)
rule_id = rule_id and rule_id[0] or False
return rule_id
def _run_move_create(self, cr, uid, procurement, context=None):
''' Returns a dictionary of values that will be used to create a stock move from a procurement.
This function assumes that the given procurement has a rule (action == 'move') set on it.
:param procurement: browse record
:rtype: dictionary
'''
newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')
group_id = False
if procurement.rule_id.group_propagation_option == 'propagate':
group_id = procurement.group_id and procurement.group_id.id or False
elif procurement.rule_id.group_propagation_option == 'fixed':
group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False
#it is possible that we've already got some move done, so check for the done qty and create
#a new move with the correct qty
already_done_qty = 0
already_done_qty_uos = 0
for move in procurement.move_ids:
already_done_qty += move.product_uom_qty if move.state == 'done' else 0
already_done_qty_uos += move.product_uos_qty if move.state == 'done' else 0
qty_left = max(procurement.product_qty - already_done_qty, 0)
qty_uos_left = max(procurement.product_uos_qty - already_done_qty_uos, 0)
vals = {
'name': procurement.name,
'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,
'product_id': procurement.product_id.id,
'product_uom': procurement.product_uom.id,
'product_uom_qty': qty_left,
'product_uos_qty': (procurement.product_uos and qty_uos_left) or qty_left,
'product_uos': (procurement.product_uos and procurement.product_uos.id) or procurement.product_uom.id,
'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,
'location_id': procurement.rule_id.location_src_id.id,
'location_dest_id': procurement.location_id.id,
'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,
'procurement_id': procurement.id,
'rule_id': procurement.rule_id.id,
'procure_method': procurement.rule_id.procure_method,
'origin': procurement.origin,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in procurement.route_ids],
'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,
'date': newdate,
'date_expected': newdate,
'propagate': procurement.rule_id.propagate,
'priority': procurement.priority,
}
return vals
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'move':
if not procurement.rule_id.location_src_id:
self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)
return False
move_obj = self.pool.get('stock.move')
move_dict = self._run_move_create(cr, uid, procurement, context=context)
#create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)
return True
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def run(self, cr, uid, ids, autocommit=False, context=None):
new_ids = [x.id for x in self.browse(cr, uid, ids, context=context) if x.state not in ('running', 'done', 'cancel')]
res = super(procurement_order, self).run(cr, uid, new_ids, autocommit=autocommit, context=context)
#after all the procurements are run, check if some created a draft stock move that needs to be confirmed
#(we do that in batch because it fasts the picking assignation and the picking state computation)
move_to_confirm_ids = []
for procurement in self.browse(cr, uid, new_ids, context=context):
if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move":
move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft']
if move_to_confirm_ids:
self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context)
return res
def _check(self, cr, uid, procurement, context=None):
''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related
moves are done/cancel and if the requested quantity is moved.
'''
if procurement.rule_id and procurement.rule_id.action == 'move':
uom_obj = self.pool.get('product.uom')
# In case Phantom BoM splits only into procurements
if not procurement.move_ids:
return True
cancel_test_list = [x.state == 'cancel' for x in procurement.move_ids]
done_cancel_test_list = [x.state in ('done', 'cancel') for x in procurement.move_ids]
at_least_one_cancel = any(cancel_test_list)
all_done_or_cancel = all(done_cancel_test_list)
all_cancel = all(cancel_test_list)
if not all_done_or_cancel:
return False
elif all_done_or_cancel and not all_cancel:
return True
elif all_cancel:
self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'cancel'}, context=context)
return False
return super(procurement_order, self)._check(cr, uid, procurement, context)
def do_view_pickings(self, cr, uid, ids, context=None):
'''
This function returns an action that display the pickings of the procurements belonging
to the same procurement group of given ids.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
'''
Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules
and the availability of moves. This function is intended to be run for all the companies at the same time, so
we run functions as SUPERUSER to avoid intercompanies and access rights issues.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
if context is None:
context = {}
try:
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
move_obj = self.pool.get('stock.move')
#Minimum stock rules
self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
#Search all confirmed stock_moves and try to assign them
confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='priority desc, date_expected asc', context=context)
for x in xrange(0, len(confirmed_ids), 100):
move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context)
if use_new_cursor:
cr.commit()
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
date_planned = start_date + relativedelta(days=orderpoint.product_id.seller_delay or 0.0)
return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
return {
'name': orderpoint.name,
'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
'product_id': orderpoint.product_id.id,
'product_qty': product_qty,
'company_id': orderpoint.company_id.id,
'product_uom': orderpoint.product_uom.id,
'location_id': orderpoint.location_id.id,
'origin': orderpoint.name,
'warehouse_id': orderpoint.warehouse_id.id,
'orderpoint_id': orderpoint.id,
'group_id': orderpoint.group_id.id,
}
def _product_virtual_get(self, cr, uid, order_point):
product_obj = self.pool.get('product.product')
return product_obj._product_available(cr, uid,
[order_point.product_id.id],
context={'location': order_point.location_id.id})[order_point.product_id.id]['virtual_available']
def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id = False, context=None):
'''
Create procurement based on Orderpoint
:param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
'''
if context is None:
context = {}
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
procurement_obj = self.pool.get('procurement.order')
dom = company_id and [('company_id', '=', company_id)] or []
orderpoint_ids = orderpoint_obj.search(cr, uid, dom)
prev_ids = []
while orderpoint_ids:
ids = orderpoint_ids[:100]
del orderpoint_ids[:100]
for op in orderpoint_obj.browse(cr, uid, ids, context=context):
try:
prods = self._product_virtual_get(cr, uid, op)
if prods is None:
continue
if float_compare(prods, op.product_min_qty, precision_rounding=op.product_uom.rounding) < 0:
qty = max(op.product_min_qty, op.product_max_qty) - prods
reste = op.qty_multiple > 0 and qty % op.qty_multiple or 0.0
if float_compare(reste, 0.0, precision_rounding=op.product_uom.rounding) > 0:
qty += op.qty_multiple - reste
if float_compare(qty, 0.0, precision_rounding=op.product_uom.rounding) <= 0:
continue
qty -= orderpoint_obj.subtract_procurements(cr, uid, op, context=context)
qty_rounded = float_round(qty, precision_rounding=op.product_uom.rounding)
if qty_rounded > 0:
proc_id = procurement_obj.create(cr, uid,
self._prepare_orderpoint_procurement(cr, uid, op, qty_rounded, context=context),
context=context)
self.check(cr, uid, [proc_id])
self.run(cr, uid, [proc_id])
if use_new_cursor:
cr.commit()
except OperationalError:
if use_new_cursor:
orderpoint_ids.append(op.id)
cr.rollback()
continue
else:
raise
if use_new_cursor:
cr.commit()
if prev_ids == ids:
break
else:
prev_ids = ids
if use_new_cursor:
cr.commit()
cr.close()
return {}
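    # Worked example of the reordering-rule arithmetic above (illustrative only,
    # hypothetical numbers): with product_min_qty=10, product_max_qty=50,
    # qty_multiple=12 and a virtual/forecast quantity ("prods") of 4:
    #   qty   = max(10, 50) - 4 = 46
    #   reste = 46 % 12 = 10
    #   qty  += 12 - 10 -> 48          (rounded up to the next multiple of 12)
    # so a procurement for 48 units is created, minus whatever is already
    # covered by open procurements (subtract_procurements).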
| agpl-3.0 | 4,480,869,196,577,223,000 | 55.589286 | 292 | 0.616328 | false |
mailboxly/po | pymongo/auth.py | 23 | 15619 | # Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication helpers."""
import hmac
HAVE_KERBEROS = True
try:
import kerberos
except ImportError:
HAVE_KERBEROS = False
from base64 import standard_b64decode, standard_b64encode
from collections import namedtuple
from hashlib import md5, sha1
from random import SystemRandom
from bson.binary import Binary
from bson.py3compat import b, string_type, _unicode, PY3
from bson.son import SON
from pymongo.errors import ConfigurationError, OperationFailure
MECHANISMS = frozenset(
['GSSAPI', 'MONGODB-CR', 'MONGODB-X509', 'PLAIN', 'SCRAM-SHA-1', 'DEFAULT'])
"""The authentication mechanisms supported by PyMongo."""
MongoCredential = namedtuple(
'MongoCredential',
['mechanism', 'source', 'username', 'password', 'mechanism_properties'])
"""A hashable namedtuple of values used for authentication."""
GSSAPIProperties = namedtuple('GSSAPIProperties', ['service_name'])
"""Mechanism properties for GSSAPI authentication."""
def _build_credentials_tuple(mech, source, user, passwd, extra):
"""Build and return a mechanism specific credentials tuple.
"""
user = _unicode(user)
if mech == 'GSSAPI':
properties = extra.get('authmechanismproperties', {})
service_name = properties.get('SERVICE_NAME', 'mongodb')
props = GSSAPIProperties(service_name=service_name)
# No password, source is always $external.
return MongoCredential(mech, '$external', user, None, props)
elif mech == 'MONGODB-X509':
return MongoCredential(mech, '$external', user, None, None)
else:
if passwd is None:
raise ConfigurationError("A password is required.")
return MongoCredential(mech, source, user, _unicode(passwd), None)
if PY3:
def _xor(fir, sec):
"""XOR two byte strings together (python 3.x)."""
return b"".join([bytes([x ^ y]) for x, y in zip(fir, sec)])
_from_bytes = int.from_bytes
_to_bytes = int.to_bytes
else:
from binascii import (hexlify as _hexlify,
unhexlify as _unhexlify)
def _xor(fir, sec):
"""XOR two byte strings together (python 2.x)."""
return b"".join([chr(ord(x) ^ ord(y)) for x, y in zip(fir, sec)])
def _from_bytes(value, dummy, int=int, _hexlify=_hexlify):
"""An implementation of int.from_bytes for python 2.x."""
return int(_hexlify(value), 16)
def _to_bytes(value, dummy0, dummy1, _unhexlify=_unhexlify):
"""An implementation of int.to_bytes for python 2.x."""
return _unhexlify('%040x' % value)
try:
# The fastest option, if it's been compiled to use OpenSSL's HMAC.
from backports.pbkdf2 import pbkdf2_hmac
def _hi(data, salt, iterations):
return pbkdf2_hmac('sha1', data, salt, iterations)
except ImportError:
try:
# Python 2.7.8+, or Python 3.4+.
from hashlib import pbkdf2_hmac
def _hi(data, salt, iterations):
return pbkdf2_hmac('sha1', data, salt, iterations)
except ImportError:
def _hi(data, salt, iterations):
"""A simple implementation of PBKDF2."""
mac = hmac.HMAC(data, None, sha1)
def _digest(msg, mac=mac):
"""Get a digest for msg."""
_mac = mac.copy()
_mac.update(msg)
return _mac.digest()
from_bytes = _from_bytes
to_bytes = _to_bytes
_u1 = _digest(salt + b'\x00\x00\x00\x01')
_ui = from_bytes(_u1, 'big')
for _ in range(iterations - 1):
_u1 = _digest(_u1)
_ui ^= from_bytes(_u1, 'big')
return to_bytes(_ui, 20, 'big')
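# Sanity-check sketch for the pure-Python PBKDF2 fallback above (illustrative
# only): _hi(b"password", b"salt", 1) should equal the first PBKDF2-HMAC-SHA1
# test vector from RFC 6070,
#   0c60c80f961f0e71f3a9b524af6012062fe037a6  (20 bytes).
# In the real authentication flow the first argument is the MD5 password
# digest, not the raw password.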
try:
from hmac import compare_digest
except ImportError:
if PY3:
def _xor_bytes(a, b):
return a ^ b
else:
def _xor_bytes(a, b, _ord=ord):
return _ord(a) ^ _ord(b)
# Python 2.x < 2.7.7 and Python 3.x < 3.3
# References:
# - http://bugs.python.org/issue14532
# - http://bugs.python.org/issue14955
# - http://bugs.python.org/issue15061
def compare_digest(a, b, _xor_bytes=_xor_bytes):
left = None
right = b
if len(a) == len(b):
left = a
result = 0
if len(a) != len(b):
left = b
result = 1
for x, y in zip(left, right):
result |= _xor_bytes(x, y)
return result == 0
def _parse_scram_response(response):
"""Split a scram response into key, value pairs."""
return dict(item.split(b"=", 1) for item in response.split(b","))
def _authenticate_scram_sha1(credentials, sock_info):
"""Authenticate using SCRAM-SHA-1."""
username = credentials.username
password = credentials.password
source = credentials.source
# Make local
_hmac = hmac.HMAC
_sha1 = sha1
user = username.encode("utf-8").replace(b"=", b"=3D").replace(b",", b"=2C")
nonce = standard_b64encode(
(("%s" % (SystemRandom().random(),))[2:]).encode("utf-8"))
first_bare = b"n=" + user + b",r=" + nonce
cmd = SON([('saslStart', 1),
('mechanism', 'SCRAM-SHA-1'),
('payload', Binary(b"n,," + first_bare)),
('autoAuthorize', 1)])
res = sock_info.command(source, cmd)
server_first = res['payload']
parsed = _parse_scram_response(server_first)
iterations = int(parsed[b'i'])
salt = parsed[b's']
rnonce = parsed[b'r']
if not rnonce.startswith(nonce):
raise OperationFailure("Server returned an invalid nonce.")
without_proof = b"c=biws,r=" + rnonce
salted_pass = _hi(_password_digest(username, password).encode("utf-8"),
standard_b64decode(salt),
iterations)
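    # Editor's note: the values computed below follow RFC 5802:
    #   ClientKey       = HMAC(SaltedPassword, "Client Key")
    #   StoredKey       = H(ClientKey)
    #   ClientSignature = HMAC(StoredKey, AuthMessage)
    #   ClientProof     = ClientKey XOR ClientSignature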
client_key = _hmac(salted_pass, b"Client Key", _sha1).digest()
stored_key = _sha1(client_key).digest()
auth_msg = b",".join((first_bare, server_first, without_proof))
client_sig = _hmac(stored_key, auth_msg, _sha1).digest()
client_proof = b"p=" + standard_b64encode(_xor(client_key, client_sig))
client_final = b",".join((without_proof, client_proof))
server_key = _hmac(salted_pass, b"Server Key", _sha1).digest()
server_sig = standard_b64encode(
_hmac(server_key, auth_msg, _sha1).digest())
cmd = SON([('saslContinue', 1),
('conversationId', res['conversationId']),
('payload', Binary(client_final))])
res = sock_info.command(source, cmd)
parsed = _parse_scram_response(res['payload'])
if not compare_digest(parsed[b'v'], server_sig):
raise OperationFailure("Server returned an invalid signature.")
# Depending on how it's configured, Cyrus SASL (which the server uses)
# requires a third empty challenge.
if not res['done']:
cmd = SON([('saslContinue', 1),
('conversationId', res['conversationId']),
('payload', Binary(b''))])
res = sock_info.command(source, cmd)
if not res['done']:
raise OperationFailure('SASL conversation failed to complete.')
def _password_digest(username, password):
"""Get a password digest to use for authentication.
"""
if not isinstance(password, string_type):
raise TypeError("password must be an "
"instance of %s" % (string_type.__name__,))
if len(password) == 0:
raise ValueError("password can't be empty")
if not isinstance(username, string_type):
raise TypeError("password must be an "
"instance of %s" % (string_type.__name__,))
md5hash = md5()
data = "%s:mongo:%s" % (username, password)
md5hash.update(data.encode('utf-8'))
return _unicode(md5hash.hexdigest())
def _auth_key(nonce, username, password):
"""Get an auth key to use for authentication.
"""
digest = _password_digest(username, password)
md5hash = md5()
data = "%s%s%s" % (nonce, username, digest)
md5hash.update(data.encode('utf-8'))
return _unicode(md5hash.hexdigest())
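# Editor's note (illustrative): the MONGODB-CR key computed above is
#   hex(md5(nonce + username + hex(md5("<username>:mongo:<password>"))))
# which is the value the server expects in the 'key' field of the
# authenticate command.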
def _authenticate_gssapi(credentials, sock_info):
"""Authenticate using GSSAPI.
"""
if not HAVE_KERBEROS:
raise ConfigurationError('The "kerberos" module must be '
'installed to use GSSAPI authentication.')
try:
username = credentials.username
gsn = credentials.mechanism_properties.service_name
# Starting here and continuing through the while loop below - establish
# the security context. See RFC 4752, Section 3.1, first paragraph.
host = sock_info.address[0]
result, ctx = kerberos.authGSSClientInit(
gsn + '@' + host, gssflags=kerberos.GSS_C_MUTUAL_FLAG)
if result != kerberos.AUTH_GSS_COMPLETE:
raise OperationFailure('Kerberos context failed to initialize.')
try:
# pykerberos uses a weird mix of exceptions and return values
# to indicate errors.
# 0 == continue, 1 == complete, -1 == error
# Only authGSSClientStep can return 0.
if kerberos.authGSSClientStep(ctx, '') != 0:
raise OperationFailure('Unknown kerberos '
'failure in step function.')
# Start a SASL conversation with mongod/s
# Note: pykerberos deals with base64 encoded byte strings.
# Since mongo accepts base64 strings as the payload we don't
# have to use bson.binary.Binary.
payload = kerberos.authGSSClientResponse(ctx)
cmd = SON([('saslStart', 1),
('mechanism', 'GSSAPI'),
('payload', payload),
('autoAuthorize', 1)])
response = sock_info.command('$external', cmd)
# Limit how many times we loop to catch protocol / library issues
for _ in range(10):
result = kerberos.authGSSClientStep(ctx,
str(response['payload']))
if result == -1:
raise OperationFailure('Unknown kerberos '
'failure in step function.')
payload = kerberos.authGSSClientResponse(ctx) or ''
cmd = SON([('saslContinue', 1),
('conversationId', response['conversationId']),
('payload', payload)])
response = sock_info.command('$external', cmd)
if result == kerberos.AUTH_GSS_COMPLETE:
break
else:
raise OperationFailure('Kerberos '
'authentication failed to complete.')
# Once the security context is established actually authenticate.
# See RFC 4752, Section 3.1, last two paragraphs.
if kerberos.authGSSClientUnwrap(ctx,
str(response['payload'])) != 1:
raise OperationFailure('Unknown kerberos '
'failure during GSS_Unwrap step.')
if kerberos.authGSSClientWrap(ctx,
kerberos.authGSSClientResponse(ctx),
username) != 1:
raise OperationFailure('Unknown kerberos '
'failure during GSS_Wrap step.')
payload = kerberos.authGSSClientResponse(ctx)
cmd = SON([('saslContinue', 1),
('conversationId', response['conversationId']),
('payload', payload)])
sock_info.command('$external', cmd)
finally:
kerberos.authGSSClientClean(ctx)
except kerberos.KrbError as exc:
raise OperationFailure(str(exc))
def _authenticate_plain(credentials, sock_info):
"""Authenticate using SASL PLAIN (RFC 4616)
"""
source = credentials.source
username = credentials.username
password = credentials.password
payload = ('\x00%s\x00%s' % (username, password)).encode('utf-8')
cmd = SON([('saslStart', 1),
('mechanism', 'PLAIN'),
('payload', Binary(payload)),
('autoAuthorize', 1)])
sock_info.command(source, cmd)
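# Editor's note (illustrative example, not part of the original module): for
# username "user" and password "pencil" the PLAIN payload built above is
# b'\x00user\x00pencil', i.e. an empty authorization identity, the username
# and the password separated by NUL bytes as described in RFC 4616.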
def _authenticate_cram_md5(credentials, sock_info):
"""Authenticate using CRAM-MD5 (RFC 2195)
"""
source = credentials.source
username = credentials.username
password = credentials.password
# The password used as the mac key is the
# same as what we use for MONGODB-CR
passwd = _password_digest(username, password)
cmd = SON([('saslStart', 1),
('mechanism', 'CRAM-MD5'),
('payload', Binary(b'')),
('autoAuthorize', 1)])
response = sock_info.command(source, cmd)
# MD5 as implicit default digest for digestmod is deprecated
# in python 3.4
mac = hmac.HMAC(key=passwd.encode('utf-8'), digestmod=md5)
mac.update(response['payload'])
challenge = username.encode('utf-8') + b' ' + b(mac.hexdigest())
cmd = SON([('saslContinue', 1),
('conversationId', response['conversationId']),
('payload', Binary(challenge))])
sock_info.command(source, cmd)
def _authenticate_x509(credentials, sock_info):
"""Authenticate using MONGODB-X509.
"""
query = SON([('authenticate', 1),
('mechanism', 'MONGODB-X509'),
('user', credentials.username)])
sock_info.command('$external', query)
def _authenticate_mongo_cr(credentials, sock_info):
"""Authenticate using MONGODB-CR.
"""
source = credentials.source
username = credentials.username
password = credentials.password
# Get a nonce
response = sock_info.command(source, {'getnonce': 1})
nonce = response['nonce']
key = _auth_key(nonce, username, password)
# Actually authenticate
query = SON([('authenticate', 1),
('user', username),
('nonce', nonce),
('key', key)])
sock_info.command(source, query)
def _authenticate_default(credentials, sock_info):
if sock_info.max_wire_version >= 3:
return _authenticate_scram_sha1(credentials, sock_info)
else:
return _authenticate_mongo_cr(credentials, sock_info)
_AUTH_MAP = {
'CRAM-MD5': _authenticate_cram_md5,
'GSSAPI': _authenticate_gssapi,
'MONGODB-CR': _authenticate_mongo_cr,
'MONGODB-X509': _authenticate_x509,
'PLAIN': _authenticate_plain,
'SCRAM-SHA-1': _authenticate_scram_sha1,
'DEFAULT': _authenticate_default,
}
def authenticate(credentials, sock_info):
"""Authenticate sock_info."""
mechanism = credentials.mechanism
auth_func = _AUTH_MAP.get(mechanism)
auth_func(credentials, sock_info)
def logout(source, sock_info):
"""Log out from a database."""
sock_info.command(source, {'logout': 1})
| agpl-3.0 | -7,128,809,727,734,575,000 | 34.417234 | 80 | 0.58941 | false |
LeZhang2016/openthread | tests/scripts/thread-cert/ipv6.py | 11 | 35649 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import abc
import io
import struct
import sys
from binascii import hexlify
from ipaddress import ip_address
try:
from itertools import izip_longest as zip_longest
except ImportError:
from itertools import zip_longest
# Next headers for IPv6 protocols
IPV6_NEXT_HEADER_HOP_BY_HOP = 0
IPV6_NEXT_HEADER_TCP = 6
IPV6_NEXT_HEADER_UDP = 17
IPV6_NEXT_HEADER_ICMP = 58
UPPER_LAYER_PROTOCOLS = [
IPV6_NEXT_HEADER_TCP,
IPV6_NEXT_HEADER_UDP,
IPV6_NEXT_HEADER_ICMP,
]
# ICMP Protocol codes
ICMP_DESTINATION_UNREACHABLE = 1
ICMP_ECHO_REQUEST = 128
ICMP_ECHO_RESPONSE = 129
# Default hop limit for IPv6
HOP_LIMIT_DEFAULT = 64
def calculate_checksum(data):
""" Calculate checksum from data bytes.
How to calculate checksum (RFC 2460):
https://tools.ietf.org/html/rfc2460#page-27
Args:
data (bytes): input data from which checksum will be calculated
Returns:
int: calculated checksum
"""
# Create halfwords from data bytes. Example: data[0] = 0x01, data[1] = 0xb2 => 0x01b2
halfwords = [((byte0 << 8) | byte1) for byte0, byte1 in zip_longest(data[::2], data[1::2], fillvalue=0x00)]
checksum = 0
for halfword in halfwords:
checksum += halfword
checksum = (checksum & 0xFFFF) + (checksum >> 16)
checksum ^= 0xFFFF
if checksum == 0:
return 0xFFFF
else:
return checksum
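# Editor's note (illustrative example, not part of the original module):
#   calculate_checksum(bytes([0x01, 0xb2, 0x00, 0x03]))
# forms the halfwords 0x01b2 and 0x0003, adds them (0x01b5), folds any carry
# into the low 16 bits and returns the one's complement, 0xfe4a.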
class PacketFactory(object):
""" Interface for classes that produce objects from data. """
def parse(self, data, message_info):
""" Convert data to object.
Args:
data (BytesIO)
message_info (MessageInfo)
"""
raise NotImplementedError
class BuildableFromBytes(object):
""" Interface for classes which can be built from bytes. """
@classmethod
def from_bytes(cls, data):
""" Convert data to object.
Args:
data (bytes)
"""
raise NotImplementedError
class ConvertibleToBytes(object):
""" Interface for classes which can be converted to bytes. """
def to_bytes(self):
""" Convert object to data.
Returns:
bytes
"""
raise NotImplementedError
def __len__(self):
""" Length of data (in bytes).
Returns:
int
"""
raise NotImplementedError
class Header(object):
""" Interface for header classes. """
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def type(self):
""" Number which can be used in the next header field in IPv6 header or next headers.
Returns:
int
"""
class ExtensionHeader(object):
""" Base for classes representing Extension Headers in IPv6 packets. """
def __init__(self, next_header, hdr_ext_len=0):
self.next_header = next_header
self.hdr_ext_len = hdr_ext_len
class UpperLayerProtocol(Header, ConvertibleToBytes):
""" Base for classes representing upper layer protocol payload in IPv6 packets. """
def __init__(self, header):
self.header = header
@property
def checksum(self):
""" Return checksum from upper layer protocol header. """
return self.header.checksum
@checksum.setter
def checksum(self, value):
""" Set checksum value in upper layer protocol header. """
self.header.checksum = value
def is_valid_checksum(self):
""" Return information if set checksum is valid.
It is not possible to get zero from checksum calculation.
Zero indicates invalid checksum value.
Returns:
bool
"""
return self.checksum != 0
class IPv6PseudoHeader(ConvertibleToBytes):
""" Class representing IPv6 pseudo header which is required to calculate
    upper layer protocol (e.g. UDP or ICMPv6) checksums.
This class is used only during upper layer protocol checksum calculation. Do not use it outside of this module.
"""
def __init__(self, source_address, destination_address, payload_length, next_header):
self._source_address = self._convert_to_ipaddress(source_address)
self._destination_address = self._convert_to_ipaddress(destination_address)
self.payload_length = payload_length
self.next_header = next_header
def _convert_to_ipaddress(self, value):
if isinstance(value, bytearray):
value = bytes(value)
elif isinstance(value, str) and sys.version_info[0] == 2:
value = value.decode("utf-8")
return ip_address(value)
@property
def source_address(self):
return self._source_address
@source_address.setter
def source_address(self, value):
self._source_address = self._convert_to_ipaddress(value)
@property
def destination_address(self):
return self._destination_address
@destination_address.setter
def destination_address(self, value):
        self._destination_address = self._convert_to_ipaddress(value)
def to_bytes(self):
data = bytearray()
data += self.source_address.packed
data += self.destination_address.packed
data += struct.pack(">I", self.payload_length)
data += struct.pack(">I", self.next_header)
return data
class IPv6Header(ConvertibleToBytes, BuildableFromBytes):
""" Class representing IPv6 packet header. """
_version = 6
_header_length = 40
def __init__(self, source_address, destination_address, traffic_class=0, flow_label=0, hop_limit=64,
payload_length=0, next_header=0):
self.version = self._version
self._source_address = self._convert_to_ipaddress(source_address)
self._destination_address = self._convert_to_ipaddress(destination_address)
self.traffic_class = traffic_class
self.flow_label = flow_label
self.hop_limit = hop_limit
self.payload_length = payload_length
self.next_header = next_header
def _convert_to_ipaddress(self, value):
if isinstance(value, bytearray):
value = bytes(value)
elif isinstance(value, str) and sys.version_info[0] == 2:
value = value.decode("utf-8")
return ip_address(value)
@property
def source_address(self):
return self._source_address
@source_address.setter
def source_address(self, value):
self._source_address = self._convert_to_ipaddress(value)
@property
def destination_address(self):
return self._destination_address
def to_bytes(self):
data = bytearray([
((self.version & 0x0F) << 4) | ((self.traffic_class >> 4) & 0x0F),
((self.traffic_class & 0x0F) << 4) | ((self.flow_label >> 16) & 0x0F),
((self.flow_label >> 8) & 0xFF),
((self.flow_label & 0xFF))
])
data += struct.pack(">H", self.payload_length)
data += bytearray([self.next_header, self.hop_limit])
data += self.source_address.packed
data += self.destination_address.packed
return data
@classmethod
def from_bytes(cls, data):
b = bytearray(data.read(4))
version = (b[0] >> 4) & 0x0F
traffic_class = ((b[0] & 0x0F) << 4) | ((b[1] >> 4) & 0x0F)
flow_label = ((b[1] & 0x0F) << 16) | (b[2] << 8) | b[3]
payload_length = struct.unpack(">H", data.read(2))[0]
next_header = ord(data.read(1))
hop_limit = ord(data.read(1))
src_addr = bytearray(data.read(16))
dst_addr = bytearray(data.read(16))
return cls(src_addr,
dst_addr,
traffic_class,
flow_label,
hop_limit,
payload_length,
next_header)
def __repr__(self):
return "IPv6Header(source_address={}, destination_address={}, next_header={}, payload_length={}, \
hop_limit={}, traffic_class={}, flow_label={})".format(self.source_address.compressed,
self.destination_address.compressed,
self.next_header,
self.payload_length,
self.hop_limit,
self.traffic_class,
self.flow_label)
def __len__(self):
return self._header_length
class IPv6Packet(ConvertibleToBytes):
""" Class representing IPv6 packet.
IPv6 packet consists of IPv6 header, optional extension header, and upper layer protocol.
IPv6 packet
+-------------+----------------------------------+----------------------------------------------+
| | | |
| IPv6 header | extension headers (zero or more) | upper layer protocol (e.g. UDP, TCP, ICMPv6) |
| | | |
+-------------+----------------------------------+----------------------------------------------+
Extension headers:
- HopByHop
- Routing header (not implemented in this module)
Upper layer protocols:
- ICMPv6
- UDP
- TCP (not implemented in this module)
Example:
IPv6 packet construction without extension headers:
ipv6_packet = IPv6Packet(IPv6Header("fd00:1234:4555::ff:fe00:1800", "ff03::1"),
ICMPv6(ICMPv6Header(128, 0),
ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
0x41, 0x41]))))
IPv6 packet construction with extension headers:
ipv6_packet = IPv6Packet(IPv6Header("fd00:1234:4555::ff:fe00:1800", "ff03::1"),
ICMPv6(ICMPv6Header(128, 0),
ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
0x41, 0x41])),
[HopByHop(options=[
HopByHopOption(HopByHopOptionHeader(_type=0x6d),
MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytes([0x00, 0x18])))
])])
"""
def __init__(self, ipv6_header, upper_layer_protocol, extension_headers=None):
self.ipv6_header = ipv6_header
self.upper_layer_protocol = upper_layer_protocol
self.extension_headers = extension_headers if extension_headers is not None else []
self._update_next_header_values_in_headers()
if not upper_layer_protocol.is_valid_checksum():
self.upper_layer_protocol.checksum = self.calculate_checksum()
def _validate_checksum(self):
checksum = self.calculate_checksum()
if self.upper_layer_protocol.checksum != checksum:
raise RuntimeError("Could not create IPv6 packet. "
"Invalid checksum: {}!={}".format(self.upper_layer_protocol.checksum, checksum))
self.upper_layer_protocol.checksum = checksum
def _update_payload_length_value_in_ipv6_header(self):
self.ipv6_header.payload_length = len(self.upper_layer_protocol) + \
sum([len(extension_header) for extension_header in self.extension_headers])
def _update_next_header_values_in_headers(self):
last_header = self.ipv6_header
for extension_header in self.extension_headers:
last_header.next_header = extension_header.type
last_header = extension_header
last_header.next_header = self.upper_layer_protocol.type
def calculate_checksum(self):
saved_checksum = self.upper_layer_protocol.checksum
self.upper_layer_protocol.checksum = 0
upper_layer_protocol_bytes = self.upper_layer_protocol.to_bytes()
self.upper_layer_protocol.checksum = saved_checksum
pseudo_header = IPv6PseudoHeader(self.ipv6_header.source_address,
self.ipv6_header.destination_address,
len(upper_layer_protocol_bytes),
self.upper_layer_protocol.type)
return calculate_checksum(pseudo_header.to_bytes() + upper_layer_protocol_bytes)
def to_bytes(self):
self._update_payload_length_value_in_ipv6_header()
self._update_next_header_values_in_headers()
self.upper_layer_protocol.checksum = self.calculate_checksum()
ipv6_packet = self.ipv6_header.to_bytes()
for extension_header in self.extension_headers:
ipv6_packet += extension_header.to_bytes()
ipv6_packet += self.upper_layer_protocol.to_bytes()
return ipv6_packet
def __repr__(self):
return "IPv6Packet(header={}, upper_layer_protocol={})".format(self.ipv6_header, self.upper_layer_protocol)
class UDPHeader(ConvertibleToBytes, BuildableFromBytes):
""" Class representing UDP datagram header.
This header is required to construct UDP datagram.
"""
_header_length = 8
def __init__(self, src_port, dst_port, payload_length=0, checksum=0):
self.src_port = src_port
self.dst_port = dst_port
self._payload_length = payload_length
self.checksum = checksum
@property
def type(self):
        return IPV6_NEXT_HEADER_UDP
@property
def payload_length(self):
return self._payload_length
@payload_length.setter
def payload_length(self, value):
self._payload_length = self._header_length + value
def to_bytes(self):
data = struct.pack(">H", self.src_port)
data += struct.pack(">H", self.dst_port)
data += struct.pack(">H", self.payload_length)
data += struct.pack(">H", self.checksum)
return data
@classmethod
def from_bytes(cls, data):
src_port = struct.unpack(">H", data.read(2))[0]
dst_port = struct.unpack(">H", data.read(2))[0]
payload_length = struct.unpack(">H", data.read(2))[0]
checksum = struct.unpack(">H", data.read(2))[0]
return cls(src_port, dst_port, payload_length, checksum)
def __len__(self):
return self._header_length
class UDPDatagram(UpperLayerProtocol):
""" Class representing UDP datagram.
UDP is an upper layer protocol for IPv6 so it can be passed to IPv6 packet as upper_layer_protocol.
This class consists of a UDP header and payload. The example below shows how a UDP datagram can be constructed.
Example:
udp_dgram = UDPDatagram(UDPHeader(src_port=19788, dst_port=19788),
bytes([0x00, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x01, 0x09, 0x01, 0x01, 0x0b, 0x03,
0x04, 0xc6, 0x69, 0x73, 0x51, 0x0e, 0x01, 0x80,
0x12, 0x02, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef]))
"""
@property
def type(self):
        return IPV6_NEXT_HEADER_UDP
def __init__(self, header, payload):
super(UDPDatagram, self).__init__(header)
self.payload = payload
def to_bytes(self):
self.header.payload_length = len(self.payload)
data = bytearray()
data += self.header.to_bytes()
data += self.payload.to_bytes()
return data
def __len__(self):
return len(self.header) + len(self.payload)
class ICMPv6Header(ConvertibleToBytes, BuildableFromBytes):
""" Class representing ICMPv6 message header.
This header is required to construct ICMPv6 message.
"""
_header_length = 4
def __init__(self, _type, code, checksum=0):
self.type = _type
self.code = code
self.checksum = checksum
def to_bytes(self):
return bytearray([self.type, self.code]) + struct.pack(">H", self.checksum)
@classmethod
def from_bytes(cls, data):
_type = ord(data.read(1))
code = ord(data.read(1))
checksum = struct.unpack(">H", data.read(2))[0]
return cls(_type, code, checksum)
def __len__(self):
return self._header_length
class ICMPv6(UpperLayerProtocol):
""" Class representing ICMPv6 message.
ICMPv6 is an upper layer protocol for IPv6 so it can be passed to IPv6 packet as upper_layer_protocol.
This class consists of an ICMPv6 header and body. The example below shows how an ICMPv6 message can be constructed.
Example:
icmpv6_msg = ICMPv6(ICMPv6Header(128, 0),
ICMPv6EchoBody(0, 2, bytes([0x80, 0x00, 0xc7, 0xbf, 0x00, 0x00, 0x00, 0x01,
0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
0x41, 0x41])))
"""
@property
def type(self):
        return IPV6_NEXT_HEADER_ICMP
def __init__(self, header, body):
super(ICMPv6, self).__init__(header)
self.body = body
def to_bytes(self):
return bytearray(self.header.to_bytes() + self.body.to_bytes())
def __len__(self):
return len(self.header) + len(self.body)
class HopByHop(ExtensionHeader):
""" Class representing HopByHop extension header.
HopByHop extension header consists of:
- next_header type
- extension header length which is multiple of 8
- options
"""
_one_byte_padding = 0x00
_many_bytes_padding = 0x01
@property
def type(self):
        return IPV6_NEXT_HEADER_HOP_BY_HOP
def __init__(self, next_header=None, options=None, hdr_ext_len=None):
super(HopByHop, self).__init__(next_header, hdr_ext_len)
self.options = options if options is not None else []
if hdr_ext_len is not None:
self.hdr_ext_len = hdr_ext_len
else:
payload_length = self._calculate_payload_length()
self.hdr_ext_len = self._calculate_hdr_ext_len(payload_length)
def _calculate_payload_length(self):
payload_length = 2
for option in self.options:
payload_length += len(option)
return payload_length
def _calculate_hdr_ext_len(self, payload_length):
count = payload_length >> 3
if (payload_length & 0x7) == 0 and count > 0:
return count - 1
return count
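    # Editor's note (illustrative): hdr_ext_len counts 8-octet units beyond
    # the first, so a payload of exactly 8 bytes yields 0 and a payload of
    # 9-16 bytes yields 1, matching RFC 2460 section 4.3.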
def to_bytes(self):
data = bytearray([self.next_header, self.hdr_ext_len])
for option in self.options:
data += option.to_bytes()
# Padding
#
# More details:
# https://tools.ietf.org/html/rfc2460#section-4.2
#
excess_bytes = len(data) & 0x7
if excess_bytes > 0:
padding_length = 8 - excess_bytes
if padding_length == 1:
data += bytearray([self._one_byte_padding])
else:
padding_length -= 2
data += bytearray([self._many_bytes_padding, padding_length])
data += bytearray([0x00 for _ in range(padding_length)])
return data
def __len__(self):
""" HopByHop extension header length
More details:
https://tools.ietf.org/html/rfc2460#section-4.3
"""
return (self.hdr_ext_len + 1) * 8
class HopByHopOptionHeader(ConvertibleToBytes, BuildableFromBytes):
""" Class representing HopByHop option header. """
_header_length = 2
def __init__(self, _type, length=None):
self.type = _type
self.length = length if length is not None else 0
def to_bytes(self):
return bytearray([self.type, self.length])
@classmethod
def from_bytes(cls, data):
_type = ord(data.read(1))
length = ord(data.read(1))
return cls(_type, length)
def __len__(self):
return self._header_length
def __repr__(self):
return "HopByHopOptionHeader(type={}, length={})".format(self.type, self.length)
class HopByHopOption(ConvertibleToBytes):
""" Class representing HopByHop option.
    Class consists of two elements: a HopByHopOptionHeader and a value (e.g. an MPLOption).
The following example shows how any HopByHop option can be constructed.
Example:
HopByHop(next_header=0x3a,
options=[HopByHopOption(HopByHopOptionHeader(_type=0x6d),
MPLOption(S=1, M=0, V=0, sequence=2, seed_id=bytes([0x00, 0x18])))
"""
def __init__(self, header, value):
self.value = value
self.header = header
self.header.length = len(self.value)
def to_bytes(self):
return self.header.to_bytes() + self.value.to_bytes()
def __len__(self):
return len(self.header) + len(self.value)
def __repr__(self):
return "HopByHopOption(header={}, value={})".format(self.header, self.value)
class MPLOption(ConvertibleToBytes):
""" Class representing MPL option. """
_header_length = 2
_seed_id_length = {
0: 0,
1: 2,
2: 8,
3: 16
}
def __init__(self, S, M, V, sequence, seed_id):
self.S = S
self.M = M
self.V = V
self.sequence = sequence
self.seed_id = seed_id
def to_bytes(self):
smv = ((self.S & 0x03) << 6) | ((self.M & 0x01) << 5) | ((self.V & 0x01) << 4)
return bytearray([smv, self.sequence]) + self.seed_id
@classmethod
def from_bytes(cls, data):
b = ord(data.read(1))
s = ((b >> 6) & 0x03)
m = ((b >> 5) & 0x01)
v = ((b >> 4) & 0x01)
sequence = ord(data.read(1))
seed_id = data.read(cls._seed_id_length[s])
return cls(s, m, v, sequence, seed_id)
def __len__(self):
return self._header_length + self._seed_id_length[self.S]
def __repr__(self):
return "MPLOption(S={}, M={}, V={}, sequence={}, seed_id={})".format(self.S, self.M, self.V, self.sequence, hexlify(self.seed_id))
class IPv6PacketFactory(PacketFactory):
""" Factory that produces IPv6 packets from data.
This factory must be initialized with factories which allow to parse extension headers and upper layer protocols.
The following example shows preferable setup of IPv6PacketFactory.
Header types:
0: HopByHop
17: UDP
58: ICMPv6
Option types:
109: MPL
ICMPv6 body types:
128: Echo request
129: Echo response
Example usage:
ipv6_factory = IPv6PacketFactory(
ehf={
0: HopByHopFactory(options_factories={
109: MPLOptionFactory()
})
},
ulpf={
17: UDPDatagramFactory(dst_port_factories={
19788: MLEMessageFactory(),
19789: CoAPMessageFactory()
}),
58: ICMPv6Factory(body_factories={
128: ICMPv6EchoBodyFactory(),
129: ICMPv6EchoBodyFactory()
})
}
)
"""
def __init__(self, ehf=None, ulpf=None):
"""
ehf - Extension Header Factory
ulpf - Upper Layer Protocol Factory
Args:
            ehf(dict[int: PacketFactory]): Dictionary mapping extension header types to specialized factories.
            ulpf(dict[int: PacketFactory]): Dictionary mapping upper layer protocol types to specialized factories.
"""
self._ehf = ehf if ehf is not None else {}
self._ulpf = ulpf if ulpf is not None else {}
def _is_extension_header(self, header_type):
        return header_type not in UPPER_LAYER_PROTOCOLS
def _get_extension_header_factory_for(self, next_header):
try:
return self._ehf[next_header]
except KeyError:
raise RuntimeError("Could not get Extension Header factory for next_header={}.".format(next_header))
def _get_upper_layer_protocol_factory_for(self, next_header):
try:
return self._ulpf[next_header]
except KeyError:
raise RuntimeError("Could not get Upper Layer Protocol factory for next_header={}.".format(next_header))
def _parse_extension_headers(self, data, next_header, message_info):
extension_headers = []
while self._is_extension_header(next_header):
factory = self._get_extension_header_factory_for(next_header)
extension_header = factory.parse(data, message_info)
next_header = extension_header.next_header
extension_headers.append(extension_header)
return next_header, extension_headers
def _parse_upper_layer_protocol(self, data, next_header, message_info):
factory = self._get_upper_layer_protocol_factory_for(next_header)
return factory.parse(data, message_info)
def parse(self, data, message_info):
ipv6_header = IPv6Header.from_bytes(data)
message_info.source_ipv6 = ipv6_header.source_address
message_info.destination_ipv6 = ipv6_header.destination_address
next_header, extension_headers = self._parse_extension_headers(data, ipv6_header.next_header, message_info)
upper_layer_protocol = self._parse_upper_layer_protocol(data, next_header, message_info)
return IPv6Packet(ipv6_header, upper_layer_protocol, extension_headers)
class HopByHopOptionsFactory(object):
""" Factory that produces HopByHop options. """
_one_byte_padding = 0x00
_many_bytes_padding = 0x01
def __init__(self, options_factories=None):
self._options_factories = options_factories if options_factories is not None else {}
def _get_HopByHopOption_value_factory(self, _type):
try:
return self._options_factories[_type]
except KeyError:
raise RuntimeError("Could not find HopByHopOption value factory for type={}.".format(_type))
def parse(self, data, message_info):
options = []
while data.tell() < len(data.getvalue()):
option_header = HopByHopOptionHeader.from_bytes(data)
if option_header.type == self._one_byte_padding:
# skip one byte padding
data.read(1)
elif option_header.type == self._many_bytes_padding:
# skip n bytes padding
data.read(option_header.length)
else:
factory = self._get_HopByHopOption_value_factory(option_header.type)
option_data = data.read(option_header.length)
option = HopByHopOption(option_header, factory.parse(io.BytesIO(option_data), message_info))
options.append(option)
return options
class HopByHopFactory(PacketFactory):
""" Factory that produces HopByHop extension headers from data. """
def __init__(self, hop_by_hop_options_factory):
self._hop_by_hop_options_factory = hop_by_hop_options_factory
def _calculate_extension_header_length(self, hdr_ext_len):
return (hdr_ext_len + 1) * 8
def parse(self, data, message_info):
next_header = ord(data.read(1))
hdr_ext_len = ord(data.read(1))
        # Note! Two bytes were read (next_header and hdr_ext_len) so they must be subtracted from the header length
hop_by_hop_length = self._calculate_extension_header_length(hdr_ext_len) - 2
hop_by_hop_data = data.read(hop_by_hop_length)
options = self._hop_by_hop_options_factory.parse(io.BytesIO(hop_by_hop_data), message_info)
hop_by_hop = HopByHop(next_header, options, hdr_ext_len)
message_info.payload_length += len(hop_by_hop)
return hop_by_hop
class MPLOptionFactory(PacketFactory):
""" Factory that produces MPL options for HopByHop extension header. """
def parse(self, data, message_info):
return MPLOption.from_bytes(data)
class UDPHeaderFactory:
""" Factory that produces UDP header. """
def parse(self, data, message_info):
return UDPHeader.from_bytes(data)
class UdpBasedOnSrcDstPortsPayloadFactory:
# TODO: Unittests
""" Factory that produces UDP payload. """
def __init__(self, src_dst_port_based_payload_factories):
"""
Args:
            src_dst_port_based_payload_factories (PacketFactory): Factories that parse the UDP payload based on the source or destination port.
"""
self._factories = src_dst_port_based_payload_factories
def parse(self, data, message_info):
factory = None
if message_info.dst_port in self._factories:
factory = self._factories[message_info.dst_port]
if message_info.src_port in self._factories:
factory = self._factories[message_info.src_port]
if factory is None:
raise RuntimeError("Could not find factory to build UDP payload.")
return factory.parse(data, message_info)
class UDPDatagramFactory(PacketFactory):
# TODO: Unittests
""" Factory that produces UDP datagrams. """
def __init__(self, udp_header_factory, udp_payload_factory):
self._udp_header_factory = udp_header_factory
self._udp_payload_factory = udp_payload_factory
def parse(self, data, message_info):
header = self._udp_header_factory.parse(data, message_info)
# Update message payload length: UDP header (8B) + payload length
message_info.payload_length += len(header) + (len(data.getvalue()) - data.tell())
message_info.src_port = header.src_port
message_info.dst_port = header.dst_port
payload = self._udp_payload_factory.parse(data, message_info)
return UDPDatagram(header, payload)
class ICMPv6Factory(PacketFactory):
""" Factory that produces ICMPv6 messages from data. """
def __init__(self, body_factories=None):
self._body_factories = body_factories if body_factories is not None else {}
def _get_icmpv6_body_factory(self, _type):
try:
return self._body_factories[_type]
except KeyError:
if "default" not in self._body_factories:
raise RuntimeError("Could not find specialized factory to parse ICMP body. "
"Unsupported ICMP type: {}".format(_type))
default_factory = self._body_factories["default"]
print("Could not find specialized factory to parse ICMP body. "
"Take the default one: {}".format(type(default_factory)))
return default_factory
def parse(self, data, message_info):
header = ICMPv6Header.from_bytes(data)
factory = self._get_icmpv6_body_factory(header.type)
message_info.payload_length += len(header) + (len(data.getvalue()) - data.tell())
return ICMPv6(header, factory.parse(data, message_info))
class ICMPv6EchoBodyFactory(PacketFactory):
""" Factory that produces ICMPv6 echo message body. """
def parse(self, data, message_info):
return ICMPv6EchoBody.from_bytes(data)
class BytesPayload(ConvertibleToBytes, BuildableFromBytes):
""" Class representing bytes payload. """
def __init__(self, data):
self.data = data
def to_bytes(self):
return bytearray(self.data)
@classmethod
def from_bytes(cls, data):
return cls(data)
def __len__(self):
return len(self.data)
class BytesPayloadFactory(PacketFactory):
""" Factory that produces bytes payload. """
def parse(self, data, message_info):
return BytesPayload(data.read())
class ICMPv6EchoBody(ConvertibleToBytes, BuildableFromBytes):
""" Class representing body of ICMPv6 echo messages. """
_header_length = 4
def __init__(self, identifier, sequence_number, data):
self.identifier = identifier
self.sequence_number = sequence_number
self.data = data
def to_bytes(self):
data = struct.pack(">H", self.identifier)
data += struct.pack(">H", self.sequence_number)
data += self.data
return data
@classmethod
def from_bytes(cls, data):
identifier = struct.unpack(">H", data.read(2))[0]
sequence_number = struct.unpack(">H", data.read(2))[0]
return cls(identifier, sequence_number, data.read())
def __len__(self):
return self._header_length + len(self.data)
class ICMPv6DestinationUnreachableFactory(PacketFactory):
""" Factory that produces ICMPv6 echo message body. """
def parse(self, data, message_info):
return ICMPv6DestinationUnreachable.from_bytes(data)
class ICMPv6DestinationUnreachable(ConvertibleToBytes, BuildableFromBytes):
""" Class representing body of ICMPv6 Destination Unreachable messages. """
_header_length = 4
_unused = 0
def __init__(self, data):
self.data = data
def to_bytes(self):
data = bytearray(struct.pack(">I", self._unused))
data += self.data
return data
@classmethod
def from_bytes(cls, data):
unused = struct.unpack(">I", data.read(4))[0]
if unused != 0:
raise RuntimeError(
"Invalid value of unused field in the ICMPv6 Destination Unreachable data. Expected value: 0.")
return cls(bytearray(data.read()))
def __len__(self):
return self._header_length + len(self.data)
| bsd-3-clause | 1,892,638,632,973,712,000 | 30.053136 | 138 | 0.596062 | false |
southpawtech/TACTIC-DEV | 3rd_party/CherryPy/cherrypy/process/plugins.py | 6 | 20583 | """Site services for use with a Web Site Process Bus."""
import os
import re
try:
set
except NameError:
from sets import Set as set
import signal as _signal
import sys
import time
import thread
import threading
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
"""Plugin base class which auto-subscribes methods for known channels."""
def __init__(self, bus):
self.bus = bus
def subscribe(self):
"""Register this object as a (multi-channel) listener on the bus."""
for channel in self.bus.listeners:
# Subscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.subscribe(channel, method)
def unsubscribe(self):
"""Unregister this object as a listener on the bus."""
for channel in self.bus.listeners:
# Unsubscribe self.start, self.exit, etc. if present.
method = getattr(self, channel, None)
if method is not None:
self.bus.unsubscribe(channel, method)
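# Editor's note: an illustrative sketch, not part of the original module; it
# assumes a configured cherrypy.engine bus:
#   class Greeter(SimplePlugin):
#       def start(self):
#           self.bus.log('Greeter started')
#   Greeter(cherrypy.engine).subscribe()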
class SignalHandler(object):
"""Register bus channels (and listeners) for system signals.
By default, instantiating this object subscribes the following signals
and listeners:
TERM: bus.exit
HUP : bus.restart
USR1: bus.graceful
"""
# Map from signal numbers to names
signals = {}
for k, v in vars(_signal).items():
if k.startswith('SIG') and not k.startswith('SIG_'):
signals[v] = k
del k, v
def __init__(self, bus):
self.bus = bus
# Set default handlers
self.handlers = {'SIGTERM': self.bus.exit,
'SIGHUP': self.handle_SIGHUP,
'SIGUSR1': self.bus.graceful,
}
self._previous_handlers = {}
def subscribe(self):
for sig, func in self.handlers.items():
try:
self.set_handler(sig, func)
except ValueError:
pass
def unsubscribe(self):
for signum, handler in self._previous_handlers.items():
signame = self.signals[signum]
if handler is None:
self.bus.log("Restoring %s handler to SIG_DFL." % signame)
handler = _signal.SIG_DFL
else:
self.bus.log("Restoring %s handler %r." % (signame, handler))
try:
our_handler = _signal.signal(signum, handler)
if our_handler is None:
self.bus.log("Restored old %s handler %r, but our "
"handler was not registered." %
(signame, handler), level=30)
except ValueError:
self.bus.log("Unable to restore %s handler %r." %
(signame, handler), level=40, traceback=True)
def set_handler(self, signal, listener=None):
"""Subscribe a handler for the given signal (number or name).
If the optional 'listener' argument is provided, it will be
subscribed as a listener for the given signal's channel.
If the given signal name or number is not available on the current
platform, ValueError is raised.
"""
if isinstance(signal, basestring):
signum = getattr(_signal, signal, None)
if signum is None:
raise ValueError("No such signal: %r" % signal)
signame = signal
else:
try:
signame = self.signals[signal]
except KeyError:
raise ValueError("No such signal: %r" % signal)
signum = signal
prev = _signal.signal(signum, self._handle_signal)
self._previous_handlers[signum] = prev
if listener is not None:
self.bus.log("Listening for %s." % signame)
self.bus.subscribe(signame, listener)
def _handle_signal(self, signum=None, frame=None):
"""Python signal handler (self.set_handler subscribes it for you)."""
signame = self.signals[signum]
self.bus.log("Caught signal %s." % signame)
self.bus.publish(signame)
def handle_SIGHUP(self):
if os.isatty(sys.stdin.fileno()):
# not daemonized (may be foreground or background)
self.bus.log("SIGHUP caught but not daemonized. Exiting.")
self.bus.exit()
else:
self.bus.log("SIGHUP caught while daemonized. Restarting.")
self.bus.restart()
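# Editor's note: an illustrative sketch, assuming a cherrypy.engine bus:
#   handler = SignalHandler(cherrypy.engine)
#   handler.handlers['SIGUSR2'] = cherrypy.engine.restart
#   handler.subscribe()
# registers the default TERM/HUP/USR1 handlers plus a custom SIGUSR2 one.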
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
"""Drop privileges. uid/gid arguments not available on Windows.
Special thanks to Gavin Baker: http://antonym.org/node/100.
"""
def __init__(self, bus, umask=None, uid=None, gid=None):
SimplePlugin.__init__(self, bus)
self.finalized = False
self.uid = uid
self.gid = gid
self.umask = umask
def _get_uid(self):
return self._uid
def _set_uid(self, val):
if val is not None:
if pwd is None:
self.bus.log("pwd module not available; ignoring uid.",
level=30)
val = None
elif isinstance(val, basestring):
val = pwd.getpwnam(val)[2]
self._uid = val
uid = property(_get_uid, _set_uid, doc="The uid under which to run.")
def _get_gid(self):
return self._gid
def _set_gid(self, val):
if val is not None:
if grp is None:
self.bus.log("grp module not available; ignoring gid.",
level=30)
val = None
elif isinstance(val, basestring):
val = grp.getgrnam(val)[2]
self._gid = val
gid = property(_get_gid, _set_gid, doc="The gid under which to run.")
def _get_umask(self):
return self._umask
def _set_umask(self, val):
if val is not None:
try:
os.umask
except AttributeError:
self.bus.log("umask function not available; ignoring umask.",
level=30)
val = None
self._umask = val
umask = property(_get_umask, _set_umask, doc="The umask under which to run.")
def start(self):
# uid/gid
def current_ids():
"""Return the current (uid, gid) if available."""
name, group = None, None
if pwd:
name = pwd.getpwuid(os.getuid())[0]
if grp:
group = grp.getgrgid(os.getgid())[0]
return name, group
if self.finalized:
if not (self.uid is None and self.gid is None):
self.bus.log('Already running as uid: %r gid: %r' %
current_ids())
else:
if self.uid is None and self.gid is None:
if pwd or grp:
self.bus.log('uid/gid not set', level=30)
else:
self.bus.log('Started as uid: %r gid: %r' % current_ids())
if self.gid is not None:
os.setgid(self.gid)
if self.uid is not None:
os.setuid(self.uid)
self.bus.log('Running as uid: %r gid: %r' % current_ids())
# umask
if self.finalized:
if self.umask is not None:
self.bus.log('umask already set to: %03o' % self.umask)
else:
if self.umask is None:
self.bus.log('umask not set', level=30)
else:
old_umask = os.umask(self.umask)
self.bus.log('umask old: %03o, new: %03o' %
(old_umask, self.umask))
self.finalized = True
# This is slightly higher than the priority for server.start
# in order to facilitate the most common use: starting on a low
# port (which requires root) and then dropping to another user.
start.priority = 77
class Daemonizer(SimplePlugin):
"""Daemonize the running script.
Use this with a Web Site Process Bus via:
Daemonizer(bus).subscribe()
When this component finishes, the process is completely decoupled from
the parent environment. Please note that when this component is used,
the return code from the parent process will still be 0 if a startup
error occurs in the forked children. Errors in the initial daemonizing
process still return proper exit codes. Therefore, if you use this
plugin to daemonize, don't use the return code as an accurate indicator
of whether the process fully started. In fact, that return code only
    indicates if the process successfully finished the first fork.
"""
def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
stderr='/dev/null'):
SimplePlugin.__init__(self, bus)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.finalized = False
def start(self):
if self.finalized:
            self.bus.log('Already daemonized.')
# forking has issues with threads:
# http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
# "The general problem with making fork() work in a multi-threaded
# world is what to do with all of the threads..."
# So we check for active threads:
if threading.activeCount() != 1:
self.bus.log('There are %r active threads. '
'Daemonizing now may cause strange failures.' %
threading.enumerate(), level=30)
# See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
# (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
# and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# Finish up with the current stdout/stderr
sys.stdout.flush()
sys.stderr.flush()
# Do first fork.
try:
pid = os.fork()
if pid == 0:
# This is the child process. Continue.
pass
else:
# This is the first parent. Exit, now that we've forked.
self.bus.log('Forking once.')
os._exit(0)
except OSError, exc:
# Python raises OSError rather than returning negative numbers.
sys.exit("%s: fork #1 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.setsid()
# Do second fork
try:
pid = os.fork()
if pid > 0:
self.bus.log('Forking twice.')
os._exit(0) # Exit second parent
except OSError, exc:
sys.exit("%s: fork #2 failed: (%d) %s\n"
% (sys.argv[0], exc.errno, exc.strerror))
os.chdir("/")
os.umask(0)
si = open(self.stdin, "r")
so = open(self.stdout, "a+")
se = open(self.stderr, "a+")
# os.dup2(fd, fd2) will close fd2 if necessary,
# so we don't explicitly close stdin/out/err.
# See http://docs.python.org/lib/os-fd-ops.html
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
self.bus.log('Daemonized to PID: %s' % os.getpid())
self.finalized = True
start.priority = 65
class PIDFile(SimplePlugin):
"""Maintain a PID file via a WSPBus."""
def __init__(self, bus, pidfile):
SimplePlugin.__init__(self, bus)
self.pidfile = pidfile
self.finalized = False
def start(self):
pid = os.getpid()
if self.finalized:
self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
else:
open(self.pidfile, "wb").write(str(pid))
self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
self.finalized = True
start.priority = 70
def exit(self):
try:
os.remove(self.pidfile)
self.bus.log('PID file removed: %r.' % self.pidfile)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
class PerpetualTimer(threading._Timer):
"""A subclass of threading._Timer whose run() method repeats."""
def run(self):
while True:
self.finished.wait(self.interval)
if self.finished.isSet():
return
try:
self.function(*self.args, **self.kwargs)
except Exception, x:
self.bus.log("Error in perpetual timer thread function %r." %
self.function, level=40, traceback=True)
# Quit on first error to avoid massive logs.
raise
class Monitor(SimplePlugin):
"""WSPBus listener to periodically run a callback in its own thread.
bus: a Web Site Process Bus object.
callback: the function to call at intervals.
frequency: the time in seconds between callback runs.
"""
frequency = 60
def __init__(self, bus, callback, frequency=60, name=None):
SimplePlugin.__init__(self, bus)
self.callback = callback
self.frequency = frequency
self.thread = None
self.name = name
def start(self):
"""Start our callback in its own perpetual timer thread."""
if self.frequency > 0:
threadname = self.name or self.__class__.__name__
if self.thread is None:
self.thread = PerpetualTimer(self.frequency, self.callback)
self.thread.bus = self.bus
self.thread.setName(threadname)
self.thread.start()
self.bus.log("Started monitor thread %r." % threadname)
else:
self.bus.log("Monitor thread %r already started." % threadname)
start.priority = 70
def stop(self):
"""Stop our callback's perpetual timer thread."""
if self.thread is None:
self.bus.log("No thread running for %s." % self.name or self.__class__.__name__)
else:
if self.thread is not threading.currentThread():
name = self.thread.getName()
self.thread.cancel()
self.thread.join()
self.bus.log("Stopped thread %r." % name)
self.thread = None
def graceful(self):
"""Stop the callback's perpetual timer thread and restart it."""
self.stop()
self.start()
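# Editor's note: an illustrative sketch, assuming a cherrypy.engine bus and a
# callable poll_backend defined elsewhere:
#   Monitor(cherrypy.engine, poll_backend, frequency=10).subscribe()
# runs poll_backend every 10 seconds in its own PerpetualTimer thread.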
class Autoreloader(Monitor):
"""Monitor which re-executes the process when files change."""
frequency = 1
match = '.*'
def __init__(self, bus, frequency=1, match='.*'):
self.mtimes = {}
self.files = set()
self.match = match
Monitor.__init__(self, bus, self.run, frequency)
def start(self):
"""Start our own perpetual timer thread for self.run."""
if self.thread is None:
self.mtimes = {}
Monitor.start(self)
start.priority = 70
def sysfiles(self):
"""Return a Set of filenames which the Autoreloader will monitor."""
files = set()
for k, m in sys.modules.items():
if re.match(self.match, k):
if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
f = m.__loader__.archive
else:
f = getattr(m, '__file__', None)
if f is not None and not os.path.isabs(f):
                    # ensure absolute paths so an os.chdir() in the app doesn't break me
f = os.path.normpath(os.path.join(_module__file__base, f))
files.add(f)
return files
def run(self):
"""Reload the process if registered files have been modified."""
for filename in self.sysfiles() | self.files:
if filename:
if filename.endswith('.pyc'):
filename = filename[:-1]
oldtime = self.mtimes.get(filename, 0)
if oldtime is None:
# Module with no .py file. Skip it.
continue
try:
mtime = os.stat(filename).st_mtime
except OSError:
# Either a module with no .py file, or it's been deleted.
mtime = None
if filename not in self.mtimes:
# If a module has no .py file, this will be None.
self.mtimes[filename] = mtime
else:
if mtime is None or mtime > oldtime:
# The file has been deleted or modified.
self.bus.log("Restarting because %s changed." % filename)
self.thread.cancel()
self.bus.log("Stopped thread %r." % self.thread.getName())
self.bus.restart()
return
class ThreadManager(SimplePlugin):
"""Manager for HTTP request threads.
If you have control over thread creation and destruction, publish to
the 'acquire_thread' and 'release_thread' channels (for each thread).
This will register/unregister the current thread and publish to
'start_thread' and 'stop_thread' listeners in the bus as needed.
If threads are created and destroyed by code you do not control
(e.g., Apache), then, at the beginning of every HTTP request,
publish to 'acquire_thread' only. You should not publish to
'release_thread' in this case, since you do not know whether
the thread will be re-used or not. The bus will call
'stop_thread' listeners for you when it stops.
"""
def __init__(self, bus):
self.threads = {}
SimplePlugin.__init__(self, bus)
self.bus.listeners.setdefault('acquire_thread', set())
self.bus.listeners.setdefault('release_thread', set())
def acquire_thread(self):
"""Run 'start_thread' listeners for the current thread.
If the current thread has already been seen, any 'start_thread'
listeners will not be run again.
"""
thread_ident = thread.get_ident()
if thread_ident not in self.threads:
# We can't just use _get_ident as the thread ID
# because some platforms reuse thread ID's.
i = len(self.threads) + 1
self.threads[thread_ident] = i
self.bus.publish('start_thread', i)
def release_thread(self):
"""Release the current thread and run 'stop_thread' listeners."""
thread_ident = threading._get_ident()
i = self.threads.pop(thread_ident, None)
if i is not None:
self.bus.publish('stop_thread', i)
def stop(self):
"""Release all threads and run all 'stop_thread' listeners."""
for thread_ident, i in self.threads.items():
self.bus.publish('stop_thread', i)
self.threads.clear()
graceful = stop
| epl-1.0 | 7,515,089,356,924,826,000 | 35.624555 | 92 | 0.552738 | false |
smn/onadata | onadata/apps/main/tests/test_form_edit.py | 13 | 5646 | from django.core.urlresolvers import reverse
from onadata.apps.main.models import MetaData
from onadata.apps.main.views import edit
from onadata.apps.logger.models import XForm
from onadata.apps.logger.views import delete_xform
from test_base import TestBase
class TestFormEdit(TestBase):
def setUp(self):
TestBase.setUp(self)
self._create_user_and_login()
self._publish_transportation_form_and_submit_instance()
self.edit_url = reverse(edit, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
def test_anon_no_edit_post(self):
self.xform.shared = True
self.xform.save()
desc = 'Snooky'
response = self.anon.post(self.edit_url, {'description': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertNotEqual(
XForm.objects.get(pk=self.xform.pk).description, desc)
self.assertEqual(response.status_code, 302)
def test_not_owner_no_edit_post(self):
self.xform.shared = True
self.xform.save()
desc = 'Snooky'
self._create_user_and_login("jo")
response = self.client.post(self.edit_url, {'description': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 403)
self.assertNotEqual(
XForm.objects.get(pk=self.xform.pk).description, desc)
def test_user_description_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'description': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).description, desc)
def test_user_title_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'title': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).title, desc)
def test_user_form_license_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'form-license': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(MetaData.form_license(self.xform).data_value, desc)
def test_user_data_license_edit_updates(self):
desc = 'Snooky'
response = self.client.post(self.edit_url, {'data-license': desc},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(MetaData.data_license(self.xform).data_value, desc)
def test_user_toggle_data_privacy(self):
self.assertEqual(self.xform.shared, False)
response = self.client.post(self.edit_url, {'toggle_shared': 'data'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).shared_data, True)
def test_user_toggle_data_privacy_off(self):
self.xform.shared_data = True
self.xform.save()
response = self.client.post(self.edit_url, {'toggle_shared': 'data'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(
XForm.objects.get(pk=self.xform.pk).shared_data, False)
def test_user_toggle_form_privacy(self):
self.assertEqual(self.xform.shared, False)
response = self.client.post(self.edit_url, {'toggle_shared': 'form'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).shared, True)
def test_user_toggle_form_privacy_off(self):
self.xform.shared = True
self.xform.save()
response = self.client.post(self.edit_url, {'toggle_shared': 'form'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(XForm.objects.get(pk=self.xform.pk).shared, False)
def test_user_toggle_form_downloadable(self):
self.xform.downloadable = False
self.xform.save()
self.assertEqual(self.xform.downloadable, False)
response = self.client.post(self.edit_url, {'toggle_shared': 'active'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(
XForm.objects.get(pk=self.xform.pk).downloadable, True)
def test_user_toggle_form_downloadable_off(self):
self.xform.downloadable = True
self.xform.save()
response = self.client.post(self.edit_url, {'toggle_shared': 'active'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertEqual(
XForm.objects.get(pk=self.xform.pk).downloadable, False)
def test_delete_404(self):
bad_delete_url = reverse(delete_xform, kwargs={
'username': self.user.username,
'id_string': 'non_existent_id_string'
})
response = self.client.post(bad_delete_url)
self.assertEqual(response.status_code, 404)
| bsd-2-clause | -7,252,004,297,335,132,000 | 43.809524 | 79 | 0.617251 | false |
kwrobert/heat-templates | tests/software_config/test_heat_config_kubelet.py | 7 | 4615 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import tempfile
import fixtures
from testtools import matchers
from tests.software_config import common
class HeatConfigKubeletORCTest(common.RunScriptTest):
fake_hooks = ['kubelet']
data = [{
"id": "abcdef001",
"group": "kubelet",
"name": "mysql",
"config": {
"version": "v1beta2",
"volumes": [{
"name": "mariadb-data"
}],
"containers": [{
"image": "mariadb_image",
"volumeMounts": [{
"mountPath": "/var/lib/mysql",
"name": "mariadb-data"
}],
"name": "mariadb",
"env": [{
"name": "DB_ROOT_PASSWORD",
"value": "mariadb_password"
}],
"ports": [{
"containerPort": 3306
}]
}]}
}, {
"id": "abcdef002",
"group": "kubelet",
"name": "rabbitmq",
"config": {
"version": "v1beta2",
"containers": [{
"image": "rabbitmq_image",
"name": "rabbitmq",
"ports": [{
"containerPort": 5672
}]
}]
}
}, {
"id": "abcdef003",
"group": "kubelet",
"name": "heat_api_engine",
"config": {
"version": "v1beta2",
"containers": [{
"image": "heat_engine_image",
"name": "heat-engine",
"env": [{
"name": "DB_ROOT_PASSWORD",
"value": "mariadb_password"
}, {
"name": "HEAT_DB_PASSWORD",
"value": "heatdb_password"
}, {
"name": "HEAT_KEYSTONE_PASSWORD",
"value": "password"
}]
}, {
"image": "heat_api_image",
"name": "heat-api",
"ports": [{
"containerPort": 8004
}]
}]
}
}]
def setUp(self):
super(HeatConfigKubeletORCTest, self).setUp()
self.fake_hook_path = self.relative_path(__file__, 'hook-fake.py')
self.heat_config_kubelet_path = self.relative_path(
__file__,
'../..',
'hot/software-config/elements',
'heat-config-kubelet/os-refresh-config/configure.d/'
'50-heat-config-kubelet')
self.manifests_dir = self.useFixture(fixtures.TempDir())
with open(self.fake_hook_path) as f:
fake_hook = f.read()
for hook in self.fake_hooks:
hook_name = self.manifests_dir.join(hook)
with open(hook_name, 'w') as f:
os.utime(hook_name, None)
f.write(fake_hook)
f.flush()
os.chmod(hook_name, 0o755)
def write_config_file(self, data):
config_file = tempfile.NamedTemporaryFile()
config_file.write(json.dumps(data))
config_file.flush()
return config_file
def test_run_heat_config(self):
with self.write_config_file(self.data) as config_file:
env = os.environ.copy()
env.update({
'HEAT_KUBELET_MANIFESTS': self.manifests_dir.join(),
'HEAT_SHELL_CONFIG': config_file.name
})
returncode, stdout, stderr = self.run_cmd(
[self.heat_config_kubelet_path], env)
self.assertEqual(0, returncode, stderr)
for config in self.data:
manifest_name = '%s.json' % config['id']
manifest_path = self.manifests_dir.join(manifest_name)
self.assertThat(manifest_path, matchers.FileExists())
# manifest file should match manifest config
self.assertEqual(config['config'],
self.json_from_file(manifest_path))
| apache-2.0 | 6,697,983,440,104,976,000 | 30.394558 | 78 | 0.486024 | false |
enlighter/ndl-question-papers-search-hub | qp_search_project/searcher/models.py | 1 | 3438 | from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager
from django.utils import timezone
from django.dispatch import receiver
from django.db.models.signals import post_save
from localflavor.in_ import in_states
gettext_noop = lambda s: s
EDUCATIONAL_ROLE= (
('vb', gettext_noop('Till Class VIII')),
('mc', gettext_noop('Class IX to X')),
('ss', gettext_noop('Class XI to XII')),
('gr', gettext_noop('UG or PG')),
('cd', gettext_noop('Career Development or Technical Study')),
('ae', gettext_noop('Adult Education')),
('ll', gettext_noop('Lifelong Learner')),
)
LANGUAGES = (
('en', gettext_noop('English')),
('hi', gettext_noop('Hindi')),
('bn', gettext_noop('Bengali')),
)
class board(models.Model):
name = models.CharField(max_length=15)
#example : name = CBSE
def __str__(self):
return self.name
class exam(models.Model):
name = models.CharField(max_length=15)
# example : type = AISSCE
def __str__(self):
return self.name
class educational_institute(models.Model):
name = models.CharField(max_length=90, unique=True)
state = models.CharField(max_length=21, choices=in_states.STATE_CHOICES)
city = models.CharField(max_length=21)
def __str__(self):
return self.name
class student(AbstractUser):
#user = models.OneToOneField(User, primary_key=True, on_delete=models.CASCADE)
state = models.CharField(max_length=21, null=True, blank=True, choices=in_states.STATE_CHOICES)
city = models.CharField(max_length=21, null=True, blank=True)
educational_role = models.CharField(max_length=39, choices=EDUCATIONAL_ROLE)
institute = models.ForeignKey(educational_institute, null=True, blank=True)
language = models.CharField(max_length=8, choices=LANGUAGES)
#is_staff = models.BooleanField(u'staff status', default=False,
# help_text=u'Designates whether the user can log into this admin '
# 'site.')
#is_active = models.BooleanField(u'active', default=True,
# help_text=u'Designates whether this user should be treated as '
# 'active. Unselect this instead of deleting accounts.')
REQUIRED_FIELDS = ['email', 'educational_role', 'language']
objects = UserManager()
def __str__(self):
return str(self.username)
#@receiver(post_save, sender=User)
#def create_profile(sender, **kwargs):
# user = kwargs["instance"]
# if kwargs["created"]:
# user_profile = student(user=user)
# user_profile.save()
#post_save.connect(create_profile, sender=User)
# Create student instance on access - very useful if you plan to always have a Student obj associated with a User object anyway
#User.student = property(lambda u: student.objects.get_or_create(user=u)[0])
class search_result(models.Model):
year_month = models.DateField()
type = models.ForeignKey(exam, on_delete=models.CASCADE)
source = models.ForeignKey(board, null=True, blank=True)
subject = models.CharField(max_length=45)
location = models.URLField(max_length=120)
def get_year(self):
return self.year_month.year
def get_month(self):
return self.year_month.month
def __str__(self):
return str(self.get_year()) + str(self.type) + self.subject | mit | -7,797,993,381,692,575,000 | 32.067308 | 127 | 0.655032 | false |
shaulkf/bitcoin | qa/rpc-tests/proxy_test.py | 93 | 7769 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
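# Illustrative note (added for exposition, not part of the original test): each
# "addnode ... onetry" below is intercepted by the stub Socks5Server, which
# queues a Socks5Command whose fields the assertions inspect, e.g. for
# "15.61.23.23:1234" roughly:
#
#     cmd.atyp == AddressType.DOMAINNAME
#     cmd.addr == "15.61.23.23"
#     cmd.port == 1234
#     cmd.username / cmd.password   # only populated with -proxyrandomize=1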
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
])
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| mit | -8,663,886,744,985,916,000 | 42.161111 | 146 | 0.622731 | false |
nguyentruongtho/buck | programs/subprocutils.py | 5 | 3349 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
#
# an almost exact copy of the shutil.which() implementation from python3.4
#
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to
# the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
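# Illustrative sketch (not part of the original Buck module): typical use of
# which(). The executable names and the custom search path below are
# assumptions chosen for demonstration only.
def _example_which_usage():
    # Returns an absolute path such as "/usr/local/bin/watchman", or None when
    # the executable cannot be found.
    watchman = which("watchman")
    if watchman is None:
        return "watchman not found on PATH"
    # A custom search path may be passed instead of consulting os.environ.
    return which("buck", path="/opt/tools/bin" + os.pathsep + "/usr/bin")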
def propagate_failure(status):
"""
Propagate the failure mode of a subprocess to the current process.
"""
# The subprocess died with via a signal, so re-raise it.
if status < 0:
os.kill(os.getpid(), -status)
# The subprocess died with an error code, propagate it.
if status > 0:
sys.exit(status)
| apache-2.0 | -4,354,652,863,883,181,000 | 34.252632 | 83 | 0.643774 | false |
ppries/tensorflow | tensorflow/contrib/distributions/python/ops/transformed_distribution.py | 1 | 9906 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Transformed Distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution as distributions
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.ops import math_ops
_condition_kwargs_dict = {
"bijector_kwargs": ("Python dictionary of arg names/values "
"forwarded to the bijector."),
"distribution_kwargs": ("Python dictionary of arg names/values "
"forwarded to the distribution."),
}
class TransformedDistribution(distributions.Distribution):
"""A Transformed Distribution.
A `TransformedDistribution` models `p(y)` given a base distribution `p(x)`,
and a deterministic, invertible, differentiable transform, `Y = g(X)`. The
transform is typically an instance of the `Bijector` class and the base
distribution is typically an instance of the `Distribution` class.
A `Bijector` is expected to implement the following functions:
- `forward`,
- `inverse`,
- `inverse_log_det_jacobian`.
The semantics of these functions are outlined in the `Bijector` documentation.
Shapes, type, and reparameterization are taken from the base distribution.
  Write `P(Y=y)` for the cumulative distribution function of random variable
  (rv) `Y` and `p` for its derivative wrt `Y`. Assume that `Y=g(X)` where `g` is
continuous and `X=g^{-1}(Y)`. Write `J` for the Jacobian (of some function).
  A `TransformedDistribution` alters the inputs/outputs of a `Distribution`
associated with rv `X` in the following ways:
* `sample`:
Mathematically:
```none
Y = g(X)
```
Programmatically:
```python
return bijector.forward(distribution.sample(...))
```
* `log_prob`:
Mathematically:
```none
(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)
```
Programmatically:
```python
return (bijector.inverse_log_det_jacobian(x) +
distribution.log_prob(bijector.inverse(x))
```
* `log_cdf`:
Mathematically:
```none
(log o P o g^{-1})(y)
```
Programmatically:
```python
return distribution.log_prob(bijector.inverse(x))
```
* and similarly for: `cdf`, `prob`, `log_survival_function`,
`survival_function`.
A simple example constructing a Log-Normal distribution from a Normal
distribution:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=mu, sigma=sigma),
bijector=ds.bijector.Exp(),
name="LogNormalTransformedDistribution")
```
A `LogNormal` made from callables:
```python
ds = tf.contrib.distributions
log_normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=mu, sigma=sigma),
bijector=ds.bijector.Inline(
forward_fn=tf.exp,
inverse_fn=tf.log,
inverse_log_det_jacobian_fn=(
lambda y: -tf.reduce_sum(tf.log(y), reduction_indices=-1)),
name="LogNormalTransformedDistribution")
```
Another example constructing a Normal from a StandardNormal:
```python
ds = tf.contrib.distributions
normal = ds.TransformedDistribution(
distribution=ds.Normal(mu=0, sigma=1),
bijector=ds.bijector.ScaleAndShift(loc=mu, scale=sigma, event_ndims=0),
name="NormalTransformedDistribution")
```
"""
def __init__(self,
distribution,
bijector,
validate_args=False,
name=None):
"""Construct a Transformed Distribution.
Args:
distribution: The base distribution instance to transform. Typically an
instance of `Distribution`.
bijector: The object responsible for calculating the transformation.
Typically an instance of `Bijector`.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
name: The name for the distribution. Default:
`bijector.name + distribution.name`.
"""
parameters = locals()
parameters.pop("self")
name = name or bijector.name + distribution.name
self._distribution = distribution
self._bijector = bijector
super(TransformedDistribution, self).__init__(
dtype=self._distribution.dtype,
is_continuous=self._distribution.is_continuous,
is_reparameterized=self._distribution.is_reparameterized,
validate_args=validate_args,
allow_nan_stats=self._distribution.allow_nan_stats,
parameters=parameters,
# We let TransformedDistribution access _graph_parents since this class
# is more like a baseclass than derived.
graph_parents=(distribution._graph_parents + # pylint: disable=protected-access
bijector.graph_parents),
name=name)
@property
def distribution(self):
"""Base distribution, p(x)."""
return self._distribution
@property
def bijector(self):
"""Function transforming x => y."""
return self._bijector
def _event_shape(self):
return self.bijector.forward_event_shape(
self.distribution.event_shape())
def _get_event_shape(self):
return self.bijector.get_forward_event_shape(
self.distribution.get_event_shape())
def _batch_shape(self):
return self.distribution.batch_shape()
def _get_batch_shape(self):
return self.distribution.get_batch_shape()
@distribution_util.AppendDocstring(
"""Samples from the base distribution and then passes through
the bijector's forward transform.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _sample_n(self, n, seed=None,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.distribution.sample(sample_shape=n, seed=seed,
**distribution_kwargs)
# Recall that a bijector is named for its forward transform, i.e.,
# `Y = g(X)`,
return self.bijector.forward(x, **bijector_kwargs)
@distribution_util.AppendDocstring(
"""Implements `(log o p o g^{-1})(y) + (log o det o J o g^{-1})(y)`,
where `g^{-1}` is the inverse of `transform`.
Also raises a `ValueError` if `inverse` was not provided to the
distribution and `y` was not returned from `sample`.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _log_prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
y, **bijector_kwargs)
return ildj + self.distribution.log_prob(x, **distribution_kwargs)
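  # Illustrative note (added for exposition, not part of the original class):
  # with the Exp bijector, g(x) = exp(x) and g^{-1}(y) = log(y), so `ildj`
  # above equals -log(y) and the computation reduces to the familiar
  # log-normal change of variables:
  #   log p_Y(y) = log p_X(log y) - log y.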
@distribution_util.AppendDocstring(
"""Implements `p(g^{-1}(y)) det|J(g^{-1}(y))|`, where `g^{-1}` is the
inverse of `transform`.
Also raises a `ValueError` if `inverse` was not provided to the
distribution and `y` was not returned from `sample`.""",
condition_kwargs_dict=_condition_kwargs_dict)
def _prob(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x, ildj = self.bijector.inverse_and_inverse_log_det_jacobian(
y, **bijector_kwargs)
return math_ops.exp(ildj) * self.distribution.prob(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _log_cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _cdf(self, y, bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.cdf(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _log_survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.log_survival_function(x, **distribution_kwargs)
@distribution_util.AppendDocstring(
condition_kwargs_dict=_condition_kwargs_dict)
def _survival_function(self, y,
bijector_kwargs=None, distribution_kwargs=None):
bijector_kwargs = bijector_kwargs or {}
distribution_kwargs = distribution_kwargs or {}
x = self.bijector.inverse(y, **bijector_kwargs)
return self.distribution.survival_function(x, **distribution_kwargs)
| apache-2.0 | -1,960,353,723,900,015,000 | 35.285714 | 88 | 0.673127 | false |
adw0rd/lettuce | tests/integration/lib/Django-1.3/django/contrib/localflavor/fi/fi_municipalities.py | 394 | 10822 | # -*- coding: utf-8 -*-
"""
An alphabetical list of Finnish municipalities for use as `choices` in a
formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
MUNICIPALITY_CHOICES = (
('akaa', u"Akaa"),
('alajarvi', u"Alajärvi"),
('alavieska', u"Alavieska"),
('alavus', u"Alavus"),
('artjarvi', u"Artjärvi"),
('asikkala', u"Asikkala"),
('askola', u"Askola"),
('aura', u"Aura"),
('brando', u"Brändö"),
('eckero', u"Eckerö"),
('enonkoski', u"Enonkoski"),
('enontekio', u"Enontekiö"),
('espoo', u"Espoo"),
('eura', u"Eura"),
('eurajoki', u"Eurajoki"),
('evijarvi', u"Evijärvi"),
('finstrom', u"Finström"),
('forssa', u"Forssa"),
('foglo', u"Föglö"),
('geta', u"Geta"),
('haapajarvi', u"Haapajärvi"),
('haapavesi', u"Haapavesi"),
('hailuoto', u"Hailuoto"),
('halsua', u"Halsua"),
('hamina', u"Hamina"),
('hammarland', u"Hammarland"),
('hankasalmi', u"Hankasalmi"),
('hanko', u"Hanko"),
('harjavalta', u"Harjavalta"),
('hartola', u"Hartola"),
('hattula', u"Hattula"),
('haukipudas', u"Haukipudas"),
('hausjarvi', u"Hausjärvi"),
('heinola', u"Heinola"),
('heinavesi', u"Heinävesi"),
('helsinki', u"Helsinki"),
('hirvensalmi', u"Hirvensalmi"),
('hollola', u"Hollola"),
('honkajoki', u"Honkajoki"),
('huittinen', u"Huittinen"),
('humppila', u"Humppila"),
('hyrynsalmi', u"Hyrynsalmi"),
('hyvinkaa', u"Hyvinkää"),
('hameenkoski', u"Hämeenkoski"),
('hameenkyro', u"Hämeenkyrö"),
('hameenlinna', u"Hämeenlinna"),
('ii', u"Ii"),
('iisalmi', u"Iisalmi"),
('iitti', u"Iitti"),
('ikaalinen', u"Ikaalinen"),
('ilmajoki', u"Ilmajoki"),
('ilomantsi', u"Ilomantsi"),
('imatra', u"Imatra"),
('inari', u"Inari"),
('inkoo', u"Inkoo"),
('isojoki', u"Isojoki"),
('isokyro', u"Isokyrö"),
('jalasjarvi', u"Jalasjärvi"),
('janakkala', u"Janakkala"),
('joensuu', u"Joensuu"),
('jokioinen', u"Jokioinen"),
('jomala', u"Jomala"),
('joroinen', u"Joroinen"),
('joutsa', u"Joutsa"),
('juankoski', u"Juankoski"),
('juuka', u"Juuka"),
('juupajoki', u"Juupajoki"),
('juva', u"Juva"),
('jyvaskyla', u"Jyväskylä"),
('jamijarvi', u"Jämijärvi"),
('jamsa', u"Jämsä"),
('jarvenpaa', u"Järvenpää"),
('kaarina', u"Kaarina"),
('kaavi', u"Kaavi"),
('kajaani', u"Kajaani"),
('kalajoki', u"Kalajoki"),
('kangasala', u"Kangasala"),
('kangasniemi', u"Kangasniemi"),
('kankaanpaa', u"Kankaanpää"),
('kannonkoski', u"Kannonkoski"),
('kannus', u"Kannus"),
('karijoki', u"Karijoki"),
('karjalohja', u"Karjalohja"),
('karkkila', u"Karkkila"),
('karstula', u"Karstula"),
('karttula', u"Karttula"),
('karvia', u"Karvia"),
('kaskinen', u"Kaskinen"),
('kauhajoki', u"Kauhajoki"),
('kauhava', u"Kauhava"),
('kauniainen', u"Kauniainen"),
('kaustinen', u"Kaustinen"),
('keitele', u"Keitele"),
('kemi', u"Kemi"),
('kemijarvi', u"Kemijärvi"),
('keminmaa', u"Keminmaa"),
('kemionsaari', u"Kemiönsaari"),
('kempele', u"Kempele"),
('kerava', u"Kerava"),
('kerimaki', u"Kerimäki"),
('kesalahti', u"Kesälahti"),
('keuruu', u"Keuruu"),
('kihnio', u"Kihniö"),
('kiikoinen', u"Kiikoinen"),
('kiiminki', u"Kiiminki"),
('kinnula', u"Kinnula"),
('kirkkonummi', u"Kirkkonummi"),
('kitee', u"Kitee"),
('kittila', u"Kittilä"),
('kiuruvesi', u"Kiuruvesi"),
('kivijarvi', u"Kivijärvi"),
('kokemaki', u"Kokemäki"),
('kokkola', u"Kokkola"),
('kolari', u"Kolari"),
('konnevesi', u"Konnevesi"),
('kontiolahti', u"Kontiolahti"),
('korsnas', u"Korsnäs"),
('koskitl', u"Koski Tl"),
('kotka', u"Kotka"),
('kouvola', u"Kouvola"),
('kristiinankaupunki', u"Kristiinankaupunki"),
('kruunupyy', u"Kruunupyy"),
('kuhmalahti', u"Kuhmalahti"),
('kuhmo', u"Kuhmo"),
('kuhmoinen', u"Kuhmoinen"),
('kumlinge', u"Kumlinge"),
('kuopio', u"Kuopio"),
('kuortane', u"Kuortane"),
('kurikka', u"Kurikka"),
('kustavi', u"Kustavi"),
('kuusamo', u"Kuusamo"),
('kylmakoski', u"Kylmäkoski"),
('kyyjarvi', u"Kyyjärvi"),
('karkola', u"Kärkölä"),
('karsamaki', u"Kärsämäki"),
('kokar', u"Kökar"),
('koylio', u"Köyliö"),
('lahti', u"Lahti"),
('laihia', u"Laihia"),
('laitila', u"Laitila"),
('lapinjarvi', u"Lapinjärvi"),
('lapinlahti', u"Lapinlahti"),
('lappajarvi', u"Lappajärvi"),
('lappeenranta', u"Lappeenranta"),
('lapua', u"Lapua"),
('laukaa', u"Laukaa"),
('lavia', u"Lavia"),
('lemi', u"Lemi"),
('lemland', u"Lemland"),
('lempaala', u"Lempäälä"),
('leppavirta', u"Leppävirta"),
('lestijarvi', u"Lestijärvi"),
('lieksa', u"Lieksa"),
('lieto', u"Lieto"),
('liminka', u"Liminka"),
('liperi', u"Liperi"),
('lohja', u"Lohja"),
('loimaa', u"Loimaa"),
('loppi', u"Loppi"),
('loviisa', u"Loviisa"),
('luhanka', u"Luhanka"),
('lumijoki', u"Lumijoki"),
('lumparland', u"Lumparland"),
('luoto', u"Luoto"),
('luumaki', u"Luumäki"),
('luvia', u"Luvia"),
('lansi-turunmaa', u"Länsi-Turunmaa"),
('maalahti', u"Maalahti"),
('maaninka', u"Maaninka"),
('maarianhamina', u"Maarianhamina"),
('marttila', u"Marttila"),
('masku', u"Masku"),
('merijarvi', u"Merijärvi"),
('merikarvia', u"Merikarvia"),
('miehikkala', u"Miehikkälä"),
('mikkeli', u"Mikkeli"),
('muhos', u"Muhos"),
('multia', u"Multia"),
('muonio', u"Muonio"),
('mustasaari', u"Mustasaari"),
('muurame', u"Muurame"),
('mynamaki', u"Mynämäki"),
('myrskyla', u"Myrskylä"),
('mantsala', u"Mäntsälä"),
('mantta-vilppula', u"Mänttä-Vilppula"),
('mantyharju', u"Mäntyharju"),
('naantali', u"Naantali"),
('nakkila', u"Nakkila"),
('nastola', u"Nastola"),
('nilsia', u"Nilsiä"),
('nivala', u"Nivala"),
('nokia', u"Nokia"),
('nousiainen', u"Nousiainen"),
('nummi-pusula', u"Nummi-Pusula"),
('nurmes', u"Nurmes"),
('nurmijarvi', u"Nurmijärvi"),
('narpio', u"Närpiö"),
('oravainen', u"Oravainen"),
('orimattila', u"Orimattila"),
('oripaa', u"Oripää"),
('orivesi', u"Orivesi"),
('oulainen', u"Oulainen"),
('oulu', u"Oulu"),
('oulunsalo', u"Oulunsalo"),
('outokumpu', u"Outokumpu"),
('padasjoki', u"Padasjoki"),
('paimio', u"Paimio"),
('paltamo', u"Paltamo"),
('parikkala', u"Parikkala"),
('parkano', u"Parkano"),
('pedersore', u"Pedersöre"),
('pelkosenniemi', u"Pelkosenniemi"),
('pello', u"Pello"),
('perho', u"Perho"),
('pertunmaa', u"Pertunmaa"),
('petajavesi', u"Petäjävesi"),
('pieksamaki', u"Pieksämäki"),
('pielavesi', u"Pielavesi"),
('pietarsaari', u"Pietarsaari"),
('pihtipudas', u"Pihtipudas"),
('pirkkala', u"Pirkkala"),
('polvijarvi', u"Polvijärvi"),
('pomarkku', u"Pomarkku"),
('pori', u"Pori"),
('pornainen', u"Pornainen"),
('porvoo', u"Porvoo"),
('posio', u"Posio"),
('pudasjarvi', u"Pudasjärvi"),
('pukkila', u"Pukkila"),
('punkaharju', u"Punkaharju"),
('punkalaidun', u"Punkalaidun"),
('puolanka', u"Puolanka"),
('puumala', u"Puumala"),
('pyhtaa', u"Pyhtää"),
('pyhajoki', u"Pyhäjoki"),
('pyhajarvi', u"Pyhäjärvi"),
('pyhanta', u"Pyhäntä"),
('pyharanta', u"Pyhäranta"),
('palkane', u"Pälkäne"),
('poytya', u"Pöytyä"),
('raahe', u"Raahe"),
('raasepori', u"Raasepori"),
('raisio', u"Raisio"),
('rantasalmi', u"Rantasalmi"),
('ranua', u"Ranua"),
('rauma', u"Rauma"),
('rautalampi', u"Rautalampi"),
('rautavaara', u"Rautavaara"),
('rautjarvi', u"Rautjärvi"),
('reisjarvi', u"Reisjärvi"),
('riihimaki', u"Riihimäki"),
('ristiina', u"Ristiina"),
('ristijarvi', u"Ristijärvi"),
('rovaniemi', u"Rovaniemi"),
('ruokolahti', u"Ruokolahti"),
('ruovesi', u"Ruovesi"),
('rusko', u"Rusko"),
('raakkyla', u"Rääkkylä"),
('saarijarvi', u"Saarijärvi"),
('salla', u"Salla"),
('salo', u"Salo"),
('saltvik', u"Saltvik"),
('sastamala', u"Sastamala"),
('sauvo', u"Sauvo"),
('savitaipale', u"Savitaipale"),
('savonlinna', u"Savonlinna"),
('savukoski', u"Savukoski"),
('seinajoki', u"Seinäjoki"),
('sievi', u"Sievi"),
('siikainen', u"Siikainen"),
('siikajoki', u"Siikajoki"),
('siikalatva', u"Siikalatva"),
('siilinjarvi', u"Siilinjärvi"),
('simo', u"Simo"),
('sipoo', u"Sipoo"),
('siuntio', u"Siuntio"),
('sodankyla', u"Sodankylä"),
('soini', u"Soini"),
('somero', u"Somero"),
('sonkajarvi', u"Sonkajärvi"),
('sotkamo', u"Sotkamo"),
('sottunga', u"Sottunga"),
('sulkava', u"Sulkava"),
('sund', u"Sund"),
('suomenniemi', u"Suomenniemi"),
('suomussalmi', u"Suomussalmi"),
('suonenjoki', u"Suonenjoki"),
('sysma', u"Sysmä"),
('sakyla', u"Säkylä"),
('taipalsaari', u"Taipalsaari"),
('taivalkoski', u"Taivalkoski"),
('taivassalo', u"Taivassalo"),
('tammela', u"Tammela"),
('tampere', u"Tampere"),
('tarvasjoki', u"Tarvasjoki"),
('tervo', u"Tervo"),
('tervola', u"Tervola"),
('teuva', u"Teuva"),
('tohmajarvi', u"Tohmajärvi"),
('toholampi', u"Toholampi"),
('toivakka', u"Toivakka"),
('tornio', u"Tornio"),
('turku', u"Turku"),
('tuusniemi', u"Tuusniemi"),
('tuusula', u"Tuusula"),
('tyrnava', u"Tyrnävä"),
('toysa', u"Töysä"),
('ulvila', u"Ulvila"),
('urjala', u"Urjala"),
('utajarvi', u"Utajärvi"),
('utsjoki', u"Utsjoki"),
('uurainen', u"Uurainen"),
('uusikaarlepyy', u"Uusikaarlepyy"),
('uusikaupunki', u"Uusikaupunki"),
('vaala', u"Vaala"),
('vaasa', u"Vaasa"),
('valkeakoski', u"Valkeakoski"),
('valtimo', u"Valtimo"),
('vantaa', u"Vantaa"),
('varkaus', u"Varkaus"),
('varpaisjarvi', u"Varpaisjärvi"),
('vehmaa', u"Vehmaa"),
('vesanto', u"Vesanto"),
('vesilahti', u"Vesilahti"),
('veteli', u"Veteli"),
('vierema', u"Vieremä"),
('vihanti', u"Vihanti"),
('vihti', u"Vihti"),
('viitasaari', u"Viitasaari"),
('vimpeli', u"Vimpeli"),
('virolahti', u"Virolahti"),
('virrat', u"Virrat"),
('vardo', u"Vårdö"),
('vahakyro', u"Vähäkyrö"),
('voyri-maksamaa', u"Vöyri-Maksamaa"),
('yli-ii', u"Yli-Ii"),
('ylitornio', u"Ylitornio"),
('ylivieska', u"Ylivieska"),
('ylojarvi', u"Ylöjärvi"),
('ypaja', u"Ypäjä"),
('ahtari', u"Ähtäri"),
('aanekoski', u"Äänekoski")
) | gpl-3.0 | -8,824,018,512,322,498,000 | 29.27762 | 74 | 0.544961 | false |
cmbruns/vr_samples | test/python/test_sphere.py | 1 | 2362 | #!/bin/env python
import unittest
import glfw
from OpenGL import GL
from PIL import Image
import numpy
from vrprim.imposter import sphere
def images_are_identical(img1, img2):
ar1 = numpy.array(img1.convert('RGBA'))
ar2 = numpy.array(img2.convert('RGBA'))
return numpy.array_equiv(ar1, ar2)
class TestGLRendering(unittest.TestCase):
def setUp(self):
if not glfw.init():
raise Exception("GLFW Initialization error")
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 1)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.VISIBLE, False) # Hidden window is next best thing to offscreen
window = glfw.create_window(16, 16, "Little test window", None, None)
if window is None:
glfw.terminate()
raise Exception("GLFW window creation error")
glfw.make_context_current(window)
with open('../images/red16x16.png', 'rb') as fh:
self.red_image = Image.open(fh)
self.red_image.load()
def tearDown(self):
glfw.terminate()
def test_sphere_imposter(self):
GL.glClearColor(1, 0, 0, 1) # red
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
s = sphere.SphereActor()
s.init_gl()
s.display_gl(None)
s.dispose_gl()
# Save render as image
GL.glFlush()
data = GL.glReadPixels(0, 0, 16, 16, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE)
observed = Image.frombytes('RGBA', (16, 16), data)
observed.save("test.png")
self.assertFalse(images_are_identical(observed, self.red_image))
def test_red_render(self):
'Test minimal screen clear in OpenGL'
# Color the entire display red
GL.glClearColor(1, 0, 0, 1) # red
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
# Save render as image
GL.glFlush()
data = GL.glReadPixels(0, 0, 16, 16, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE)
observed = Image.frombytes('RGBA', (16, 16), data)
expected = self.red_image
self.assertTrue(images_are_identical(observed, expected))
print ("Red GL test completed")
if __name__ == '__main__':
unittest.main()
| mit | 2,724,550,455,750,067,000 | 33.253731 | 93 | 0.594412 | false |
pdee/pdee | python-libs/rope/base/project.py | 40 | 11976 | import cPickle as pickle
import os
import shutil
import sys
import warnings
import rope.base.fscommands
from rope.base import exceptions, taskhandle, prefs, history, pycore, utils
from rope.base.resourceobserver import *
from rope.base.resources import File, Folder, _ResourceMatcher
class _Project(object):
def __init__(self, fscommands):
self.observers = []
self.fscommands = fscommands
self.prefs = prefs.Prefs()
self.data_files = _DataFiles(self)
def get_resource(self, resource_name):
"""Get a resource in a project.
`resource_name` is the path of a resource in a project. It is
the path of a resource relative to project root. Project root
folder address is an empty string. If the resource does not
exist a `exceptions.ResourceNotFound` exception would be
raised. Use `get_file()` and `get_folder()` when you need to
get nonexistent `Resource`\s.
"""
path = self._get_resource_path(resource_name)
if not os.path.exists(path):
raise exceptions.ResourceNotFoundError(
'Resource <%s> does not exist' % resource_name)
elif os.path.isfile(path):
return File(self, resource_name)
elif os.path.isdir(path):
return Folder(self, resource_name)
else:
raise exceptions.ResourceNotFoundError('Unknown resource '
+ resource_name)
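    # Illustrative sketch (not from the original rope sources): resource names
    # are '/'-separated paths relative to the project root, e.g.
    #
    #     project = Project('/tmp/myproject')       # hypothetical root
    #     mod = project.get_resource('pkg/mod.py')  # must already exist
    #     new = project.get_file('pkg/new.py')      # handle; may not exist yet
    #
    # get_resource() raises ResourceNotFoundError for missing paths, whereas
    # get_file()/get_folder() return handles that can be created later.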
def validate(self, folder):
"""Validate files and folders contained in this folder
It validates all of the files and folders contained in this
folder if some observers are interested in them.
"""
for observer in list(self.observers):
observer.validate(folder)
def add_observer(self, observer):
"""Register a `ResourceObserver`
See `FilteredResourceObserver`.
"""
self.observers.append(observer)
def remove_observer(self, observer):
"""Remove a registered `ResourceObserver`"""
if observer in self.observers:
self.observers.remove(observer)
def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
"""Apply the changes in a `ChangeSet`
Most of the time you call this function for committing the
changes for a refactoring.
"""
self.history.do(changes, task_handle=task_handle)
def get_pycore(self):
return self.pycore
def get_file(self, path):
"""Get the file with `path` (it may not exist)"""
return File(self, path)
def get_folder(self, path):
"""Get the folder with `path` (it may not exist)"""
return Folder(self, path)
def is_ignored(self, resource):
return False
def get_prefs(self):
return self.prefs
def _get_resource_path(self, name):
pass
@property
@utils.saveit
def history(self):
return history.History(self)
@property
@utils.saveit
def pycore(self):
return pycore.PyCore(self)
def close(self):
warnings.warn('Cannot close a NoProject',
DeprecationWarning, stacklevel=2)
ropefolder = None
class Project(_Project):
"""A Project containing files and folders"""
def __init__(self, projectroot, fscommands=None,
ropefolder='.ropeproject', **prefs):
"""A rope project
:parameters:
- `projectroot`: The address of the root folder of the project
- `fscommands`: Implements the file system operations used
by rope; have a look at `rope.base.fscommands`
- `ropefolder`: The name of the folder in which rope stores
project configurations and data. Pass `None` for not using
such a folder at all.
- `prefs`: Specify project preferences. These values
overwrite config file preferences.
"""
if projectroot != '/':
projectroot = _realpath(projectroot).rstrip('/\\')
self._address = projectroot
self._ropefolder_name = ropefolder
if not os.path.exists(self._address):
os.mkdir(self._address)
elif not os.path.isdir(self._address):
raise exceptions.RopeError('Project root exists and'
' is not a directory')
if fscommands is None:
fscommands = rope.base.fscommands.create_fscommands(self._address)
super(Project, self).__init__(fscommands)
self.ignored = _ResourceMatcher()
self.file_list = _FileListCacher(self)
self.prefs.add_callback('ignored_resources', self.ignored.set_patterns)
if ropefolder is not None:
self.prefs['ignored_resources'] = [ropefolder]
self._init_prefs(prefs)
def get_files(self):
return self.file_list.get_files()
def _get_resource_path(self, name):
return os.path.join(self._address, *name.split('/'))
def _init_ropefolder(self):
if self.ropefolder is not None:
if not self.ropefolder.exists():
self._create_recursively(self.ropefolder)
if not self.ropefolder.has_child('config.py'):
config = self.ropefolder.create_file('config.py')
config.write(self._default_config())
def _create_recursively(self, folder):
if folder.parent != self.root and not folder.parent.exists():
self._create_recursively(folder.parent)
folder.create()
def _init_prefs(self, prefs):
run_globals = {}
if self.ropefolder is not None:
config = self.get_file(self.ropefolder.path + '/config.py')
run_globals.update({'__name__': '__main__',
'__builtins__': __builtins__,
'__file__': config.real_path})
if config.exists():
config = self.ropefolder.get_child('config.py')
execfile(config.real_path, run_globals)
else:
exec(self._default_config(), run_globals)
if 'set_prefs' in run_globals:
run_globals['set_prefs'](self.prefs)
for key, value in prefs.items():
self.prefs[key] = value
self._init_other_parts()
self._init_ropefolder()
if 'project_opened' in run_globals:
run_globals['project_opened'](self)
def _default_config(self):
import rope.base.default_config
import inspect
return inspect.getsource(rope.base.default_config)
def _init_other_parts(self):
# Forcing the creation of `self.pycore` to register observers
self.pycore
def is_ignored(self, resource):
return self.ignored.does_match(resource)
def sync(self):
"""Closes project open resources"""
self.close()
def close(self):
"""Closes project open resources"""
self.data_files.write()
def set(self, key, value):
"""Set the `key` preference to `value`"""
self.prefs.set(key, value)
@property
def ropefolder(self):
if self._ropefolder_name is not None:
return self.get_folder(self._ropefolder_name)
def validate(self, folder=None):
if folder is None:
folder = self.root
super(Project, self).validate(folder)
root = property(lambda self: self.get_resource(''))
address = property(lambda self: self._address)
class NoProject(_Project):
"""A null object for holding out of project files.
    This class is a singleton; use the `get_no_project` global function.
"""
def __init__(self):
fscommands = rope.base.fscommands.FileSystemCommands()
super(NoProject, self).__init__(fscommands)
def _get_resource_path(self, name):
real_name = name.replace('/', os.path.sep)
return _realpath(real_name)
def get_resource(self, name):
universal_name = _realpath(name).replace(os.path.sep, '/')
return super(NoProject, self).get_resource(universal_name)
def get_files(self):
return []
_no_project = None
def get_no_project():
if NoProject._no_project is None:
NoProject._no_project = NoProject()
return NoProject._no_project
class _FileListCacher(object):
def __init__(self, project):
self.project = project
self.files = None
rawobserver = ResourceObserver(
self._changed, self._invalid, self._invalid,
self._invalid, self._invalid)
self.project.add_observer(rawobserver)
def get_files(self):
if self.files is None:
self.files = set()
self._add_files(self.project.root)
return self.files
def _add_files(self, folder):
for child in folder.get_children():
if child.is_folder():
self._add_files(child)
elif not self.project.is_ignored(child):
self.files.add(child)
def _changed(self, resource):
if resource.is_folder():
self.files = None
def _invalid(self, resource, new_resource=None):
self.files = None
class _DataFiles(object):
def __init__(self, project):
self.project = project
self.hooks = []
def read_data(self, name, compress=False, import_=False):
if self.project.ropefolder is None:
return None
compress = compress and self._can_compress()
opener = self._get_opener(compress)
file = self._get_file(name, compress)
if not compress and import_:
self._import_old_files(name)
if file.exists():
input = opener(file.real_path, 'rb')
try:
result = []
try:
while True:
result.append(pickle.load(input))
except EOFError:
pass
if len(result) == 1:
return result[0]
if len(result) > 1:
return result
finally:
input.close()
def write_data(self, name, data, compress=False):
if self.project.ropefolder is not None:
compress = compress and self._can_compress()
file = self._get_file(name, compress)
opener = self._get_opener(compress)
output = opener(file.real_path, 'wb')
try:
pickle.dump(data, output, 2)
finally:
output.close()
def add_write_hook(self, hook):
self.hooks.append(hook)
def write(self):
for hook in self.hooks:
hook()
def _can_compress(self):
try:
import gzip
return True
except ImportError:
return False
def _import_old_files(self, name):
old = self._get_file(name + '.pickle', False)
new = self._get_file(name, False)
if old.exists() and not new.exists():
shutil.move(old.real_path, new.real_path)
def _get_opener(self, compress):
if compress:
try:
import gzip
return gzip.open
except ImportError:
pass
return open
def _get_file(self, name, compress):
path = self.project.ropefolder.path + '/' + name
if compress:
path += '.gz'
return self.project.get_file(path)
def _realpath(path):
"""Return the real path of `path`
Is equivalent to ``realpath(abspath(expanduser(path)))``.
"""
# there is a bug in cygwin for os.path.abspath() for abs paths
if sys.platform == 'cygwin':
if path[1:3] == ':\\':
return path
return os.path.abspath(os.path.expanduser(path))
return os.path.realpath(os.path.abspath(os.path.expanduser(path)))
| gpl-3.0 | -1,300,507,421,648,576,300 | 30.936 | 79 | 0.580745 | false |
giovannimanzoni/project2 | test/RS485/python/readFromRS485.py | 2 | 1384 | #!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
 print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
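# Illustrative note (an assumption, not part of the original sample): the
# header mentions Xon/Xoff flow control, but pyserial does not enable it by
# default. If software flow control is really needed the port would be opened
# roughly as:
#
#     ser = serial.Serial(port='/dev/ttyS1', baudrate=9600, xonxoff=True)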
| cc0-1.0 | 1,092,960,362,548,122,100 | 28.446809 | 123 | 0.684249 | false |
FrankBian/kuma | kuma/humans/models.py | 5 | 2264 | from __future__ import with_statement
import json
import os
import subprocess
import urllib
from django.conf import settings
GITHUB_REPOS = "https://api.github.com/repos/mozilla/kuma/contributors"
class Human(object):
def __init__(self):
self.name = None
self.website = None
class HumansTXT(object):
def generate_file(self):
githubbers = self.get_github(json.load(urllib.urlopen(GITHUB_REPOS)))
localizers = self.get_mdn()
path = os.path.join(settings.HUMANSTXT_ROOT, "humans.txt")
with open(path, 'w') as target:
self.write_to_file(githubbers, target,
"Contributors on Github", "Developer")
self.write_to_file(localizers, target,
"Localization Contributors", "Localizer")
def write_to_file(self, humans, target, message, role):
target.write("%s \n" % message)
for h in humans:
target.write("%s: %s \n" %
(role, h.name.encode('ascii', 'ignore')))
if h.website is not None:
target.write("Website: %s \n" % h.website)
target.write('\n')
target.write('\n')
def get_github(self, data=None):
if not data:
raw_data = json.load(urllib.urlopen(GITHUB_REPOS))
else:
raw_data = data
humans = []
for contributor in raw_data:
human = Human()
human.name = contributor.get('name', contributor['login'])
human.website = contributor.get('blog', None)
humans.append(human)
return humans
def split_name(self, name):
if '@' in name:
name = name.split('@')[0]
return name
def get_mdn(self):
p = subprocess.Popen("svn log --quiet http://svn.mozilla.org/projects/\
mdn/trunk/locale/ | grep '^r' | awk '{print $3}' | sort | uniq",
shell=True, stdout=subprocess.PIPE)
localizers_list = p.communicate()[0].rstrip().split('\n', -1)
humans = []
for localizer in localizers_list:
human = Human()
human.name = self.split_name(localizer)
humans.append(human)
return humans
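# Illustrative sketch (an assumption, not part of the original kuma module):
# how generate_file() might be wired up, e.g. from a management command or a
# cron job. It pulls GitHub contributors, appends SVN localizers and rewrites
# settings.HUMANSTXT_ROOT/humans.txt in place.
def _example_regenerate_humans_txt():
    HumansTXT().generate_file()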
| mpl-2.0 | 4,215,389,603,359,651,000 | 29.186667 | 79 | 0.555654 | false |
ravello/ansible | v2/ansible/plugins/lookup/flattened.py | 60 | 2408 | # (c) 2013, Serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _check_list_of_one_list(self, term):
# make sure term is not a list of one (list of one..) item
# return the final non list item if so
if isinstance(term,list) and len(term) == 1:
term = term[0]
if isinstance(term,list):
term = self._check_list_of_one_list(term)
return term
def _do_flatten(self, terms, variables):
ret = []
for term in terms:
term = self._check_list_of_one_list(term)
if term == 'None' or term == 'null':
# ignore undefined items
break
if isinstance(term, basestring):
# convert a variable to a list
term2 = listify_lookup_plugin_terms(term, variables, loader=self._loader)
# but avoid converting a plain string to a list of one string
if term2 != [ term ]:
term = term2
if isinstance(term, list):
# if it's a list, check recursively for items that are a list
term = self._do_flatten(term, variables)
ret.extend(term)
else:
ret.append(term)
return ret
def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
raise AnsibleError("with_flattened expects a list")
return self._do_flatten(terms, variables)
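# Illustrative note (added for exposition, not part of the plugin): given a
# task such as
#
#   with_flattened:
#     - [pkgs_base, [pkgs_extra]]
#     - more_pkgs
#
# run() expands nested lists recursively into one flat list; a plain string
# term is first looked up as a variable (listify_lookup_plugin_terms), and a
# term that resolves to the string 'None' or 'null' stops processing of the
# remaining items in that list (the `break` in _do_flatten). The variable
# names above are assumptions used for demonstration only.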
| gpl-3.0 | -4,433,044,808,361,023,500 | 33.4 | 89 | 0.627076 | false |
openpaperwork/paperwork | paperwork-gtk/src/paperwork/frontend/beacon/__init__.py | 1 | 7272 | import datetime
import dateutil.parser
import http
import http.client
import json
import logging
import multiprocessing
import os
import platform
import ssl
import re
import threading
import urllib
logger = logging.getLogger(__name__)
class Beacon(object):
USER_AGENT = "Paperwork"
UPDATE_CHECK_INTERVAL = datetime.timedelta(days=7)
POST_STATISTICS_INTERVAL = datetime.timedelta(days=7)
SSL_CONTEXT = ssl._create_unverified_context()
GITHUB_RELEASES = {
'host': 'api.github.com',
'path': '/repos/openpaperwork/paperwork/releases',
}
OPENPAPERWORK_RELEASES = {
'host': os.getenv("OPENPAPER_SERVER", 'openpaper.work'),
'path': '/beacon/latest',
}
OPENPAPERWORK_STATS = {
'host': os.getenv("OPENPAPER_SERVER", 'openpaper.work'),
'path': '/beacon/post_statistics',
}
PROTOCOL = os.getenv("OPENPAPER_PROTOCOL", "https")
def __init__(self, config, flatpak):
super().__init__()
self.config = config
self.flatpak = flatpak
def get_version_github(self):
logger.info("Querying GitHub ...")
h = http.client.HTTPSConnection(
host=self.GITHUB_RELEASES['host'],
)
h.request('GET', url=self.GITHUB_RELEASES['path'], headers={
'User-Agent': self.USER_AGENT
})
r = h.getresponse()
r = r.read().decode('utf-8')
r = json.loads(r)
last_tag_date = None
last_tag_name = None
for release in r:
date = dateutil.parser.parse(release['created_at'])
tag = release['tag_name']
if not re.match("\d+\.\d+(|\.\d+)", tag):
continue
if last_tag_date is None or last_tag_date < date:
last_tag_date = date
last_tag_name = tag
return last_tag_name
def get_version_openpaperwork(self):
logger.info("Querying OpenPaper.work ...")
if self.PROTOCOL == "http":
h = http.client.HTTPConnection(
host=self.OPENPAPERWORK_RELEASES['host'],
)
else:
h = http.client.HTTPSConnection(
host=self.OPENPAPERWORK_RELEASES['host'],
context=self.SSL_CONTEXT
)
h.request('GET', url=self.OPENPAPERWORK_RELEASES['path'], headers={
'User-Agent': self.USER_AGENT
})
r = h.getresponse()
r = r.read().decode('utf-8')
r = json.loads(r)
return r['paperwork'][os.name]
def check_update(self):
if not self.config['check_for_update'].value:
logger.info("Update checking is disabled")
return
now = datetime.datetime.now()
last_check = self.config['last_update_check'].value
logger.info("Updates were last checked: {}".format(last_check))
if (last_check is not None and
last_check + self.UPDATE_CHECK_INTERVAL >= now):
logger.info("No need to check for new updates yet")
return
logger.info("Checking for updates ...")
version = None
try:
version = self.get_version_openpaperwork()
except Exception as exc:
logger.exception(
"Failed to get latest Paperwork release from OpenPaper.work. "
"Falling back to Github ...",
exc_info=exc
)
if version is None:
try:
version = self.get_version_github()
except Exception as exc:
logger.exception(
"Failed to get latest Paperwork from Github",
exc_info=exc
)
if version is None:
return
logger.info("Latest Paperwork release: {}".format(version))
self.config['last_update_found'].value = version
self.config['last_update_check'].value = now
self.config.write()
def get_statistics(self, version, docsearch):
distribution = platform.linux_distribution()
if distribution[0] == '':
distribution = platform.win32_ver()
processor = ""
os_name = os.name
        if os_name != 'nt': # contains too much info on Windows
processor = platform.processor()
if self.flatpak:
os_name += " (flatpak)"
return {
'uuid': int(self.config['uuid'].value),
'paperwork_version': str(version),
'nb_documents': int(docsearch.nb_docs),
'os_name': str(os_name),
'platform_architecture': str(platform.architecture()),
'platform_processor': str(processor),
'platform_distribution': str(distribution),
'cpu_count': int(multiprocessing.cpu_count()),
}
def send_statistics(self, version, docsearch):
if not self.config['send_statistics'].value:
logger.info("Anonymous statistics are disabled")
return
now = datetime.datetime.now()
last_post = self.config['last_statistics_post'].value
logger.info("Statistics were last posted: {}".format(last_post))
logger.info("Next post date: {}".format(
last_post + self.POST_STATISTICS_INTERVAL))
logger.info("Now: {}".format(now))
if (last_post is not None and
last_post + self.POST_STATISTICS_INTERVAL >= now):
logger.info("No need to post statistics")
return
logger.info("Sending anonymous statistics ...")
stats = self.get_statistics(version, docsearch)
logger.info("Statistics: {}".format(stats))
logger.info("Posting statistics on openpaper.work ...")
if self.PROTOCOL == "http":
h = http.client.HTTPConnection(
host=self.OPENPAPERWORK_STATS['host'],
)
else:
h = http.client.HTTPSConnection(
host=self.OPENPAPERWORK_STATS['host'],
context=self.SSL_CONTEXT
)
h.request('POST', url=self.OPENPAPERWORK_STATS['path'], headers={
"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain",
'User-Agent': self.USER_AGENT,
}, body=urllib.parse.urlencode({
'statistics': json.dumps(stats),
}))
r = h.getresponse()
logger.info("Getting reply from openpaper.work ({})".format(r.status))
reply = r.read().decode('utf-8')
if r.status == http.client.OK:
logger.info("Openpaper.work replied: {} | {}".format(
r.status, r.reason
))
else:
logger.warning("Openpaper.work replied: {} | {}".format(
r.status, r.reason
))
logger.warning("Openpaper.work: {}".format(reply))
self.config['last_statistics_post'].value = now
self.config.write()
def check_update(beacon):
thread = threading.Thread(target=beacon.check_update)
thread.start()
def send_statistics(beacon, version, docsearch):
thread = threading.Thread(target=beacon.send_statistics, kwargs={
'version': version,
'docsearch': docsearch,
})
thread.start()
| gpl-3.0 | -85,576,949,341,282,530 | 32.823256 | 78 | 0.561881 | false |
atodorov/dnf-plugins-core | plugins/needs_restarting.py | 2 | 6405 | # needs_restarting.py
# DNF plugin to check for running binaries in a need of restarting.
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# the mechanism of scanning smaps for opened files and matching them back to
# packages is heavily inspired by the original needs-restarting.py:
# http://yum.baseurl.org/gitweb?p=yum-utils.git;a=blob;f=needs-restarting.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from dnfpluginscore import logger, _
import dnf
import dnf.cli
import dnfpluginscore
import functools
import os
import re
import stat
def list_opened_files(uid):
for (pid, smaps) in list_smaps():
try:
if uid is not None and uid != owner_uid(smaps):
continue
with open(smaps, 'r') as smaps_file:
lines = smaps_file.readlines()
except EnvironmentError:
logger.warning("Failed to read PID %d's smaps.", pid)
continue
for line in lines:
ofile = smap2opened_file(pid, line)
if ofile is not None:
yield ofile
def list_smaps():
for dir_ in os.listdir('/proc'):
try:
pid = int(dir_)
except ValueError:
continue
smaps = '/proc/%d/smaps' % pid
yield (pid, smaps)
def memoize(func):
sentinel = object()
cache = {}
def wrapper(param):
val = cache.get(param, sentinel)
if val is not sentinel:
return val
val = func(param)
cache[param] = val
return val
return wrapper
def owner_uid(fname):
return os.stat(fname)[stat.ST_UID]
def owning_package(sack, fname):
matches = sack.query().filter(file=fname).run()
if matches:
return matches[0]
return None
def parse_args(args):
parser = dnfpluginscore.ArgumentParser(NeedsRestartingCommand.aliases[0])
parser.add_argument('-u', '--useronly', action='store_true',
help=_("only consider this user's processes"))
return parser.parse_args(args)
def print_cmd(pid):
cmdline = '/proc/%d/cmdline' % pid
with open(cmdline) as cmdline_file:
command = dnf.i18n.ucd(cmdline_file.read())
command = ' '.join(command.split('\000'))
print('%d : %s' % (pid, command))
def smap2opened_file(pid, line):
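    # A typical smaps header line this expects looks roughly like (values are
    # illustrative only):
    #   7f2d4c021000-7f2d4c1d5000 r-xp 00000000 fd:01 456  /usr/lib64/libc.so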
slash = line.find('/')
if slash < 0:
return None
if line.find('00:') >= 0:
# not a regular file
return None
fn = line[slash:].strip()
suffix_index = fn.rfind(' (deleted)')
if suffix_index < 0:
return OpenedFile(pid, fn, False)
else:
return OpenedFile(pid, fn[:suffix_index], True)
class OpenedFile(object):
RE_TRANSACTION_FILE = re.compile('^(.+);[0-9A-Fa-f]{8,}$')
def __init__(self, pid, name, deleted):
self.deleted = deleted
self.name = name
self.pid = pid
@property
def presumed_name(self):
"""Calculate the name of the file pre-transaction.
        In case of a file that got deleted during the transaction, possibly
        just because of an upgrade to a newer version of the same file, RPM
        renames the old file to the same name with a hexadecimal suffix just
        before deleting it.
"""
if self.deleted:
match = self.RE_TRANSACTION_FILE.match(self.name)
if match:
return match.group(1)
return self.name
class ProcessStart(object):
def __init__(self):
self.boot_time = self.get_boot_time()
self.sc_clk_tck = self.get_sc_clk_tck()
@staticmethod
def get_boot_time():
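        # /proc/stat contains a line such as "btime 1500000000" -- the boot
        # time in seconds since the epoch (the value shown is illustrative).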
with open('/proc/stat') as stat_file:
for line in stat_file.readlines():
if not line.startswith('btime '):
continue
return int(line[len('btime '):].strip())
@staticmethod
def get_sc_clk_tck():
return os.sysconf(os.sysconf_names['SC_CLK_TCK'])
def __call__(self, pid):
stat_fn = '/proc/%d/stat' % pid
with open(stat_fn) as stat_file:
stats = stat_file.read().strip().split()
ticks_after_boot = int(stats[21])
secs_after_boot = ticks_after_boot // self.sc_clk_tck
return self.boot_time + secs_after_boot
class NeedsRestarting(dnf.Plugin):
name = 'needs-restarting'
def __init__(self, base, cli):
super(NeedsRestarting, self).__init__(base, cli)
if cli is None:
return
cli.register_command(NeedsRestartingCommand)
class NeedsRestartingCommand(dnf.cli.Command):
aliases = ('needs-restarting',)
summary = _('determine updated binaries that need restarting')
usage = ''
def configure(self, _):
demands = self.cli.demands
demands.sack_activation = True
def run(self, args):
opts = parse_args(args)
process_start = ProcessStart()
owning_pkg_fn = functools.partial(owning_package, self.base.sack)
owning_pkg_fn = memoize(owning_pkg_fn)
stale_pids = set()
uid = os.geteuid() if opts.useronly else None
for ofile in list_opened_files(uid):
pkg = owning_pkg_fn(ofile.presumed_name)
if pkg is None:
continue
if pkg.installtime > process_start(ofile.pid):
stale_pids.add(ofile.pid)
for pid in sorted(stale_pids):
print_cmd(pid)
| gpl-2.0 | -7,182,448,113,677,368,000 | 29.942029 | 77 | 0.624044 | false |
zx8/youtube-dl | youtube_dl/extractor/dump.py | 120 | 1036 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class DumpIE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?dump\.com/(?P<id>[a-zA-Z0-9]+)/'
_TEST = {
'url': 'http://www.dump.com/oneus/',
'md5': 'ad71704d1e67dfd9e81e3e8b42d69d99',
'info_dict': {
'id': 'oneus',
'ext': 'flv',
'title': "He's one of us.",
'thumbnail': 're:^https?://.*\.jpg$',
},
}
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r's1.addVariable\("file",\s*"([^"]+)"', webpage, 'video URL')
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
| unlicense | -4,632,474,669,795,308,000 | 25.564103 | 73 | 0.506757 | false |
songfj/calibre | src/calibre/ebooks/markdown/extensions/toc.py | 46 | 8336 | """
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree
from .headerid import slugify, unique, itertext
import re
def order_toc_list(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
def build_correct(remaining_list, prev_elements=[{'level': 1000}]):
if not remaining_list:
return [], []
current = remaining_list.pop(0)
if not 'children' in current.keys():
current['children'] = []
if not prev_elements:
            # This happens for instance with [8, 1, 1], i.e. when some
# header level is outside a scope. We treat it as a
# top-level
next_elements, children = build_correct(remaining_list, [current])
current['children'].append(children)
return [current] + next_elements, []
prev_element = prev_elements.pop()
children = []
next_elements = []
# Is current part of the child list or next list?
if current['level'] > prev_element['level']:
#print "%d is a child of %d" % (current['level'], prev_element['level'])
prev_elements.append(prev_element)
prev_elements.append(current)
prev_element['children'].append(current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children += children2
next_elements += next_elements2
else:
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements:
#print "No previous elements, so appending to the next set"
next_elements.append(current)
prev_elements = [current]
next_elements2, children2 = build_correct(remaining_list, prev_elements)
current['children'].extend(children2)
else:
#print "Previous elements, comparing to those first"
remaining_list.insert(0, current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children.extend(children2)
next_elements += next_elements2
return next_elements, children
ordered_list, __ = build_correct(toc_list)
return ordered_list
class TocTreeprocessor(Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def add_anchor(self, c, elem_id): #@ReservedAssignment
if self.use_anchors:
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c.getchildren():
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
header_rgx = re.compile("[Hh][123456]")
self.use_anchors = self.config["anchorlink"] in [1, '1', True, 'True', 'true']
# Get a list of id attributes
used_ids = set()
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.add(c.attrib["id"])
toc_list = []
marker_found = False
for (p, c) in self.iterparent(doc):
text = ''.join(itertext(c)).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
# inside previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = unique(self.config["slugify"](text, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1])
toc_list.append({'level': tag_level,
'id': elem_id,
'name': text})
self.add_anchor(c, elem_id)
toc_list_nested = order_toc_list(toc_list)
self.build_toc_etree(div, toc_list_nested)
prettify = self.markdown.treeprocessors.get('prettify')
if prettify: prettify.run(div)
if not marker_found:
# serialize and attach to markdown instance.
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md)
tocext.config = self.getConfigs()
# Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(configs={}):
return TocExtension(configs=configs)
| gpl-3.0 | -211,858,965,639,474,370 | 36.719457 | 88 | 0.523273 | false |
40423107/2017springcd_hw | theme/pelican-bootstrap3_local/static/glow/primitive.py | 161 | 4838 | from javascript import JSConstructor, JSObject
from .vector import vec
class primitive:
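    # Thin wrapper over a GlowScript primitive constructor: vec keyword
    # arguments are unwrapped to the underlying JS vectors. Usage sketch
    # (values illustrative): box(pos=vec(0, 1, 0), color=vec(1, 0, 0))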
def __init__(self, prim, **kwargs):
for _key in kwargs.keys():
if isinstance(kwargs[_key], vec):
kwargs[_key]=kwargs[_key]._vec
self._prim=prim(kwargs)
def rotate(self, **kwargs):
if 'axis' in kwargs:
#for now lets assume axis is a vector
kwargs['axis']=kwargs['axis']._vec
self._prim.rotate(kwargs)
@property
def pos(self):
_v=vec()
_v._set_vec(self._prim.pos)
return _v
@pos.setter
def pos(self, value):
if isinstance(value, vec):
self._prim.pos=value._vec
else:
print("Error! pos must be a vector")
@property
def color(self):
_v=vec()
_v._set_vec(self._prim.color)
return _v
@color.setter
def color(self, value):
if isinstance(value, vec):
self._prim.color=value._vec
else:
print("Error! color must be a vec")
@property
def axis(self):
_v=vec()
_v._set_vec(self._prim.axis)
return _v
@axis.setter
def axis(self, value):
if isinstance(value, vec):
self._prim.axis=value._vec
else:
print("Error! axis must be a vec")
@property
def size(self):
return self._prim.size
@size.setter
def size(self, value):
self._prim.size=value
@property
def up(self):
_v=vec()
_v._set_vec(self._prim.up)
return _v
@up.setter
def up(self, value):
if isinstance(value, vec):
self._prim.up=value._vec
else:
print("Error! up must be a vec")
@property
def opacity(self):
return self._prim.opacity
@opacity.setter
def opacity(self, value):
self._prim.opacity=value
@property
def shininess(self):
return self._prim.shininess
@shininess.setter
def shininess(self, value):
self._prim.shininess=value
@property
def emissive(self):
return self._prim.emissive
@emissive.setter
def emissive(self, value):
self._prim.emissive=value
@property
def texture(self):
return self._prim.texture
@texture.setter
def texture(self, **kwargs):
self._prim.texture=kwargs
@property
def visible(self):
return self._prim.visible
@visible.setter
def visible(self, flag):
assert isinstance(flag, bool)
        self._prim.visible = flag
class arrow(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.arrow), **kwargs)
class box(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.box), **kwargs)
class cone(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.cone), **kwargs)
class curve(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.curve), **kwargs)
def push(self, v):
if isinstance(v, vec):
self._prim.push(v._vec)
elif isinstance(v, dict):
for _key in v.keys():
if isinstance(_key, vec):
v[_key]=v[_key]._vec
self._prim.push(v)
def append(self, v):
self.push(v)
class cylinder(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.cylinder), **kwargs)
class helix(cylinder):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.helix), **kwargs)
class pyramid(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.pyramid), **kwargs)
#class ring(curve):
class sphere(primitive):
def __init__(self, **kwargs):
primitive.__init__(self, JSConstructor(glowscript.sphere), **kwargs)
#triangle
#class triangle:
# def __init__(self, **kwargs):
# self._tri = JSConstructor(glowscript.triangle)(kwargs)
#vertex
#class vertex:
# def __init__(self, **kwargs):
# self._ver = JSConstructor(glowscript.vertex)(kwargs)
#quad
#compound
#class compound(box):
# def __init__(self, **kwargs):
# box.__init__(self, kwargs)
# I'm not sure if the declarations below are correct. Will fix later.
class distinct_light:
def __init__(self, **kwargs):
self._dl=JSConstructor(glowscript.distant_light)(kwargs)
class local_light:
def __init__(self, **kwargs):
self._ll=JSConstructor(glowscript.local_light)(kwargs)
class draw:
def __init__(self, **kwargs):
self._draw=JSConstructor(glowscript.draw)(kwargs)
class label:
def __init__(self, **kwargs):
self._label=JSConstructor(glowscript.label)(kwargs)
def attach_trail(object, **kwargs):
if isinstance(object, primitive):
JSObject(glowscript.attach_trail)(object._prim, kwargs)
else:
JSObject(glowscript.attach_trail)(object, kwargs)
| agpl-3.0 | -5,092,828,974,536,535,000 | 22.038095 | 76 | 0.621124 | false |
Emergya/icm-openedx-educamadrid-platform-basic | cms/djangoapps/models/settings/course_grading.py | 143 | 8907 | from datetime import timedelta
from xmodule.modulestore.django import modulestore
class CourseGradingModel(object):
"""
Basically a DAO and Model combo for CRUD operations pertaining to grading policy.
"""
# Within this class, allow access to protected members of client classes.
# This comes up when accessing kvs data and caches during kvs saves and modulestore writes.
def __init__(self, course_descriptor):
self.graders = [
CourseGradingModel.jsonize_grader(i, grader) for i, grader in enumerate(course_descriptor.raw_grader)
] # weights transformed to ints [0..100]
self.grade_cutoffs = course_descriptor.grade_cutoffs
self.grace_period = CourseGradingModel.convert_set_grace_period(course_descriptor)
self.minimum_grade_credit = course_descriptor.minimum_grade_credit
@classmethod
def fetch(cls, course_key):
"""
Fetch the course grading policy for the given course from persistence and return a CourseGradingModel.
"""
descriptor = modulestore().get_course(course_key)
model = cls(descriptor)
return model
@staticmethod
def fetch_grader(course_key, index):
"""
Fetch the course's nth grader
Returns an empty dict if there's no such grader.
"""
descriptor = modulestore().get_course(course_key)
index = int(index)
if len(descriptor.raw_grader) > index:
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
# return empty model
else:
return {"id": index,
"type": "",
"min_count": 0,
"drop_count": 0,
"short_label": None,
"weight": 0
}
@staticmethod
def update_from_json(course_key, jsondict, user):
"""
Decode the json into CourseGradingModel and save any changes. Returns the modified model.
Probably not the usual path for updates as it's too coarse grained.
"""
descriptor = modulestore().get_course(course_key)
graders_parsed = [CourseGradingModel.parse_grader(jsonele) for jsonele in jsondict['graders']]
descriptor.raw_grader = graders_parsed
descriptor.grade_cutoffs = jsondict['grade_cutoffs']
modulestore().update_item(descriptor, user.id)
CourseGradingModel.update_grace_period_from_json(course_key, jsondict['grace_period'], user)
CourseGradingModel.update_minimum_grade_credit_from_json(course_key, jsondict['minimum_grade_credit'], user)
return CourseGradingModel.fetch(course_key)
@staticmethod
def update_grader_from_json(course_key, grader, user):
"""
Create or update the grader of the given type (string key) for the given course. Returns the modified
grader which is a full model on the client but not on the server (just a dict)
"""
descriptor = modulestore().get_course(course_key)
# parse removes the id; so, grab it before parse
index = int(grader.get('id', len(descriptor.raw_grader)))
grader = CourseGradingModel.parse_grader(grader)
if index < len(descriptor.raw_grader):
descriptor.raw_grader[index] = grader
else:
descriptor.raw_grader.append(grader)
modulestore().update_item(descriptor, user.id)
return CourseGradingModel.jsonize_grader(index, descriptor.raw_grader[index])
@staticmethod
def update_cutoffs_from_json(course_key, cutoffs, user):
"""
Create or update the grade cutoffs for the given course. Returns sent in cutoffs (ie., no extra
db fetch).
"""
descriptor = modulestore().get_course(course_key)
descriptor.grade_cutoffs = cutoffs
modulestore().update_item(descriptor, user.id)
return cutoffs
@staticmethod
def update_grace_period_from_json(course_key, graceperiodjson, user):
"""
Update the course's default grace period. Incoming dict is {hours: h, minutes: m} possibly as a
grace_period entry in an enclosing dict. It is also safe to call this method with a value of
None for graceperiodjson.
"""
descriptor = modulestore().get_course(course_key)
# Before a graceperiod has ever been created, it will be None (once it has been
# created, it cannot be set back to None).
if graceperiodjson is not None:
if 'grace_period' in graceperiodjson:
graceperiodjson = graceperiodjson['grace_period']
grace_timedelta = timedelta(**graceperiodjson)
descriptor.graceperiod = grace_timedelta
modulestore().update_item(descriptor, user.id)
@staticmethod
def update_minimum_grade_credit_from_json(course_key, minimum_grade_credit, user):
"""Update the course's default minimum grade requirement for credit.
Args:
course_key(CourseKey): The course identifier
minimum_grade_json(Float): Minimum grade value
user(User): The user object
"""
descriptor = modulestore().get_course(course_key)
# 'minimum_grade_credit' cannot be set to None
if minimum_grade_credit is not None:
minimum_grade_credit = minimum_grade_credit
descriptor.minimum_grade_credit = minimum_grade_credit
modulestore().update_item(descriptor, user.id)
@staticmethod
def delete_grader(course_key, index, user):
"""
Delete the grader of the given type from the given course.
"""
descriptor = modulestore().get_course(course_key)
index = int(index)
if index < len(descriptor.raw_grader):
del descriptor.raw_grader[index]
# force propagation to definition
descriptor.raw_grader = descriptor.raw_grader
modulestore().update_item(descriptor, user.id)
@staticmethod
def delete_grace_period(course_key, user):
"""
Delete the course's grace period.
"""
descriptor = modulestore().get_course(course_key)
del descriptor.graceperiod
modulestore().update_item(descriptor, user.id)
@staticmethod
def get_section_grader_type(location):
descriptor = modulestore().get_item(location)
return {
"graderType": descriptor.format if descriptor.format is not None else 'notgraded',
"location": unicode(location),
}
@staticmethod
def update_section_grader_type(descriptor, grader_type, user):
if grader_type is not None and grader_type != u'notgraded':
descriptor.format = grader_type
descriptor.graded = True
else:
del descriptor.format
del descriptor.graded
modulestore().update_item(descriptor, user.id)
return {'graderType': grader_type}
@staticmethod
def convert_set_grace_period(descriptor):
# 5 hours 59 minutes 59 seconds => converted to iso format
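        # Illustrative example: timedelta(days=1, seconds=7205) converts to
        # {'hours': 26, 'minutes': 0, 'seconds': 5}.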
rawgrace = descriptor.graceperiod
if rawgrace:
hours_from_days = rawgrace.days * 24
seconds = rawgrace.seconds
hours_from_seconds = int(seconds / 3600)
hours = hours_from_days + hours_from_seconds
seconds -= hours_from_seconds * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
graceperiod = {'hours': 0, 'minutes': 0, 'seconds': 0}
if hours > 0:
graceperiod['hours'] = hours
if minutes > 0:
graceperiod['minutes'] = minutes
if seconds > 0:
graceperiod['seconds'] = seconds
return graceperiod
else:
return None
@staticmethod
def parse_grader(json_grader):
# manual to clear out kruft
result = {"type": json_grader["type"],
"min_count": int(json_grader.get('min_count', 0)),
"drop_count": int(json_grader.get('drop_count', 0)),
"short_label": json_grader.get('short_label', None),
"weight": float(json_grader.get('weight', 0)) / 100.0
}
return result
@staticmethod
def jsonize_grader(i, grader):
        # Warning: converting weight to an integer might give unwanted results
        # because of how floating point arithmetic works,
        # e.g. "0.29 * 100 = 28.999999999999996"
return {
"id": i,
"type": grader["type"],
"min_count": grader.get('min_count', 0),
"drop_count": grader.get('drop_count', 0),
"short_label": grader.get('short_label', ""),
"weight": grader.get('weight', 0) * 100,
}
| agpl-3.0 | 8,758,491,024,203,329,000 | 36.1125 | 116 | 0.614348 | false |
HBEE/odoo-addons | stock_picking_locations/__openerp__.py | 3 | 1744 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Picking Locations',
'version': '8.0.1.0.0',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock Picking Locations
=======================
    Add Location and Destination Location to stock pickings. When stock moves
    are created, these locations are used by default.
    Add a button on the stock picking to update the stock moves' Location and
    Destination Location.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'stock',
],
'data': [
'stock_view.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,205,728,772,784,360,000 | 31.90566 | 79 | 0.583142 | false |
quantumlib/OpenFermion | src/openfermion/testing/lih_integration_test.py | 1 | 6590 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests many modules to compute energy of LiH."""
import os
import unittest
import numpy
from openfermion.config import DATA_DIRECTORY
from openfermion.chem import MolecularData
from openfermion.transforms.opconversions import (get_fermion_operator,
normal_ordered, jordan_wigner,
reverse_jordan_wigner)
from openfermion.transforms.repconversions import freeze_orbitals
from openfermion.measurements import get_interaction_rdm
from openfermion.linalg import get_sparse_operator, get_ground_state
from openfermion.linalg.sparse_tools import (expectation, jw_hartree_fock_state,
get_density_matrix)
from openfermion.utils.operator_utils import count_qubits
class LiHIntegrationTest(unittest.TestCase):
def setUp(self):
# Set up molecule.
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]
basis = 'sto-3g'
multiplicity = 1
filename = os.path.join(DATA_DIRECTORY, 'H1-Li1_sto-3g_singlet_1.45')
self.molecule = MolecularData(geometry,
basis,
multiplicity,
filename=filename)
self.molecule.load()
# Get molecular Hamiltonian
self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
self.molecular_hamiltonian_no_core = (
self.molecule.get_molecular_hamiltonian(
occupied_indices=[0],
active_indices=range(1, self.molecule.n_orbitals)))
# Get FCI RDM.
self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get fermion Hamiltonian.
self.fermion_hamiltonian = normal_ordered(
get_fermion_operator(self.molecular_hamiltonian))
# Get qubit Hamiltonian.
self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)
# Get explicit coefficients.
self.nuclear_repulsion = self.molecular_hamiltonian.constant
self.one_body = self.molecular_hamiltonian.one_body_tensor
self.two_body = self.molecular_hamiltonian.two_body_tensor
# Get matrix form.
self.hamiltonian_matrix = get_sparse_operator(
self.molecular_hamiltonian)
self.hamiltonian_matrix_no_core = get_sparse_operator(
self.molecular_hamiltonian_no_core)
def test_all(self):
# Test reverse Jordan-Wigner.
fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test mapping to interaction operator.
fermion_hamiltonian = get_fermion_operator(self.molecular_hamiltonian)
fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
self.assertTrue(self.fermion_hamiltonian == fermion_hamiltonian)
# Test RDM energy.
fci_rdm_energy = self.nuclear_repulsion
fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor *
self.one_body)
fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor *
self.two_body)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Confirm expectation on qubit Hamiltonian using reverse JW matches.
qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
qubit_energy = 0.0
for term, coefficient in qubit_rdm.terms.items():
qubit_energy += coefficient * self.qubit_hamiltonian.terms[term]
self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)
# Confirm fermionic RDMs can be built from measured qubit RDMs.
new_fermi_rdm = get_interaction_rdm(qubit_rdm)
new_fermi_rdm.expectation(self.molecular_hamiltonian)
self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)
# Test sparse matrices.
energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
self.assertAlmostEqual(energy, self.molecule.fci_energy)
expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
self.assertAlmostEqual(expected_energy, energy)
# Make sure you can reproduce Hartree-Fock energy.
hf_state = jw_hartree_fock_state(self.molecule.n_electrons,
count_qubits(self.qubit_hamiltonian))
hf_density = get_density_matrix([hf_state], [1.])
expected_hf_density_energy = expectation(self.hamiltonian_matrix,
hf_density)
expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
self.assertAlmostEqual(expected_hf_density_energy,
self.molecule.hf_energy)
# Check that frozen core result matches frozen core FCI from psi4.
        # Record the frozen-core result from an external calculation.
self.frozen_core_fci_energy = -7.8807607374168
no_core_fci_energy = numpy.linalg.eigh(
self.hamiltonian_matrix_no_core.toarray())[0][0]
self.assertAlmostEqual(no_core_fci_energy, self.frozen_core_fci_energy)
        # Check that the freeze_orbitals function has the same effect as
        # the occupied_indices option of get_molecular_hamiltonian.
frozen_hamiltonian = freeze_orbitals(
get_fermion_operator(self.molecular_hamiltonian), [0, 1])
self.assertTrue(frozen_hamiltonian == get_fermion_operator(
self.molecular_hamiltonian_no_core))
| apache-2.0 | 7,106,191,499,194,213,000 | 46.410072 | 80 | 0.654173 | false |
kpayson64/grpc | examples/python/route_guide/route_guide_pb2_grpc.py | 55 | 4244 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import route_guide_pb2 as route__guide__pb2
class RouteGuideStub(object):
"""Interface exported by the server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeature = channel.unary_unary(
'/routeguide.RouteGuide/GetFeature',
request_serializer=route__guide__pb2.Point.SerializeToString,
response_deserializer=route__guide__pb2.Feature.FromString,
)
self.ListFeatures = channel.unary_stream(
'/routeguide.RouteGuide/ListFeatures',
request_serializer=route__guide__pb2.Rectangle.SerializeToString,
response_deserializer=route__guide__pb2.Feature.FromString,
)
self.RecordRoute = channel.stream_unary(
'/routeguide.RouteGuide/RecordRoute',
request_serializer=route__guide__pb2.Point.SerializeToString,
response_deserializer=route__guide__pb2.RouteSummary.FromString,
)
self.RouteChat = channel.stream_stream(
'/routeguide.RouteGuide/RouteChat',
request_serializer=route__guide__pb2.RouteNote.SerializeToString,
response_deserializer=route__guide__pb2.RouteNote.FromString,
)
class RouteGuideServicer(object):
"""Interface exported by the server.
"""
def GetFeature(self, request, context):
"""A simple RPC.
Obtains the feature at a given position.
A feature with an empty name is returned if there's no feature at the given
position.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFeatures(self, request, context):
"""A server-to-client streaming RPC.
Obtains the Features available within the given Rectangle. Results are
streamed rather than returned at once (e.g. in a response message with a
repeated field), as the rectangle may cover a large area and contain a
huge number of features.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RecordRoute(self, request_iterator, context):
"""A client-to-server streaming RPC.
Accepts a stream of Points on a route being traversed, returning a
RouteSummary when traversal is completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RouteChat(self, request_iterator, context):
"""A Bidirectional streaming RPC.
Accepts a stream of RouteNotes sent while a route is being traversed,
while receiving other RouteNotes (e.g. from other users).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouteGuideServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetFeature': grpc.unary_unary_rpc_method_handler(
servicer.GetFeature,
request_deserializer=route__guide__pb2.Point.FromString,
response_serializer=route__guide__pb2.Feature.SerializeToString,
),
'ListFeatures': grpc.unary_stream_rpc_method_handler(
servicer.ListFeatures,
request_deserializer=route__guide__pb2.Rectangle.FromString,
response_serializer=route__guide__pb2.Feature.SerializeToString,
),
'RecordRoute': grpc.stream_unary_rpc_method_handler(
servicer.RecordRoute,
request_deserializer=route__guide__pb2.Point.FromString,
response_serializer=route__guide__pb2.RouteSummary.SerializeToString,
),
'RouteChat': grpc.stream_stream_rpc_method_handler(
servicer.RouteChat,
request_deserializer=route__guide__pb2.RouteNote.FromString,
response_serializer=route__guide__pb2.RouteNote.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'routeguide.RouteGuide', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| apache-2.0 | 1,557,904,399,272,445,200 | 36.557522 | 79 | 0.704995 | false |
MSusik/invenio | invenio/modules/formatter/format_elements/bfe_date_rec.py | 39 | 1053 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints date of the entry of the record in database
"""
__revision__ = "$Id$"
def format_element(bfo):
"""
Date of the entry of the record in the database
@see: date.py
"""
date = bfo.field('909C1c')
return date
| gpl-2.0 | -7,645,262,181,802,193,000 | 35.310345 | 75 | 0.710351 | false |
lmazuel/azure-sdk-for-python | azure-batch/azure/batch/models/pool_add_parameter.py | 1 | 12361 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolAddParameter(Model):
"""A pool in the Azure Batch service to add.
:param id: A string that uniquely identifies the pool within the account.
The ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters. The
ID is case-preserving and case-insensitive (that is, you may not have two
pool IDs within an account that differ only by case).
:type id: str
:param display_name: The display name for the pool. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param vm_size: The size of virtual machines in the pool. All virtual
machines in a pool are the same size. For information about available
sizes of virtual machines for Cloud Services pools (pools created with
cloudServiceConfiguration), see Sizes for Cloud Services
(http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/).
Batch supports all Cloud Services VM sizes except ExtraSmall, A1V2 and
A2V2. For information about available VM sizes for pools using images from
the Virtual Machines Marketplace (pools created with
virtualMachineConfiguration) see Sizes for Virtual Machines (Linux)
(https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/)
or Sizes for Virtual Machines (Windows)
(https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/).
Batch supports all Azure VM sizes except STANDARD_A0 and those with
premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
:type vm_size: str
:param cloud_service_configuration: The cloud service configuration for
the pool. This property and virtualMachineConfiguration are mutually
exclusive and one of the properties must be specified. This property
cannot be specified if the Batch account was created with its
poolAllocationMode property set to 'UserSubscription'.
:type cloud_service_configuration:
~azure.batch.models.CloudServiceConfiguration
:param virtual_machine_configuration: The virtual machine configuration
for the pool. This property and cloudServiceConfiguration are mutually
exclusive and one of the properties must be specified.
:type virtual_machine_configuration:
~azure.batch.models.VirtualMachineConfiguration
:param resize_timeout: The timeout for allocation of compute nodes to the
pool. This timeout applies only to manual scaling; it has no effect when
enableAutoScale is set to true. The default value is 15 minutes. The
minimum value is 5 minutes. If you specify a value less than 5 minutes,
the Batch service returns an error; if you are calling the REST API
directly, the HTTP status code is 400 (Bad Request).
:type resize_timeout: timedelta
:param target_dedicated_nodes: The desired number of dedicated compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to true. If enableAutoScale is set to false, then you must set
either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_dedicated_nodes: int
:param target_low_priority_nodes: The desired number of low-priority
compute nodes in the pool. This property must not be specified if
enableAutoScale is set to true. If enableAutoScale is set to false, then
you must set either targetDedicatedNodes, targetLowPriorityNodes, or both.
:type target_low_priority_nodes: int
:param enable_auto_scale: Whether the pool size should automatically
adjust over time. If false, at least one of targetDedicateNodes and
targetLowPriorityNodes must be specified. If true, the autoScaleFormula
property is required and the pool automatically resizes according to the
formula. The default value is false.
:type enable_auto_scale: bool
:param auto_scale_formula: A formula for the desired number of compute
nodes in the pool. This property must not be specified if enableAutoScale
is set to false. It is required if enableAutoScale is set to true. The
formula is checked for validity before the pool is created. If the formula
is not valid, the Batch service rejects the request with detailed error
information. For more information about specifying this formula, see
'Automatically scale compute nodes in an Azure Batch pool'
(https://azure.microsoft.com/documentation/articles/batch-automatic-scaling/).
:type auto_scale_formula: str
:param auto_scale_evaluation_interval: The time interval at which to
automatically adjust the pool size according to the autoscale formula. The
default value is 15 minutes. The minimum and maximum value are 5 minutes
and 168 hours respectively. If you specify a value less than 5 minutes or
greater than 168 hours, the Batch service returns an error; if you are
calling the REST API directly, the HTTP status code is 400 (Bad Request).
:type auto_scale_evaluation_interval: timedelta
:param enable_inter_node_communication: Whether the pool permits direct
communication between nodes. Enabling inter-node communication limits the
maximum size of the pool due to deployment restrictions on the nodes of
the pool. This may result in the pool not reaching its desired size. The
default value is false.
:type enable_inter_node_communication: bool
:param network_configuration: The network configuration for the pool.
:type network_configuration: ~azure.batch.models.NetworkConfiguration
:param start_task: A task specified to run on each compute node as it
joins the pool. The task runs when the node is added to the pool or when
the node is restarted.
:type start_task: ~azure.batch.models.StartTask
:param certificate_references: The list of certificates to be installed on
each compute node in the pool. For Windows compute nodes, the Batch
service installs the certificates to the specified certificate store and
location. For Linux compute nodes, the certificates are stored in a
directory inside the task working directory and an environment variable
AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this
location. For certificates with visibility of 'remoteUser', a 'certs'
directory is created in the user's home directory (e.g.,
/home/{user-name}/certs) and certificates are placed in that directory.
:type certificate_references:
list[~azure.batch.models.CertificateReference]
:param application_package_references: The list of application packages to
be installed on each compute node in the pool.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param application_licenses: The list of application licenses the Batch
service will make available on each compute node in the pool. The list of
application licenses must be a subset of available Batch service
application licenses. If a license is requested which is not supported,
pool creation will fail.
:type application_licenses: list[str]
:param max_tasks_per_node: The maximum number of tasks that can run
concurrently on a single compute node in the pool. The default value is 1.
The maximum value of this setting depends on the size of the compute nodes
in the pool (the vmSize setting).
:type max_tasks_per_node: int
:param task_scheduling_policy: How tasks are distributed across compute
nodes in a pool.
:type task_scheduling_policy: ~azure.batch.models.TaskSchedulingPolicy
:param user_accounts: The list of user accounts to be created on each node
in the pool.
:type user_accounts: list[~azure.batch.models.UserAccount]
:param metadata: A list of name-value pairs associated with the pool as
metadata. The Batch service does not assign any meaning to metadata; it is
solely for the use of user code.
:type metadata: list[~azure.batch.models.MetadataItem]
"""
_validation = {
'id': {'required': True},
'vm_size': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'cloud_service_configuration': {'key': 'cloudServiceConfiguration', 'type': 'CloudServiceConfiguration'},
'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'},
'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'},
'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'},
'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'},
'enable_auto_scale': {'key': 'enableAutoScale', 'type': 'bool'},
'auto_scale_formula': {'key': 'autoScaleFormula', 'type': 'str'},
'auto_scale_evaluation_interval': {'key': 'autoScaleEvaluationInterval', 'type': 'duration'},
'enable_inter_node_communication': {'key': 'enableInterNodeCommunication', 'type': 'bool'},
'network_configuration': {'key': 'networkConfiguration', 'type': 'NetworkConfiguration'},
'start_task': {'key': 'startTask', 'type': 'StartTask'},
'certificate_references': {'key': 'certificateReferences', 'type': '[CertificateReference]'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'application_licenses': {'key': 'applicationLicenses', 'type': '[str]'},
'max_tasks_per_node': {'key': 'maxTasksPerNode', 'type': 'int'},
'task_scheduling_policy': {'key': 'taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'},
'user_accounts': {'key': 'userAccounts', 'type': '[UserAccount]'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, id, vm_size, display_name=None, cloud_service_configuration=None, virtual_machine_configuration=None, resize_timeout=None, target_dedicated_nodes=None, target_low_priority_nodes=None, enable_auto_scale=None, auto_scale_formula=None, auto_scale_evaluation_interval=None, enable_inter_node_communication=None, network_configuration=None, start_task=None, certificate_references=None, application_package_references=None, application_licenses=None, max_tasks_per_node=None, task_scheduling_policy=None, user_accounts=None, metadata=None):
super(PoolAddParameter, self).__init__()
self.id = id
self.display_name = display_name
self.vm_size = vm_size
self.cloud_service_configuration = cloud_service_configuration
self.virtual_machine_configuration = virtual_machine_configuration
self.resize_timeout = resize_timeout
self.target_dedicated_nodes = target_dedicated_nodes
self.target_low_priority_nodes = target_low_priority_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.auto_scale_evaluation_interval = auto_scale_evaluation_interval
self.enable_inter_node_communication = enable_inter_node_communication
self.network_configuration = network_configuration
self.start_task = start_task
self.certificate_references = certificate_references
self.application_package_references = application_package_references
self.application_licenses = application_licenses
self.max_tasks_per_node = max_tasks_per_node
self.task_scheduling_policy = task_scheduling_policy
self.user_accounts = user_accounts
self.metadata = metadata
| mit | 8,355,341,465,042,573,000 | 62.389744 | 558 | 0.719602 | false |
achang97/YouTunes | lib/python2.7/site-packages/pyasn1_modules/rfc2560.py | 5 | 8307 | #
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <[email protected]>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response work only for a single certificate
# * only some values are parsed out of the response
# * the request doesn't set a nonce or a signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import tag, namedtype, namedval, univ, useful
from pyasn1_modules import rfc2459
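# Decoding sketch (illustrative; assumes ``der_bytes`` holds a DER-encoded
# OCSP response obtained elsewhere):
#   from pyasn1.codec.der import decoder
#   response, _ = decoder.decode(der_bytes, asn1Spec=OCSPResponse())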
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8),
('privilegeWithdrawn', 9),
('aACompromise', 10)
)
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString):
pass
# end of PKIX Certificate Extensions module
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
componentType = univ.ObjectIdentifier()
class ArchiveCutoff(useful.GeneralizedTime):
pass
class UnknownInfo(univ.Null):
pass
class RevokedInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class CertID(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('issuerNameHash', univ.OctetString()),
namedtype.NamedType('issuerKeyHash', univ.OctetString()),
namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
)
class CertStatus(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('good',
univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('revoked',
RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('unknown',
UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class SingleResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certID', CertID()),
namedtype.NamedType('certStatus', CertStatus()),
namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class KeyHash(univ.OctetString):
pass
class ResponderID(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('byName',
rfc2459.Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('byKey',
KeyHash().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('responderID', ResponderID()),
namedtype.NamedType('producedAt', useful.GeneralizedTime()),
namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())),
namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class BasicOCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsResponseData', ResponseData()),
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class ResponseBytes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseType', univ.ObjectIdentifier()),
namedtype.NamedType('response', univ.OctetString())
)
class OCSPResponseStatus(univ.Enumerated):
namedValues = namedval.NamedValues(
('successful', 0),
('malformedRequest', 1),
('internalError', 2),
('tryLater', 3),
('undefinedStatus', 4), # should never occur
('sigRequired', 5),
('unauthorized', 6)
)
class OCSPResponse(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('responseStatus', OCSPResponseStatus()),
namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Request(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('reqCert', CertID()),
namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class Signature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString()),
namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class TBSRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())),
namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class OCSPRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsRequest', TBSRequest()),
namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
| mit | -2,406,343,363,187,537,000 | 36.759091 | 118 | 0.688456 | false |
cosmicAsymmetry/zulip | zerver/tests/test_tutorial.py | 32 | 2671 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from typing import Any, Dict
from zerver.lib.test_helpers import (
get_user_profile_by_email,
most_recent_message,
)
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.models import (
UserProfile,
)
import ujson
def fix_params(raw_params):
# type: (Dict[str, Any]) -> Dict[str, str]
    # A few of our legacy endpoints need their
# individual parameters serialized as JSON.
return {k: ujson.dumps(v) for k, v in raw_params.items()}
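# For illustration only (hypothetical values, not part of the test data):
# fix_params({'type': 'stream', 'content': 'hello'}) would return
# {'type': '"stream"', 'content': '"hello"'} -- every value goes through
# ujson.dumps(), so plain strings gain surrounding quotes and dicts/lists
# become JSON text before being sent as POST parameters.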
class TutorialTests(ZulipTestCase):
def test_send_message(self):
# type: () -> None
email = '[email protected]'
user = get_user_profile_by_email(email)
self.login(email)
welcome_bot = get_user_profile_by_email("[email protected]")
raw_params = dict(
type='stream',
recipient='Denmark',
topic='welcome',
content='hello'
)
params = fix_params(raw_params)
result = self.client_post("/json/tutorial_send_message", params)
self.assert_json_success(result)
message = most_recent_message(user)
self.assertEqual(message.content, 'hello')
self.assertEqual(message.sender, welcome_bot)
# now test some error cases
result = self.client_post("/json/tutorial_send_message", {})
self.assert_json_error(result, "Missing 'type' argument")
result = self.client_post("/json/tutorial_send_message", raw_params)
self.assert_json_error(result, 'argument "type" is not valid json.')
raw_params = dict(
type='INVALID',
recipient='Denmark',
topic='welcome',
content='hello'
)
params = fix_params(raw_params)
result = self.client_post("/json/tutorial_send_message", params)
self.assert_json_error(result, 'Bad data passed in to tutorial_send_message')
def test_tutorial_status(self):
# type: () -> None
email = '[email protected]'
self.login(email)
cases = [
('started', UserProfile.TUTORIAL_STARTED),
('finished', UserProfile.TUTORIAL_FINISHED),
]
for incoming_status, expected_db_status in cases:
raw_params = dict(status=incoming_status)
params = fix_params(raw_params)
result = self.client_post('/json/tutorial_status', params)
self.assert_json_success(result)
user = get_user_profile_by_email(email)
self.assertEqual(user.tutorial_status, expected_db_status)
| apache-2.0 | -6,931,556,165,873,486,000 | 30.423529 | 85 | 0.611756 | false |
cmelange/ansible | lib/ansible/plugins/action/script.py | 22 | 3738 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = 'check mode not supported for this module'
return result
if not tmp:
tmp = self._make_tmp_path()
creates = self._task.args.get('creates')
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
if self._remote_file_exists(creates):
self._remove_tmp_path(tmp)
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
removes = self._task.args.get('removes')
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
if not self._remote_file_exists(removes):
self._remove_tmp_path(tmp)
return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))
# the script name is the first item in the raw params, so we split it
# out now so we know the file name we need to transfer to the remote,
# and everything else is an argument to the script which we need later
# to append to the remote command
parts = self._task.args.get('_raw_params', '').strip().split()
source = parts[0]
args = ' '.join(parts[1:])
try:
source = self._loader.get_real_file(self._find_needle('files', source))
except AnsibleError as e:
return dict(failed=True, msg=to_native(e))
# transfer the file to a remote tmp location
tmp_src = self._connection._shell.join_path(tmp, os.path.basename(source))
self._transfer_file(source, tmp_src)
# set file permissions, more permissive when the copy is done as a different user
self._fixup_perms2((tmp, tmp_src), execute=True)
# add preparation steps to one ssh roundtrip executing the script
env_string = self._compute_environment_string()
script_cmd = ' '.join([env_string, tmp_src, args])
script_cmd = self._connection._shell.wrap_for_exec(script_cmd)
result.update(self._low_level_execute_command(cmd=script_cmd, sudoable=True))
# clean up after
self._remove_tmp_path(tmp)
result['changed'] = True
return result
| gpl-3.0 | -3,229,372,390,476,327,000 | 38.347368 | 93 | 0.64473 | false |
nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/numpy/lib/tests/test_nanfunctions.py | 21 | 28230 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_raises, assert_array_equal
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
                assert_(not np.isnan(res))
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)
def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)
def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)
def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)
def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)
def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)
        tgt = np.var(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)
def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)
tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
def test_allnans(self):
# Check for FutureWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = np.nansum([np.nan]*3, axis=None)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check scalar
res = np.nansum(np.nan)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check there is no warning for not all-nan
np.nansum([0]*3, axis=None)
assert_(len(w) == 0, 'unwanted warning raised')
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
else:
assert_(len(w) == 0)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 2)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
warnings.simplefilter('ignore', FutureWarning)
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanmedian(np.nan)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanmedian, d, axis=-5)
assert_raises(IndexError, np.nanmedian, d, axis=(0, -5))
assert_raises(IndexError, np.nanmedian, d, axis=4)
assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore', RuntimeWarning)
a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan])
assert_equal(np.nanmedian(a), np.inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
# no mask path
a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
assert_equal(np.nanmedian(a, axis=1), np.inf)
class TestNanFunctions_Percentile(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanpercentile(ndat, 30)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.percentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
res = np.nanpercentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
res = np.nanpercentile(d, 90, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
assert_almost_equal(res, tgt)
# Transpose the array to fit the output convention of numpy.percentile
tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
res = np.nanpercentile(_ndat, (28, 98), axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanpercentile(np.nan, 60)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanpercentile(0., 100) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5))
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4)
assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4))
assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
def test_multiple_percentiles(self):
perc = [50, 100]
mat = np.ones((4, 3))
nan_mat = np.nan * mat
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val.shape, val.shape)
val = np.percentile(large_mat, perc, axis=axis,
keepdims=keepdim)
nan_val = np.nanpercentile(large_mat, perc, axis=axis,
keepdims=keepdim)
assert_equal(nan_val, val)
megamat = np.ones((3, 4, 5, 6))
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 3,839,043,500,124,620,000 | 37.408163 | 83 | 0.520616 | false |
epssy/hue | desktop/core/ext-py/python-openid-2.2.5/openid/test/cryptutil.py | 87 | 2961 | import sys
import random
import os.path
from openid import cryptutil
# Most of the purpose of this test is to make sure that cryptutil can
# find a good source of randomness on this machine.
def test_cryptrand():
# It's possible, but HIGHLY unlikely that a correct implementation
# will fail by returning the same number twice
s = cryptutil.getBytes(32)
t = cryptutil.getBytes(32)
assert len(s) == 32
assert len(t) == 32
assert s != t
a = cryptutil.randrange(2L ** 128)
b = cryptutil.randrange(2L ** 128)
assert type(a) is long
assert type(b) is long
assert b != a
# Make sure that we can generate random numbers that are larger
# than platform int size
cryptutil.randrange(long(sys.maxint) + 1L)
def test_reversed():
if hasattr(cryptutil, 'reversed'):
cases = [
('', ''),
('a', 'a'),
('ab', 'ba'),
('abc', 'cba'),
('abcdefg', 'gfedcba'),
([], []),
([1], [1]),
([1,2], [2,1]),
([1,2,3], [3,2,1]),
(range(1000), range(999, -1, -1)),
]
for case, expected in cases:
expected = list(expected)
actual = list(cryptutil.reversed(case))
assert actual == expected, (case, expected, actual)
twice = list(cryptutil.reversed(actual))
assert twice == list(case), (actual, case, twice)
def test_binaryLongConvert():
MAX = sys.maxint
for iteration in xrange(500):
n = 0L
for i in range(10):
n += long(random.randrange(MAX))
s = cryptutil.longToBinary(n)
assert type(s) is str
n_prime = cryptutil.binaryToLong(s)
assert n == n_prime, (n, n_prime)
cases = [
('\x00', 0L),
('\x01', 1L),
('\x7F', 127L),
('\x00\xFF', 255L),
('\x00\x80', 128L),
('\x00\x81', 129L),
('\x00\x80\x00', 32768L),
('OpenID is cool', 1611215304203901150134421257416556L)
]
for s, n in cases:
n_prime = cryptutil.binaryToLong(s)
s_prime = cryptutil.longToBinary(n)
assert n == n_prime, (s, n, n_prime)
assert s == s_prime, (n, s, s_prime)
def test_longToBase64():
f = file(os.path.join(os.path.dirname(__file__), 'n2b64'))
try:
for line in f:
parts = line.strip().split(' ')
assert parts[0] == cryptutil.longToBase64(long(parts[1]))
finally:
f.close()
def test_base64ToLong():
f = file(os.path.join(os.path.dirname(__file__), 'n2b64'))
try:
for line in f:
parts = line.strip().split(' ')
assert long(parts[1]) == cryptutil.base64ToLong(parts[0])
finally:
f.close()
def test():
test_reversed()
test_binaryLongConvert()
test_cryptrand()
test_longToBase64()
test_base64ToLong()
if __name__ == '__main__':
test()
| apache-2.0 | 6,933,728,707,529,041,000 | 26.416667 | 70 | 0.540358 | false |
kingvuplus/gui_test3 | lib/python/Plugins/Extensions/DVDBurn/ProjectSettings.py | 34 | 10923 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.HelpMenu import HelpableScreen
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.FileList import FileList
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_FONTS, SCOPE_HDD
from Components.config import config, getConfigListEntry
from Components.ConfigList import ConfigListScreen
class FileBrowser(Screen, HelpableScreen):
def __init__(self, session, scope, configRef):
Screen.__init__(self, session)
# for the skin: first try FileBrowser_DVDBurn, then FileBrowser, this allows individual skinning
self.skinName = ["FileBrowser_DVDBurn", "FileBrowser" ]
HelpableScreen.__init__(self)
self.scope = scope
pattern = ""
self.configRef = configRef
currDir = "/"
if self.scope == "project":
currDir = self.getDir()
pattern = "(?i)^.*\.(ddvdp\.xml)"
elif self.scope == "menutemplate":
currDir = self.getDir()
pattern = "(?i)^.*\.(ddvdm\.xml)"
if self.scope == "menubg":
currDir = self.getDir(configRef.value)
pattern = "(?i)^.*\.(jpeg|jpg|jpe|png|bmp)"
elif self.scope == "menuaudio":
currDir = self.getDir(configRef.value)
pattern = "(?i)^.*\.(mp2|m2a|ac3)"
elif self.scope == "vmgm":
currDir = self.getDir(configRef.value)
pattern = "(?i)^.*\.(mpg|mpeg)"
elif self.scope == "font_face":
currDir = self.getDir(configRef.value, resolveFilename(SCOPE_FONTS))
pattern = "(?i)^.*\.(ttf)"
elif self.scope == "isopath":
currDir = configRef.value
elif self.scope == "image":
currDir = resolveFilename(SCOPE_HDD)
pattern = "(?i)^.*\.(iso)"
self.filelist = FileList(currDir, matchingPattern=pattern)
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions"],
{
"save": self.ok,
"ok": self.ok,
"cancel": self.exit
})
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("DVD file browser"))
def getDir(self, currentVal=None, defaultDir=None):
if currentVal:
return (currentVal.rstrip("/").rsplit("/",1))[0]
return defaultDir or (resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/")
def ok(self):
if self.filelist.canDescent():
self.filelist.descent()
if self.scope == "image":
path = self["filelist"].getCurrentDirectory() or ""
if fileExists(path+"VIDEO_TS"):
self.close(path,self.scope,self.configRef)
else:
ret = self["filelist"].getCurrentDirectory() + '/' + self["filelist"].getFilename()
self.close(ret,self.scope,self.configRef)
def exit(self):
		if self.scope == "isopath":
			self.close(self["filelist"].getCurrentDirectory(),self.scope,self.configRef)
		else:
			self.close(None,False,None)
class ProjectSettings(Screen,ConfigListScreen):
skin = """
<screen name="ProjectSettings" position="center,center" size="560,440" title="Collection settings" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;19" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="5,50" size="550,276" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,350" zPosition="1" size="560,2" />
<widget source="info" render="Label" position="10,360" size="550,80" font="Regular;18" halign="center" valign="center" />
</screen>"""
def __init__(self, session, project = None):
Screen.__init__(self, session)
self.project = project
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Load"))
if config.usage.setup_level.index >= 2: # expert+
self["key_blue"] = StaticText(_("Save"))
else:
self["key_blue"] = StaticText()
if config.usage.setup_level.index >= 2: # expert+
infotext = _("Available format variables") + ":\n$i=" + _("Track") + ", $t=" + _("Title") + ", $d=" + _("Description") + ", $l=" + _("length") + ", $c=" + _("chapters") + ",\n" + _("Record") + " $T=" + _("Begin time") + ", $Y=" + _("Year") + ", $M=" + _("month") + ", $D=" + _("day") + ",\n$A=" + _("audio tracks") + ", $C=" + _("Channel") + ", $f=" + _("filename")
else:
infotext = ""
self["info"] = StaticText(infotext)
self.keydict = {}
self.settings = project.settings
ConfigListScreen.__init__(self, [])
self.initConfigList()
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.exit,
"red": self.cancel,
"blue": self.saveProject,
"yellow": self.loadProject,
"cancel": self.cancel,
"ok": self.ok,
}, -2)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Collection settings"))
def changedConfigList(self):
key = self.keydict[self["config"].getCurrent()[1]]
if key == "authormode" or key == "output":
self.initConfigList()
def initConfigList(self):
authormode = self.settings.authormode.value
output = self.settings.output.value
self.list = []
self.list.append(getConfigListEntry(_("Collection name"), self.settings.name))
self.list.append(getConfigListEntry(_("Authoring mode"), self.settings.authormode))
self.list.append(getConfigListEntry(_("Output"), self.settings.output))
if output == "iso":
self.list.append(getConfigListEntry(_("ISO path"), self.settings.isopath))
if authormode.startswith("menu"):
self.list.append(getConfigListEntry(_("Menu")+' '+_("template file"), self.settings.menutemplate))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(getConfigListEntry(_("Menu")+' '+_("Title"), self.project.menutemplate.settings.titleformat))
self.list.append(getConfigListEntry(_("Menu")+' '+_("Subtitles"), self.project.menutemplate.settings.subtitleformat))
self.list.append(getConfigListEntry(_("Menu")+' '+_("background image"), self.project.menutemplate.settings.menubg))
self.list.append(getConfigListEntry(_("Menu")+' '+_("Language selection"), self.project.menutemplate.settings.menulang))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("headline")+' '+_("color"), self.settings.color_headline))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("text")+' '+_("color"), self.settings.color_button))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("highlighted button")+' '+_("color"), self.settings.color_highlight))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("font face"), self.settings.font_face))
#self.list.append(getConfigListEntry(_("Font size")+' ('+_("headline")+', '+_("Title")+', '+_("Subtitles")+')', self.settings.font_size))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("spaces (top, between rows, left)"), self.settings.space))
#self.list.append(getConfigListEntry(_("Menu")+' '+_("Audio"), self.settings.menuaudio))
if config.usage.setup_level.index >= 2: # expert+
if authormode != "data_ts":
self.list.append(getConfigListEntry(_("Titleset mode"), self.settings.titlesetmode))
if self.settings.titlesetmode.value == "single" or authormode == "just_linked":
self.list.append(getConfigListEntry(_("VMGM (intro trailer)"), self.settings.vmgm))
else:
self.list.append(getConfigListEntry(_("DVD data format"), self.settings.dataformat))
self["config"].setList(self.list)
self.keydict = {}
for key, val in self.settings.dict().iteritems():
self.keydict[val] = key
for key, val in self.project.menutemplate.settings.dict().iteritems():
self.keydict[val] = key
def keyLeft(self):
ConfigListScreen.keyLeft(self)
key = self.keydict[self["config"].getCurrent()[1]]
if key == "authormode" or key == "output" or key=="titlesetmode":
self.initConfigList()
def keyRight(self):
ConfigListScreen.keyRight(self)
key = self.keydict[self["config"].getCurrent()[1]]
if key == "authormode" or key == "output" or key=="titlesetmode":
self.initConfigList()
def exit(self):
self.applySettings()
self.close(True)
def applySettings(self):
for x in self["config"].list:
x[1].save()
def ok(self):
key = self.keydict[self["config"].getCurrent()[1]]
from DVDProject import ConfigFilename
if type(self["config"].getCurrent()[1]) == ConfigFilename:
self.session.openWithCallback(self.FileBrowserClosed, FileBrowser, key, self["config"].getCurrent()[1])
def cancel(self):
self.close(False)
def loadProject(self):
self.session.openWithCallback(self.FileBrowserClosed, FileBrowser, "project", self.settings)
def saveProject(self):
if config.usage.setup_level.index >= 2: # expert+
self.applySettings()
ret = self.project.saveProject(resolveFilename(SCOPE_PLUGINS)+"Extensions/DVDBurn/")
			if ret:
text = _("Save")+' '+_('OK')+':\n'+ret
self.session.open(MessageBox,text,type = MessageBox.TYPE_INFO)
else:
text = _("Save")+' '+_('Error')
self.session.open(MessageBox,text,type = MessageBox.TYPE_ERROR)
def FileBrowserClosed(self, path, scope, configRef):
if scope == "menutemplate":
if self.project.menutemplate.loadTemplate(path):
print "[ProjectSettings] menu template loaded"
configRef.setValue(path)
self.initConfigList()
else:
self.session.open(MessageBox,self.project.error,MessageBox.TYPE_ERROR)
elif scope == "project":
self.path = path
print "len(self.titles)", len(self.project.titles)
if len(self.project.titles):
self.session.openWithCallback(self.askLoadCB, MessageBox,text = _("Your current collection will get lost!") + "\n" + _("Do you want to restore your settings?"), type = MessageBox.TYPE_YESNO)
else:
self.askLoadCB(True)
elif scope:
configRef.setValue(path)
self.initConfigList()
def askLoadCB(self, answer):
if answer is not None and answer:
if self.project.loadProject(self.path):
self.initConfigList()
else:
self.session.open(MessageBox,self.project.error,MessageBox.TYPE_ERROR)
| gpl-2.0 | 1,444,806,314,667,675,600 | 43.222672 | 368 | 0.680033 | false |
alexlo03/ansible | contrib/inventory/vagrant.py | 28 | 4076 | #!/usr/bin/env python
"""
Vagrant external inventory script. Automatically finds the IP of the booted vagrant vm(s), and
returns it under the host group 'vagrant'
Example Vagrant configuration using this script:
config.vm.provision :ansible do |ansible|
ansible.playbook = "./provision/your_playbook.yml"
ansible.inventory_file = "./provision/inventory/vagrant.py"
ansible.verbose = true
end
"""
# Copyright (C) 2013 Mark Mandel <[email protected]>
# 2015 Igor Khomyakov <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the spacewalk.py inventory script for giving me the basic structure
# of this.
#
import sys
import os.path
import subprocess
import re
from paramiko import SSHConfig
from optparse import OptionParser
from collections import defaultdict
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves import StringIO
_group = 'vagrant' # a default group
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
# Options
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of Vagrant servers for Ansible")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
# get all the ssh configs for all boxes in an array of dictionaries.
def get_ssh_config():
return dict((k, get_a_ssh_config(k)) for k in list_running_boxes())
# list all the running boxes
def list_running_boxes():
output = to_text(subprocess.check_output(["vagrant", "status"]), errors='surrogate_or_strict').split('\n')
boxes = []
for line in output:
matcher = re.search(r"([^\s]+)[\s]+running \(.+", line)
if matcher:
boxes.append(matcher.group(1))
return boxes
# get the ssh config for a single box
def get_a_ssh_config(box_name):
"""Gives back a map of all the machine's ssh configurations"""
output = to_text(subprocess.check_output(["vagrant", "ssh-config", box_name]), errors='surrogate_or_strict')
config = SSHConfig()
config.parse(StringIO(output))
host_config = config.lookup(box_name)
# man 5 ssh_config:
# > It is possible to have multiple identity files ...
# > all these identities will be tried in sequence.
for id in host_config['identityfile']:
if os.path.isfile(id):
host_config['identityfile'] = id
return dict((v, host_config[k]) for k, v in _ssh_to_ansible)
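# Illustrative shape of the returned mapping for one box (hypothetical values,
# modelled on typical `vagrant ssh-config` output, not taken from a real run):
# {'ansible_ssh_user': 'vagrant', 'ansible_ssh_host': '127.0.0.1',
#  'ansible_ssh_port': '2222', 'ansible_ssh_private_key_file': '/path/to/private_key'}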
# List out servers that vagrant has running
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
meta = defaultdict(dict)
for host in ssh_config:
meta['hostvars'][host] = ssh_config[host]
print(json.dumps({_group: list(ssh_config.keys()), '_meta': meta}))
sys.exit(0)
# Get out the host details
# ------------------------------
elif options.host:
print(json.dumps(get_a_ssh_config(options.host)))
sys.exit(0)
# Print out help
# ------------------------------
else:
parser.print_help()
sys.exit(0)
| gpl-3.0 | 8,947,058,068,151,190,000 | 30.114504 | 112 | 0.65947 | false |
mrgloom/menpofit | menpofit/_version.py | 5 | 15768 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "menpofit-"
cfg.versionfile_source = "menpofit/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
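# Example of the pep440 style for hypothetical pieces (not real repository state):
# closest-tag "1.2", distance 3, short "abc1234", dirty True renders as
# "1.2+3.gabc1234.dirty"; with distance 0 and a clean tree it is just "1.2".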
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| bsd-3-clause | -2,500,855,670,279,683,000 | 33.278261 | 79 | 0.577879 | false |
xtao/openduckbill | src/openduckbilld.py | 3 | 1582 | #!/usr/bin/python2.4
# Copyright 2008 Google Inc.
# Author : Anoop Chandran <[email protected]>
#
# openduckbill is a simple backup application. It offers support for
# transferring data to a local backup directory, NFS. It also provides
# file system monitoring of directories marked for backup. Please read
# the README file for more details.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""The wrapper which starts the application.
openduckbilld is a wrapper around the rest of the application code. The function
StartOpenDuckbill calls the code which reads the config file and performs the
rest of the initialisation.
"""
import daemon
def StartOpenDuckbill():
"""Starts the process of setting up environment and initialisation."""
main_config = 'config.yaml'
dbinit = daemon.OpenDuckbillMain(main_config)
if dbinit.MainInitialize():
dbinit.BackupInitialize()
if __name__ == '__main__':
StartOpenDuckbill()
| gpl-2.0 | -5,042,270,495,903,658,000 | 34.155556 | 81 | 0.76043 | false |
ZenHarbinger/snapcraft | integration_tests/test_tar_plugin.py | 2 | 2083 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import (
DirExists,
FileExists
)
import integration_tests
class TarPluginTestCase(integration_tests.TestCase):
def test_stage_nil_plugin(self):
self.run_snapcraft('stage', 'tar')
expected_files = [
'flat',
os.path.join('flatdir', 'flat2'),
'onedeep',
os.path.join('onedeepdir', 'onedeep2'),
'oneflat',
'top-simple',
'notop',
'parent',
'slash',
'readonly_file',
os.path.join('destdir1', 'destdir2', 'top-simple')
]
for expected_file in expected_files:
self.assertThat(
os.path.join(self.stage_dir, expected_file),
FileExists())
expected_dirs = [
'dir-simple',
'notopdir',
'destdir1',
os.path.join('destdir1', 'destdir2')
]
for expected_dir in expected_dirs:
self.assertThat(
os.path.join(self.stage_dir, expected_dir),
DirExists())
binary_output = self.get_output_ignoring_non_zero_exit(
os.path.join(self.stage_dir, 'bin', 'test'))
self.assertEqual('tarproject\n', binary_output)
# Regression test for
# https://bugs.launchpad.net/snapcraft/+bug/1500728
self.run_snapcraft('pull')
| gpl-3.0 | -582,446,290,726,293,100 | 30.560606 | 71 | 0.601056 | false |
taylorhxu/pybrain | examples/supervised/backprop/datasets/parity.py | 30 | 1587 | #!/usr/bin/env python
__author__ = 'Tom Schaul ([email protected])'
from pybrain.datasets import SequentialDataSet
class ParityDataSet(SequentialDataSet):
""" Determine whether the bitstring up to the current point conains a pair number of 1s or not."""
def __init__(self):
SequentialDataSet.__init__(self, 1,1)
self.newSequence()
self.addSample([-1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.newSequence()
self.addSample([1], [1])
self.addSample([1], [-1])
self.newSequence()
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([1], [1])
self.addSample([1], [-1])
self.newSequence()
self.addSample([1], [1])
self.addSample([1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.addSample([-1], [1])
self.newSequence()
self.addSample([-1], [-1])
self.addSample([-1], [-1])
self.addSample([1], [1])
| bsd-3-clause | 4,482,021,155,472,248,300 | 29.519231 | 102 | 0.503466 | false |
sysalexis/kbengine | kbe/res/scripts/common/Lib/test/test_asynchat.py | 72 | 11161 | # test asynchat
from test import support
# If this fails, the test will be skipped.
thread = support.import_module('_thread')
import asynchat
import asyncore
import errno
import socket
import sys
import time
import unittest
import unittest.mock
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
SERVER_QUIT = b'QUIT\n'
TIMEOUT = 3.0
if threading:
class echo_server(threading.Thread):
# parameter to determine the number of bytes passed back to the
# client each send
chunk_size = 1
def __init__(self, event):
threading.Thread.__init__(self)
self.event = event
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.sock)
# This will be set if the client wants us to wait before echoing
# data back.
self.start_resend_event = None
def run(self):
self.sock.listen(1)
self.event.set()
conn, client = self.sock.accept()
self.buffer = b""
# collect data until quit message is seen
while SERVER_QUIT not in self.buffer:
data = conn.recv(1)
if not data:
break
self.buffer = self.buffer + data
# remove the SERVER_QUIT message
self.buffer = self.buffer.replace(SERVER_QUIT, b'')
if self.start_resend_event:
self.start_resend_event.wait()
# re-send entire set of collected data
try:
# this may fail on some tests, such as test_close_when_done,
# since the client closes the channel when it's done sending
while self.buffer:
n = conn.send(self.buffer[:self.chunk_size])
time.sleep(0.001)
self.buffer = self.buffer[n:]
except:
pass
conn.close()
self.sock.close()
class echo_client(asynchat.async_chat):
def __init__(self, terminator, server_port):
asynchat.async_chat.__init__(self)
self.contents = []
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((HOST, server_port))
self.set_terminator(terminator)
self.buffer = b""
def handle_connect(self):
pass
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
def collect_incoming_data(self, data):
self.buffer += data
def found_terminator(self):
self.contents.append(self.buffer)
self.buffer = b""
def start_echo_server():
event = threading.Event()
s = echo_server(event)
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
return s, event
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestAsynchat(unittest.TestCase):
usepoll = False
def setUp(self):
self._threads = support.threading_setup()
def tearDown(self):
support.threading_cleanup(*self._threads)
def line_terminator_check(self, term, server_chunk):
event = threading.Event()
s = echo_server(event)
s.chunk_size = server_chunk
s.start()
event.wait()
event.clear()
time.sleep(0.01) # Give server time to start accepting.
c = echo_client(term, s.port)
c.push(b"hello ")
c.push(b"world" + term)
c.push(b"I'm not dead yet!" + term)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])
# the line terminator tests below check receiving variously-sized
# chunks back from the server in order to exercise all branches of
# async_chat.handle_read
def test_line_terminator1(self):
# test one-character terminator
for l in (1, 2, 3):
self.line_terminator_check(b'\n', l)
def test_line_terminator2(self):
# test two-character terminator
for l in (1, 2, 3):
self.line_terminator_check(b'\r\n', l)
def test_line_terminator3(self):
# test three-character terminator
for l in (1, 2, 3):
self.line_terminator_check(b'qqq', l)
def numeric_terminator_check(self, termlen):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(termlen, s.port)
data = b"hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [data[:termlen]])
def test_numeric_terminator1(self):
# check that ints & longs both work (since type is
# explicitly checked in async_chat.handle_read)
self.numeric_terminator_check(1)
def test_numeric_terminator2(self):
self.numeric_terminator_check(6)
def test_none_terminator(self):
# Try reading a fixed number of bytes
s, event = start_echo_server()
c = echo_client(None, s.port)
data = b"hello world, I'm not dead yet!\n"
c.push(data)
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [])
self.assertEqual(c.buffer, data)
def test_simple_producer(self):
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
data = b"hello world\nI'm not dead yet!\n"
p = asynchat.simple_producer(data+SERVER_QUIT, buffer_size=8)
c.push_with_producer(p)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])
def test_string_producer(self):
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
data = b"hello world\nI'm not dead yet!\n"
c.push_with_producer(data+SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [b"hello world", b"I'm not dead yet!"])
def test_empty_line(self):
# checks that empty lines are handled correctly
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
c.push(b"hello world\n\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents,
[b"hello world", b"", b"I'm not dead yet!"])
def test_close_when_done(self):
s, event = start_echo_server()
s.start_resend_event = threading.Event()
c = echo_client(b'\n', s.port)
c.push(b"hello world\nI'm not dead yet!\n")
c.push(SERVER_QUIT)
c.close_when_done()
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
# Only allow the server to start echoing data back to the client after
# the client has closed its connection. This prevents a race condition
# where the server echoes all of its data before we can check that it
# got any down below.
s.start_resend_event.set()
s.join(timeout=TIMEOUT)
if s.is_alive():
self.fail("join() timed out")
self.assertEqual(c.contents, [])
# the server might have been able to send a byte or two back, but this
# at least checks that it received something and didn't just fail
# (which could still result in the client not having received anything)
self.assertGreater(len(s.buffer), 0)
def test_push(self):
# Issue #12523: push() should raise a TypeError if it doesn't get
# a bytes string
s, event = start_echo_server()
c = echo_client(b'\n', s.port)
data = b'bytes\n'
c.push(data)
c.push(bytearray(data))
c.push(memoryview(data))
self.assertRaises(TypeError, c.push, 10)
self.assertRaises(TypeError, c.push, 'unicode')
c.push(SERVER_QUIT)
asyncore.loop(use_poll=self.usepoll, count=300, timeout=.01)
s.join(timeout=TIMEOUT)
self.assertEqual(c.contents, [b'bytes', b'bytes', b'bytes'])
class TestAsynchat_WithPoll(TestAsynchat):
usepoll = True
class TestAsynchatMocked(unittest.TestCase):
def test_blockingioerror(self):
# Issue #16133: handle_read() must ignore BlockingIOError
sock = unittest.mock.Mock()
sock.recv.side_effect = BlockingIOError(errno.EAGAIN)
dispatcher = asynchat.async_chat()
dispatcher.set_socket(sock)
self.addCleanup(dispatcher.del_channel)
with unittest.mock.patch.object(dispatcher, 'handle_error') as error:
dispatcher.handle_read()
self.assertFalse(error.called)
class TestHelperFunctions(unittest.TestCase):
def test_find_prefix_at_end(self):
self.assertEqual(asynchat.find_prefix_at_end("qwerty\r", "\r\n"), 1)
self.assertEqual(asynchat.find_prefix_at_end("qwertydkjf", "\r\n"), 0)
class TestFifo(unittest.TestCase):
def test_basic(self):
f = asynchat.fifo()
f.push(7)
f.push(b'a')
self.assertEqual(len(f), 2)
self.assertEqual(f.first(), 7)
self.assertEqual(f.pop(), (1, 7))
self.assertEqual(len(f), 1)
self.assertEqual(f.first(), b'a')
self.assertEqual(f.is_empty(), False)
self.assertEqual(f.pop(), (1, b'a'))
self.assertEqual(len(f), 0)
self.assertEqual(f.is_empty(), True)
self.assertEqual(f.pop(), (0, None))
def test_given_list(self):
f = asynchat.fifo([b'x', 17, 3])
self.assertEqual(len(f), 3)
self.assertEqual(f.pop(), (1, b'x'))
self.assertEqual(f.pop(), (1, 17))
self.assertEqual(f.pop(), (1, 3))
self.assertEqual(f.pop(), (0, None))
class TestNotConnected(unittest.TestCase):
def test_disallow_negative_terminator(self):
# Issue #11259
client = asynchat.async_chat()
self.assertRaises(ValueError, client.set_terminator, -1)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | -7,138,717,779,159,256,000 | 32.516517 | 79 | 0.587582 | false |
jicruz/heroku-bot | lib/pip/_vendor/distlib/wheel.py | 412 | 39115 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'):
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fn = 'METADATA'
else:
fn = METADATA_FILENAME
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
except KeyError:
raise ValueError('Invalid wheel, because %s is '
'missing' % fn)
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
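    # Illustrative note on the method above: for a hypothetical input of
    #   b'#!/usr/bin/python -u\nprint("hi")\n'
    # process_shebang() returns
    #   b'#!python -u\nprint("hi")\n'
    # (b'#!pythonw' is used when the original interpreter is pythonw), and data
    # without any shebang simply gains a b'#!python' line in front.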
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
def write_record(self, records, record_path, base):
records = list(records) # make a copy for sorting
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
records.sort()
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
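    # Illustrative note on write_records(): each RECORD row has the shape
    #   ('mypkg/__init__.py', 'sha256=<urlsafe-b64 digest, padding stripped>', 1234)
    # (hypothetical path and size), and the RECORD file itself is listed with
    # empty digest and size fields.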
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
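    # Illustrative sketch of driving build() (hypothetical paths; assumes the
    # purelib directory already contains a mypkg-1.0.dist-info subdirectory):
    #   wheel = Wheel('mypkg-1.0')
    #   wheel.dirname = '/tmp/dist'
    #   paths = {'purelib': '/tmp/build/lib', 'scripts': '/tmp/build/scripts'}
    #   wheel.build(paths, tags={'pyver': ['py2', 'py3']})
    # which writes /tmp/dist/mypkg-1.0-py2.py3-none-any.whl and returns its path.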
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
        This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written.
The return value is a :class:`InstalledDistribution` instance unless
        ``lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True }
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
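    # Illustrative sketch of driving install(); the directory names are
    # hypothetical and ScriptMaker is assumed to come from distlib.scripts:
    #   from distlib.scripts import ScriptMaker
    #   maker = ScriptMaker(None, None)   # source/target dirs are set by install()
    #   paths = {'purelib': site_packages, 'platlib': site_packages,
    #            'scripts': bin_dir, 'headers': include_dir, 'data': data_dir,
    #            'prefix': prefix_dir}
    #   dist = Wheel('mypkg-1.0-py2.py3-none-any.whl').install(paths, maker)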
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
# The signature file won't be in RECORD,
                # and we don't currently do anything with it
if u_arcname.endswith('/RECORD.jws'):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
        where the contents of the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
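# Illustrative sketch of a modifier suitable for Wheel.update(): it receives a
# {archive path: extracted filesystem path} mapping, may rewrite any of those
# files, and returns True if it changed something. The '.txt' filter is
# hypothetical.
def _example_update_modifier(path_map, **kwargs):
    changed = False
    for arcname, fspath in path_map.items():
        if arcname.endswith('.txt'):
            with open(fspath, 'a') as f:
                f.write('\n# touched during update\n')
            changed = True
    return changed
# Typical call: Wheel('mypkg-1.0-py2.py3-none-any.whl').update(_example_update_modifier)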
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
m = re.match('(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
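# Illustrative sketch (hypothetical filename): parsing a wheel filename and
# checking it against the tags of the running interpreter.
def _example_compatibility_check():
    w = Wheel('mypkg-1.0-py2.py3-none-any.whl')
    # w.name == 'mypkg', w.version == '1.0',
    # w.pyver == ['py2', 'py3'], w.abi == ['none'], w.arch == ['any']
    return w.name, w.version, is_compatible(w)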
| gpl-3.0 | -3,028,837,808,793,968,600 | 38.994888 | 82 | 0.48567 | false |
nguyenppt/support-tools | googlecode-issues-exporter/github_issue_converter_test.py | 2 | 21195 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the GitHub Services."""
# pylint: disable=missing-docstring,protected-access
import collections
import httplib
import json
import unittest
import urlparse
import github_issue_converter
import issues
from issues_test import DEFAULT_USERNAME
from issues_test import SINGLE_COMMENT
from issues_test import SINGLE_ISSUE
from issues_test import COMMENT_ONE
from issues_test import COMMENT_TWO
from issues_test import COMMENT_THREE
from issues_test import COMMENTS_DATA
from issues_test import NO_ISSUE_DATA
from issues_test import USER_MAP
from issues_test import REPO
# The GitHub username.
GITHUB_USERNAME = DEFAULT_USERNAME
# The GitHub repo name.
GITHUB_REPO = REPO
# The GitHub oauth token.
GITHUB_TOKEN = "oauth_token"
# The URL used for calls to GitHub.
GITHUB_API_URL = "https://api.github.com"
class FakeGitHubService(github_issue_converter.GitHubService):
"""A fake of the GitHubService.
  This also allows for queueing of responses and their content into a response
  queue. For example, if you wanted a successful response and then a failure you
  would call AddSuccessfulResponse and then AddFailureResponse. Then when a call
  to _PerformHttpRequest is made the successful response is returned. The next
  call would then return the failed response.
  If no responses are in the queue a successful response with no content is
  returned.
Attributes:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
github_oauth_token: The oauth token to use for the requests.
"""
# pylint: disable=super-init-not-called
def __init__(self, github_owner_username, github_repo_name,
github_oauth_token):
"""Initialize the FakeGitHubService.
Args:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
github_oauth_token: The oauth token to use for the requests.
"""
self.github_owner_username = github_owner_username
self.github_repo_name = github_repo_name
self._github_oauth_token = github_oauth_token
self._action_queue = collections.deque([])
def AddSuccessfulResponse(self, content=None):
"""Adds a succesfull response with no content to the reponse queue."""
self.AddResponse(content=content)
def AddFailureResponse(self):
"""Adds a failed response with no content to the reponse queue."""
self.AddResponse(httplib.BAD_REQUEST)
def AddResponse(self, response=httplib.OK, content=None):
status = {"status": response}
full_response = {}
full_response["status"] = status
full_response["content"] = content if content else {}
self._action_queue.append(full_response)
def _PerformHttpRequest(self, method, url, body="{}", params=None):
if not self._action_queue:
return {"status": httplib.OK}, {}
full_response = self._action_queue.popleft()
return (full_response["status"], full_response["content"])
def PerformGetRequest(self, url, params=None):
"""Makes a fake GET request.
Args:
url: The URL to make the call to.
params: A dictionary of parameters to be used in the http call.
Returns:
A tuple of a fake response and fake content.
"""
return self._PerformHttpRequest("GET", url, params=params)
def PerformPostRequest(self, url, body):
"""Makes a POST request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of a fake response and content
"""
return self._PerformHttpRequest("POST", url, body=body)
def PerformPatchRequest(self, url, body):
"""Makes a PATCH request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of a fake response and content
"""
return self._PerformHttpRequest("PATCH", url, body=body)
class Http2Mock(object):
"""Mock httplib2.Http object. Only mocks out the request function.
This mock keeps track of the last url, method and body called.
Attributes:
response_success: Fake successful HTTP response.
response_failure: Fake failure HTTP response.
response: The response of the next HTTP request.
content: The content of the next HTTP request.
last_url: The last URL that an HTTP request was made to.
    last_method: The HTTP method used by the last request.
    last_body: The body of the last HTTP request.
"""
response_success = {"status": httplib.OK}
response_failure = {"status": httplib.BAD_REQUEST}
def __init__(self):
"""Initialize the Http2Mock."""
self.response = self.response_success
self.content = {}
self.last_url = None
self.last_method = None
self.last_body = None
def request(self, url, method, headers=None, body=None):
"""Makes a fake HTTP request.
Args:
url: The url to make the call to.
method: The type of call. POST, GET, etc.
      body: The body of the request.
Returns:
A tuple of a response and its content.
"""
self.last_url = url
self.last_method = method
self.last_body = body
return (self.response, json.dumps(self.content))
class TestGitHubService(unittest.TestCase):
"""Tests for the GitHubService."""
def setUp(self):
self.http_mock = Http2Mock()
self.github_service = github_issue_converter.GitHubService(
GITHUB_USERNAME, GITHUB_REPO, GITHUB_TOKEN,
rate_limit=False,
http_instance=self.http_mock)
def testSuccessfulRequestSuccess(self):
success = github_issue_converter._CheckSuccessful(
self.http_mock.response_success)
self.assertTrue(success)
def testSuccessfulRequestFailure(self):
failure = github_issue_converter._CheckSuccessful(
self.http_mock.response_failure)
self.assertFalse(failure)
def testGetRemainingRequestsRequestsLeft(self):
self.http_mock.content = {"rate": {"remaining": "500"}}
requests = self.github_service._GetRemainingRequests()
self.assertEqual(requests, 500)
def testGetRemainingRequestsNoRequestsLeft(self):
self.http_mock.content = {"rate": {"remaining": "0"}}
requests = self.github_service._GetRemainingRequests()
self.assertEqual(requests, 0)
def testGetRemainingRequestsBadResponse(self):
self.http_mock.content = {"bad": "content"}
requests = self.github_service._GetRemainingRequests()
self.assertEqual(requests, 0)
def testRequestLimitReachedLimitReached(self):
self.http_mock.content = {"rate": {"remaining": "0"}}
limit_reached = self.github_service._RequestLimitReached()
self.assertTrue(limit_reached)
def testRequestLimitReachedLimitNotReached(self):
self.http_mock.content = {"rate": {"remaining": "500"}}
limit_reached = self.github_service._RequestLimitReached()
self.assertFalse(limit_reached)
def testHttpRequest(self):
response, content = self.github_service._PerformHttpRequest("GET", "/test")
self.assertEqual(response, self.http_mock.response_success)
self.assertEqual(content, {})
self.assertEqual(self.http_mock.last_method, "GET")
uri = ("%s/test?access_token=%s" % (GITHUB_API_URL, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
def testHttpRequestParams(self):
params = {"one": 1, "two": 2}
response, content = self.github_service._PerformHttpRequest("POST",
"/test",
params=params)
self.assertEqual(response, self.http_mock.response_success)
self.assertEqual(content, {})
self.assertEqual(self.http_mock.last_method, "POST")
uri = ("%s/test?access_token=%s&one=1&two=2" %
(GITHUB_API_URL, GITHUB_TOKEN))
# pylint: disable=unpacking-non-sequence
(expected_scheme, expected_domain, expected_path, expected_params,
expected_query, expected_fragment) = urlparse.urlparse(uri)
expected_query_list = expected_query.split("&")
# pylint: disable=unpacking-non-sequence
(actual_scheme, actual_domain, actual_path, actual_params, actual_query,
actual_fragment) = urlparse.urlparse(self.http_mock.last_url)
actual_query_list = actual_query.split("&")
self.assertEqual(expected_scheme, actual_scheme)
self.assertEqual(expected_domain, actual_domain)
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_params, actual_params)
self.assertEqual(expected_fragment, actual_fragment)
self.assertItemsEqual(expected_query_list, actual_query_list)
def testGetRequest(self):
self.github_service.PerformGetRequest("/test")
self.assertEqual(self.http_mock.last_method, "GET")
def testPostRequest(self):
self.github_service.PerformPostRequest("/test", "")
self.assertEqual(self.http_mock.last_method, "POST")
def testPatchRequest(self):
self.github_service.PerformPatchRequest("/test", "")
self.assertEqual(self.http_mock.last_method, "PATCH")
class TestUserService(unittest.TestCase):
"""Tests for the UserService."""
def setUp(self):
self.github_service = FakeGitHubService(GITHUB_USERNAME,
GITHUB_REPO,
GITHUB_TOKEN)
self.github_user_service = github_issue_converter.UserService(
self.github_service)
def testIsUserTrue(self):
is_user = self.github_user_service.IsUser("username123")
self.assertTrue(is_user)
def testIsUserFalse(self):
self.github_service.AddFailureResponse()
is_user = self.github_user_service.IsUser("username321")
self.assertFalse(is_user)
class TestIssueService(unittest.TestCase):
"""Tests for the IssueService."""
def setUp(self):
self.http_mock = Http2Mock()
self.github_service = github_issue_converter.GitHubService(
GITHUB_USERNAME, GITHUB_REPO, GITHUB_TOKEN,
rate_limit=False,
http_instance=self.http_mock)
self.github_issue_service = github_issue_converter.IssueService(
self.github_service, comment_delay=0)
def testCreateIssue(self):
issue_body = {
"body": (
"```\none\n```\n\nOriginal issue reported on code.google.com by `a_uthor` on last year\n"
"- **Labels added**: added-label\n"
"- **Labels removed**: removed-label\n"),
"assignee": "default_username",
"labels": ["awesome", "great"],
"title": "issue_title",
}
self.http_mock.content = {"number": 1}
issue_number = self.github_issue_service.CreateIssue(SINGLE_ISSUE)
self.assertEqual(self.http_mock.last_method, "POST")
uri = ("%s/repos/%s/%s/issues?access_token=%s" %
(GITHUB_API_URL, GITHUB_USERNAME, GITHUB_REPO, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
self.assertEqual(self.http_mock.last_body, json.dumps(issue_body))
self.assertEqual(1, issue_number)
def testCloseIssue(self):
self.github_issue_service.CloseIssue(123)
self.assertEqual(self.http_mock.last_method, "PATCH")
uri = ("%s/repos/%s/%s/issues/%d?access_token=%s" %
(GITHUB_API_URL, GITHUB_USERNAME, GITHUB_REPO, 123, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
self.assertEqual(self.http_mock.last_body,
json.dumps({"state": "closed"}))
def testCreateComment(self):
comment_body = (
"```\none\n```\n\nOriginal issue reported on code.google.com "
"by `a_uthor` on last year\n"
"- **Labels added**: added-label\n"
"- **Labels removed**: removed-label\n")
self.github_issue_service.CreateComment(1, SINGLE_COMMENT)
self.assertEqual(self.http_mock.last_method, "POST")
uri = ("%s/repos/%s/%s/issues/%d/comments?access_token=%s" %
(GITHUB_API_URL, GITHUB_USERNAME, GITHUB_REPO, 1, GITHUB_TOKEN))
self.assertEqual(self.http_mock.last_url, uri)
self.assertEqual(self.http_mock.last_body,
json.dumps({"body": comment_body}))
def testGetIssueNumber(self):
issue = {"number": 1347}
issue_number = self.github_issue_service._GetIssueNumber(issue)
self.assertEqual(1347, issue_number)
def testGetIssues(self):
fake_github_service = FakeGitHubService(GITHUB_USERNAME,
GITHUB_REPO,
GITHUB_TOKEN)
github_issue_service = github_issue_converter.IssueService(
fake_github_service, comment_delay=0)
fake_github_service.AddFailureResponse()
with self.assertRaises(IOError):
github_issue_service.GetIssues()
class TestIssueExporter(unittest.TestCase):
"""Tests for the IssueService."""
def setUp(self):
self.github_service = FakeGitHubService(GITHUB_USERNAME,
GITHUB_REPO,
GITHUB_TOKEN)
self.github_user_service = github_issue_converter.UserService(
self.github_service)
self.github_issue_service = github_issue_converter.IssueService(
self.github_service, comment_delay=0)
self.issue_exporter = issues.IssueExporter(
self.github_issue_service, self.github_user_service,
NO_ISSUE_DATA, GITHUB_REPO, USER_MAP)
self.issue_exporter.Init()
self.TEST_ISSUE_DATA = [
{
"id": "1",
"number": "1",
"title": "Title1",
"state": "open",
"comments": {
"items": [COMMENT_ONE, COMMENT_TWO, COMMENT_THREE],
},
"labels": ["Type-Issue", "Priority-High"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User1"
},
},
{
"id": "2",
"number": "2",
"title": "Title2",
"state": "closed",
"owner": {"kind": "projecthosting#issuePerson",
"name": "User2"
},
"labels": [],
"comments": {
"items": [COMMENT_ONE],
},
},
{
"id": "3",
"number": "3",
"title": "Title3",
"state": "closed",
"comments": {
"items": [COMMENT_ONE, COMMENT_TWO],
},
"labels": ["Type-Defect"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User3"
}
}]
def testGetAllPreviousIssues(self):
self.assertEqual(0, len(self.issue_exporter._previously_created_issues))
content = [{"number": 1, "title": "issue_title", "comments": 2}]
self.github_service.AddResponse(content=content)
self.issue_exporter._GetAllPreviousIssues()
self.assertEqual(1, len(self.issue_exporter._previously_created_issues))
self.assertTrue("issue_title" in self.issue_exporter._previously_created_issues)
previous_issue = self.issue_exporter._previously_created_issues["issue_title"]
self.assertEqual(1, previous_issue["id"])
self.assertEqual("issue_title", previous_issue["title"])
self.assertEqual(2, previous_issue["comment_count"])
def testCreateIssue(self):
content = {"number": 1234}
self.github_service.AddResponse(content=content)
issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
self.assertEqual(1234, issue_number)
def testCreateIssueFailedOpenRequest(self):
self.github_service.AddFailureResponse()
with self.assertRaises(issues.ServiceError):
self.issue_exporter._CreateIssue(SINGLE_ISSUE)
def testCreateIssueFailedCloseRequest(self):
content = {"number": 1234}
self.github_service.AddResponse(content=content)
self.github_service.AddFailureResponse()
issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
self.assertEqual(1234, issue_number)
def testCreateComments(self):
self.assertEqual(0, self.issue_exporter._comment_number)
self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
self.assertEqual(4, self.issue_exporter._comment_number)
def testCreateCommentsFailure(self):
self.github_service.AddFailureResponse()
self.assertEqual(0, self.issue_exporter._comment_number)
with self.assertRaises(issues.ServiceError):
self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
def testStart(self):
self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
# Note: Some responses are from CreateIssues, others are from CreateComment.
self.github_service.AddResponse(content={"number": 1})
self.github_service.AddResponse(content={"number": 10})
self.github_service.AddResponse(content={"number": 11})
self.github_service.AddResponse(content={"number": 2})
self.github_service.AddResponse(content={"number": 20})
self.github_service.AddResponse(content={"number": 3})
self.github_service.AddResponse(content={"number": 30})
self.issue_exporter.Start()
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
# Comment counts are per issue and should match the numbers from the last
# issue created, minus one for the first comment, which is really
# the issue description.
self.assertEqual(1, self.issue_exporter._comment_number)
self.assertEqual(1, self.issue_exporter._comment_total)
def testStart_SkipDeletedComments(self):
comment = {
"content": "one",
"id": 1,
"published": "last year",
"author": {"name": "[email protected]"},
"updates": {
"labels": ["added-label", "-removed-label"],
},
}
self.issue_exporter._issue_json_data = [
{
"id": "1",
"number": "1",
"title": "Title1",
"state": "open",
"comments": {
"items": [
COMMENT_ONE,
comment,
COMMENT_TWO,
comment],
},
"labels": ["Type-Issue", "Priority-High"],
"owner": {"kind": "projecthosting#issuePerson",
"name": "User1"
},
}]
# Verify the comment data from the test issue references all comments.
self.github_service.AddResponse(content={"number": 1})
self.issue_exporter.Start()
# Remember, the first comment is for the issue.
self.assertEqual(3, self.issue_exporter._comment_number)
self.assertEqual(3, self.issue_exporter._comment_total)
# Set the deletedBy information for the comment object, now they
# should be ignored by the export.
comment["deletedBy"] = {}
self.github_service.AddResponse(content={"number": 1})
self.issue_exporter._previously_created_issues = {}
self.issue_exporter.Start()
self.assertEqual(1, self.issue_exporter._comment_number)
self.assertEqual(1, self.issue_exporter._comment_total)
def testStart_SkipAlreadyCreatedIssues(self):
self.issue_exporter._previously_created_issues["Title1"] = {
"id": 1,
"title": "Title1",
"comment_count": 3
}
self.issue_exporter._previously_created_issues["Title2"] = {
"id": 2,
"title": "Title2",
"comment_count": 1
}
self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
self.github_service.AddResponse(content={"number": 3})
self.issue_exporter.Start()
self.assertEqual(2, self.issue_exporter._skipped_issues)
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
def testStart_ReAddMissedComments(self):
self.issue_exporter._previously_created_issues["Title1"] = {
"id": 1,
"title": "Title1",
"comment_count": 1 # Missing 2 comments.
}
self.issue_exporter._issue_json_data = self.TEST_ISSUE_DATA
# First requests to re-add comments, then create issues.
self.github_service.AddResponse(content={"number": 11})
self.github_service.AddResponse(content={"number": 12})
self.github_service.AddResponse(content={"number": 2})
self.github_service.AddResponse(content={"number": 3})
self.issue_exporter.Start()
self.assertEqual(1, self.issue_exporter._skipped_issues)
self.assertEqual(3, self.issue_exporter._issue_total)
self.assertEqual(3, self.issue_exporter._issue_number)
if __name__ == "__main__":
unittest.main(buffer=True)
| apache-2.0 | -8,939,932,848,136,075,000 | 35.35506 | 101 | 0.657089 | false |
heke123/chromium-crosswalk | net/data/verify_certificate_chain_unittest/generate-expired-target-notBefore.py | 5 | 1182 | #!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 1 intermediary, where the target is expired (violates
validity.notBefore). Verification is expected to fail."""
import common
# Self-signed root certificate (part of trust store).
root = common.create_self_signed_root_certificate('Root')
root.set_validity_range(common.JANUARY_1_2015_UTC, common.JANUARY_1_2016_UTC)
# Intermediary certificate.
intermediary = common.create_intermediary_certificate('Intermediary', root)
intermediary.set_validity_range(common.JANUARY_1_2015_UTC,
common.JANUARY_1_2016_UTC)
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediary)
target.set_validity_range(common.MARCH_2_2015_UTC, common.JANUARY_1_2016_UTC)
chain = [target, intermediary]
trusted = [root]
# Both the root and intermediary are valid at this time, however the
# target is not.
time = common.MARCH_1_2015_UTC
verify_result = False
common.write_test_file(__doc__, chain, trusted, time, verify_result)
| bsd-3-clause | -723,594,222,017,862,900 | 35.9375 | 79 | 0.750423 | false |
qlands/onadata | onadata/apps/logger/migrations/0007_auto__add_field_xform_has_start_time.py | 13 | 6918 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'XForm.has_start_time'
db.add_column('odk_logger_xform', 'has_start_time', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
def backwards(self, orm):
# Deleting field 'XForm.has_start_time'
db.delete_column('odk_logger_xform', 'has_start_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.attachment': {
'Meta': {'object_name': 'Attachment'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['odk_logger.Instance']"}),
'media_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['logger']
| bsd-2-clause | 8,636,821,917,159,513,000 | 70.319588 | 182 | 0.55146 | false |
cstipkovic/spidermonkey-research | python/macholib/macholib/itergraphreport.py | 16 | 1929 | """
Utilities for creating dot output from a MachOGraph
XXX: need to rewrite this based on altgraph.Dot
"""
from collections import deque
try:
from itertools import imap
except ImportError:
imap = map
__all__ = ['itergraphreport']
def itergraphreport(nodes, describe_edge, name='G'):
edges = deque()
nodetoident = {}
mainedges = set()
def nodevisitor(node, data, outgoing, incoming):
return {'label': str(node)}
def edgevisitor(edge, data, head, tail):
return {}
yield 'digraph %s {\n' % (name,)
attr = dict(rankdir='LR', concentrate='true')
cpatt = '%s="%s"'
for item in attr.iteritems():
yield '\t%s;\n' % (cpatt % item,)
# find all packages (subgraphs)
for (node, data, outgoing, incoming) in nodes:
nodetoident[node] = getattr(data, 'identifier', node)
# create sets for subgraph, write out descriptions
for (node, data, outgoing, incoming) in nodes:
# update edges
for edge in imap(describe_edge, outgoing):
edges.append(edge)
# describe node
yield '\t"%s" [%s];\n' % (
node,
','.join([
(cpatt % item) for item in
nodevisitor(node, data, outgoing, incoming).iteritems()
]),
)
graph = []
while edges:
edge, data, head, tail = edges.popleft()
if data in ('run_file', 'load_dylib'):
graph.append((edge, data, head, tail))
def do_graph(edges, tabs):
edgestr = tabs + '"%s" -> "%s" [%s];\n'
# describe edge
for (edge, data, head, tail) in edges:
attribs = edgevisitor(edge, data, head, tail)
yield edgestr % (
head,
tail,
','.join([(cpatt % item) for item in attribs.iteritems()]),
)
for s in do_graph(graph, '\t'):
yield s
yield '}\n'
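# Illustrative usage sketch (not part of the original module): `graph` below is
# a hypothetical MachOGraph-like object whose `nodes` is a sequence of
# (node, data, outgoing, incoming) tuples and whose `describe_edge` maps an
# outgoing edge to an (edge, data, head, tail) tuple.
#     with open('macho.dot', 'w') as fp:
#         for line in itergraphreport(graph.nodes, graph.describe_edge, name='MachO'):
#             fp.write(line)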
| mpl-2.0 | -1,693,671,182,173,531,100 | 25.424658 | 75 | 0.543805 | false |
anoopcs9/samba | python/samba/tests/net_join.py | 6 | 2340 | # Unix SMB/CIFS implementation.
#
# Copyright (C) Catalyst.Net Ltd. 2017
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Confirm that net.join_member works
"""
import samba.tests
import os
from samba.net import Net, LIBNET_JOIN_AUTOMATIC
from samba.credentials import DONT_USE_KERBEROS
from samba import NTSTATUSError, ntstatus
import ctypes
class NetJoinTests(samba.tests.TestCaseInTempDir):
def setUp(self):
super(NetJoinTests, self).setUp()
self.domain = os.environ["DOMAIN"]
self.server = os.environ["SERVER"]
self.lp = self.get_loadparm()
self.lp.set("private dir", self.tempdir)
self.lp.set("lock dir", self.tempdir)
self.lp.set("state directory", self.tempdir)
def tearDown(self):
super(NetJoinTests, self).tearDown()
def test_net_join(self):
netbios_name = "NetJoinTest"
machinepass = "abcdefghij"
creds = self.insta_creds(template=self.get_credentials(),
kerberos_state=DONT_USE_KERBEROS)
net = Net(creds, self.lp, server=self.server)
# NOTE WELL: We must not run more than one successful
# net.join_member per file (process), as the shared
# secrets.ldb handle will be kept between runs.
try:
(join_password, sid, domain_name) = net.join_member(
self.domain, netbios_name, LIBNET_JOIN_AUTOMATIC,
machinepass=machinepass)
except NTSTATUSError as e:
code = ctypes.c_uint32(e[0]).value
if code == ntstatus.NT_STATUS_CONNECTION_DISCONNECTED:
self.fail("Connection failure")
raise
os.unlink(os.path.join(self.tempdir, "secrets.ldb"))
pass
| gpl-3.0 | 1,980,999,309,935,555,300 | 35 | 71 | 0.665812 | false |
plusk01/roboclaw-v5 | examples/roboclaw_speeddistance.py | 2 | 2263 | #***Before using this example the motor/controller combination must be
#***tuned and the settings saved to the Roboclaw using IonMotion.
#***The Min and Max Positions must be at least 0 and 50000
import time
import roboclaw
def displayspeed():
enc1 = roboclaw.ReadEncM1(address)
enc2 = roboclaw.ReadEncM2(address)
speed1 = roboclaw.ReadSpeedM1(address)
speed2 = roboclaw.ReadSpeedM2(address)
print("Encoder1:"),
if(enc1[0]==1):
print enc1[1],
print format(enc1[2],'02x'),
else:
print "failed",
print "Encoder2:",
if(enc2[0]==1):
print enc2[1],
print format(enc2[2],'02x'),
else:
print "failed " ,
print "Speed1:",
if(speed1[0]):
print speed1[1],
else:
print "failed",
print("Speed2:"),
if(speed2[0]):
print speed2[1]
else:
print "failed "
#Windows comport name
roboclaw.Open("COM3",38400)
#Linux comport name
#roboclaw.Open("/dev/ttyACM0",115200)
address = 0x80
version = roboclaw.ReadVersion(address)
if version[0]==False:
print "GETVERSION Failed"
else:
print repr(version[1])
while(1):
roboclaw.SpeedDistanceM1(address,12000,48000,1)
roboclaw.SpeedDistanceM2(address,-12000,48000,1)
buffers = (0,0,0)
while(buffers[1]!=0x80 and buffers[2]!=0x80): #Loop until distance command has completed
displayspeed();
buffers = roboclaw.ReadBuffers(address);
time.sleep(2)
roboclaw.SpeedDistanceM1(address,-12000,48000,1)
roboclaw.SpeedDistanceM2(address,12000,48000,1)
buffers = (0,0,0)
while(buffers[1]!=0x80 and buffers[2]!=0x80): #Loop until distance command has completed
displayspeed()
buffers = roboclaw.ReadBuffers(address)
time.sleep(2); #When no second command is given the motors will automatically slow down to 0 which takes 1 second
roboclaw.SpeedDistanceM1(address,12000,48000,1)
roboclaw.SpeedDistanceM2(address,-12000,48000,1)
roboclaw.SpeedDistanceM1(address,-12000,48000,0)
roboclaw.SpeedDistanceM2(address,12000,48000,0)
roboclaw.SpeedDistanceM1(address,0,48000,0)
roboclaw.SpeedDistanceM2(address,0,48000,0)
buffers = (0,0,0)
while(buffers[1]!=0x80 and buffers[2]!=0x80): #Loop until distance command has completed
displayspeed()
buffers = roboclaw.ReadBuffers(address)
time.sleep(1)
| mit | -387,954,521,332,675,600 | 26.2875 | 115 | 0.709236 | false |
s142857/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_stream.py | 446 | 6264 | from __future__ import absolute_import, division, unicode_literals
from . import support # flake8: noqa
import unittest
import codecs
from io import BytesIO
from six.moves import http_client
from html5lib.inputstream import (BufferedStream, HTMLInputStream,
HTMLUnicodeInputStream, HTMLBinaryInputStream)
class BufferedStreamTest(unittest.TestCase):
def test_basic(self):
s = b"abc"
fp = BufferedStream(BytesIO(s))
read = fp.read(10)
assert read == s
def test_read_length(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert read1 == b"a"
read2 = fp.read(2)
assert read2 == b"bc"
read3 = fp.read(3)
assert read3 == b"def"
read4 = fp.read(4)
assert read4 == b""
def test_tell(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert fp.tell() == 1
read2 = fp.read(2)
assert fp.tell() == 3
read3 = fp.read(3)
assert fp.tell() == 6
read4 = fp.read(4)
assert fp.tell() == 6
def test_seek(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert read1 == b"a"
fp.seek(0)
read2 = fp.read(1)
assert read2 == b"a"
read3 = fp.read(2)
assert read3 == b"bc"
fp.seek(2)
read4 = fp.read(2)
assert read4 == b"cd"
fp.seek(4)
read5 = fp.read(2)
assert read5 == b"ef"
def test_seek_tell(self):
fp = BufferedStream(BytesIO(b"abcdef"))
read1 = fp.read(1)
assert fp.tell() == 1
fp.seek(0)
read2 = fp.read(1)
assert fp.tell() == 1
read3 = fp.read(2)
assert fp.tell() == 3
fp.seek(2)
read4 = fp.read(2)
assert fp.tell() == 4
fp.seek(4)
read5 = fp.read(2)
assert fp.tell() == 6
class HTMLUnicodeInputStreamShortChunk(HTMLUnicodeInputStream):
_defaultChunkSize = 2
class HTMLBinaryInputStreamShortChunk(HTMLBinaryInputStream):
_defaultChunkSize = 2
class HTMLInputStreamTest(unittest.TestCase):
def test_char_ascii(self):
stream = HTMLInputStream(b"'", encoding='ascii')
self.assertEqual(stream.charEncoding[0], 'ascii')
self.assertEqual(stream.char(), "'")
def test_char_utf8(self):
stream = HTMLInputStream('\u2018'.encode('utf-8'), encoding='utf-8')
self.assertEqual(stream.charEncoding[0], 'utf-8')
self.assertEqual(stream.char(), '\u2018')
def test_char_win1252(self):
stream = HTMLInputStream("\xa9\xf1\u2019".encode('windows-1252'))
self.assertEqual(stream.charEncoding[0], 'windows-1252')
self.assertEqual(stream.char(), "\xa9")
self.assertEqual(stream.char(), "\xf1")
self.assertEqual(stream.char(), "\u2019")
def test_bom(self):
stream = HTMLInputStream(codecs.BOM_UTF8 + b"'")
self.assertEqual(stream.charEncoding[0], 'utf-8')
self.assertEqual(stream.char(), "'")
def test_utf_16(self):
stream = HTMLInputStream((' ' * 1025).encode('utf-16'))
self.assertTrue(stream.charEncoding[0] in ['utf-16-le', 'utf-16-be'], stream.charEncoding)
self.assertEqual(len(stream.charsUntil(' ', True)), 1025)
def test_newlines(self):
stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\r\nccc\rddddxe")
self.assertEqual(stream.position(), (1, 0))
self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
self.assertEqual(stream.position(), (3, 0))
self.assertEqual(stream.charsUntil('x'), "ccc\ndddd")
self.assertEqual(stream.position(), (4, 4))
self.assertEqual(stream.charsUntil('e'), "x")
self.assertEqual(stream.position(), (4, 5))
def test_newlines2(self):
size = HTMLUnicodeInputStream._defaultChunkSize
stream = HTMLInputStream("\r" * size + "\n")
self.assertEqual(stream.charsUntil('x'), "\n" * size)
def test_position(self):
stream = HTMLBinaryInputStreamShortChunk(codecs.BOM_UTF8 + b"a\nbb\nccc\nddde\nf\ngh")
self.assertEqual(stream.position(), (1, 0))
self.assertEqual(stream.charsUntil('c'), "a\nbb\n")
self.assertEqual(stream.position(), (3, 0))
stream.unget("\n")
self.assertEqual(stream.position(), (2, 2))
self.assertEqual(stream.charsUntil('c'), "\n")
self.assertEqual(stream.position(), (3, 0))
stream.unget("\n")
self.assertEqual(stream.position(), (2, 2))
self.assertEqual(stream.char(), "\n")
self.assertEqual(stream.position(), (3, 0))
self.assertEqual(stream.charsUntil('e'), "ccc\nddd")
self.assertEqual(stream.position(), (4, 3))
self.assertEqual(stream.charsUntil('h'), "e\nf\ng")
self.assertEqual(stream.position(), (6, 1))
def test_position2(self):
stream = HTMLUnicodeInputStreamShortChunk("abc\nd")
self.assertEqual(stream.position(), (1, 0))
self.assertEqual(stream.char(), "a")
self.assertEqual(stream.position(), (1, 1))
self.assertEqual(stream.char(), "b")
self.assertEqual(stream.position(), (1, 2))
self.assertEqual(stream.char(), "c")
self.assertEqual(stream.position(), (1, 3))
self.assertEqual(stream.char(), "\n")
self.assertEqual(stream.position(), (2, 0))
self.assertEqual(stream.char(), "d")
self.assertEqual(stream.position(), (2, 1))
def test_python_issue_20007(self):
"""
Make sure we have a work-around for Python bug #20007
http://bugs.python.org/issue20007
"""
class FakeSocket(object):
def makefile(self, _mode, _bufsize=None):
return BytesIO(b"HTTP/1.1 200 Ok\r\n\r\nText")
source = http_client.HTTPResponse(FakeSocket())
source.begin()
stream = HTMLInputStream(source)
self.assertEqual(stream.charsUntil(" "), "Text")
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == '__main__':
main()
| mpl-2.0 | 8,543,401,352,933,068,000 | 33.229508 | 98 | 0.595626 | false |
Lilykos/invenio | invenio/legacy/pdfchecker/__init__.py | 13 | 1091 | #!@PYTHON@
# -*- mode: python; coding: utf-8; -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Arxiv Pdf Checker Task.
It periodically checks that arXiv papers are not missing their PDFs.
"""
from invenio.base.factory import with_app_context
@with_app_context()
def main():
from .arxiv import main as arxiv_main
return arxiv_main()
| gpl-2.0 | 5,489,555,657,419,852,000 | 34.193548 | 74 | 0.737855 | false |
keyurpatel076/MissionPlannerGit | Lib/site-packages/numpy/core/_internal.py | 53 | 18020 | #A place for code to be called from C-code
# that implements more complicated stuff.
import re
import sys
import warnings
from numpy.compat import asbytes, bytes
if (sys.byteorder == 'little'):
_nbo = asbytes('<')
else:
_nbo = asbytes('>')
def _makenames_list(adict):
from numpy.core import dtype
allfields = []
fnames = adict.keys()
for fname in fnames:
obj = adict[fname]
n = len(obj)
if not isinstance(obj, tuple) or n not in [2,3]:
raise ValueError("entry not a 2- or 3- tuple")
if (n > 2) and (obj[2] == fname):
continue
num = int(obj[1])
if (num < 0):
raise ValueError("invalid offset.")
format = dtype(obj[0])
if (format.itemsize == 0):
raise ValueError("all itemsizes must be fixed.")
if (n > 2):
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
from numpy.core import dtype
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if (len(res) > 2):
titles.append(res[2])
else:
titles.append(None)
return dtype({"names" : names,
"formats" : formats,
"offsets" : offsets,
"titles" : titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.dtinfo is None:
return descriptor.str
else:
# TODO: This used to put a copy of the metadata
# in the tuple. Now we put in the dtinfo tuple.
# I have no idea if this is right.
new = descriptor.dtinfo
return (descriptor.str, new)
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('','|V%d' % num))
offset += num
if len(field) > 3:
name = (field[2],field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
return result
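# For example (illustrative): a flat structured dtype round-trips to its
# array-protocol descriptor list,
#     _array_descr(dtype([('x', '<f8'), ('y', '<i4')])) -> [('x', '<f8'), ('y', '<i4')]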
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
from multiarray import ndarray
return ndarray.__new__(subtype, shape, dtype)
# format_re and _split were taken from numarray by J. Todd Miller
def _split(input):
"""Split the input formats string into field formats without splitting
the tuple used to specify multi-dimensional arrays."""
newlist = []
hold = asbytes('')
listinput = input.split(asbytes(','))
for element in listinput:
if hold != asbytes(''):
item = hold + asbytes(',') + element
else:
item = element
left = item.count(asbytes('('))
right = item.count(asbytes(')'))
# if the parenthesis is not balanced, hold the string
if left > right :
hold = item
# when balanced, append to the output list and reset the hold
elif left == right:
newlist.append(item.strip())
hold = asbytes('')
# too many close parenthesis is unacceptable
else:
raise SyntaxError(item)
# if there is string left over in hold
if hold != asbytes(''):
raise SyntaxError(hold)
return newlist
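# For example (illustrative): commas inside a parenthesised shape are kept
# together,
#     _split(asbytes('3f8,(2,3)f4,i4')) -> [b'3f8', b'(2,3)f4', b'i4']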
format_datetime = re.compile(r"""
(?P<typecode>M8|m8|datetime64|timedelta64)
([[]
((?P<num>\d+)?
(?P<baseunit>Y|M|W|B|D|h|m|s|ms|us|ns|ps|fs|as)
(/(?P<den>\d+))?
[]])
(//(?P<events>\d+))?)?""", re.X)
# Return (baseunit, num, den, events), datetime
# from date-time string
def _datetimestring(astr):
res = format_datetime.match(astr)
if res is None:
raise ValueError("Incorrect date-time string.")
typecode = res.group('typecode')
datetime = (typecode == asbytes('M8') or typecode == asbytes('datetime64'))
defaults = [asbytes('us'), 1, 1, 1]
names = ['baseunit', 'num', 'den', 'events']
func = [bytes, int, int, int]
dt_tuple = []
for i, name in enumerate(names):
value = res.group(name)
if value:
dt_tuple.append(func[i](value))
else:
dt_tuple.append(defaults[i])
return tuple(dt_tuple), datetime
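# For example (illustrative): _datetimestring(asbytes('M8[us]')) returns
# ((b'us', 1, 1, 1), True), i.e. (baseunit, num, den, events) plus a flag that
# is True for datetime64 and False for timedelta64.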
format_re = re.compile(asbytes(r'(?P<order1>[<>|=]?)(?P<repeats> *[(]?[ ,0-9]*[)]? *)(?P<order2>[<>|=]?)(?P<dtype>[A-Za-z0-9.]*)'))
# astr is a string (perhaps comma separated)
_convorder = {asbytes('='): _nbo}
def _commastring(astr):
res = _split(astr)
if (len(res)) < 1:
raise ValueError("unrecognized formant")
result = []
for k,item in enumerate(res):
# convert item
try:
(order1, repeats, order2, dtype) = format_re.match(item).groups()
except (TypeError, AttributeError):
raise ValueError('format %s is not recognized' % item)
if order2 == asbytes(''):
order = order1
elif order1 == asbytes(''):
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2))
order = order1
if order in [asbytes('|'), asbytes('='), _nbo]:
order = asbytes('')
dtype = order + dtype
if (repeats == asbytes('')):
newitem = dtype
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
return result
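# For example (illustrative): each comma-separated field becomes one entry,
# with a leading shape turned into a (dtype, shape) tuple,
#     _commastring(asbytes('>i4, (2,3)f8')) -> [b'>i4', (b'f8', (2, 3))]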
def _getintp_ctype():
from numpy.core import dtype
val = _getintp_ctype.cache
if val is not None:
return val
char = dtype('p').char
import ctypes
if (char == 'i'):
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes(object):
def cast(self, num, obj):
return num
def c_void_p(self, num):
return num
class _ctypes(object):
def __init__(self, array, ptr=None):
try:
import ctypes
self._ctypes = ctypes
except ImportError:
self._ctypes = _missing_ctypes()
self._arr = array
self._data = ptr
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
def get_data(self):
return self._data
def get_shape(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.shape)
def get_strides(self):
if self._zerod:
return None
return (_getintp_ctype()*self._arr.ndim)(*self._arr.strides)
def get_as_parameter(self):
return self._ctypes.c_void_p(self._data)
data = property(get_data, None, doc="c-types data")
shape = property(get_shape, None, doc="c-types shape")
strides = property(get_strides, None, doc="c-types strides")
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
# Given a datatype and an order object
# return a new names tuple
# with the order indicated
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
raise ValueError("unknown field name: %s" % (name,))
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
# Given an array with fields and a sequence of field names
# construct a new array with just those fields copied over
def _index_fields(ary, fields):
from multiarray import empty, dtype
dt = ary.dtype
new_dtype = [(name, dt[name]) for name in dt.names if name in fields]
future_dtype = [(name, dt[name]) for name in fields if name in dt.names]
if not new_dtype == future_dtype:
depdoc = "Out of order field selection on recarrays currently returns \
fields in order. This behavior is deprecated in numpy 1.5 and will change in \
2.0. See ticket #1431."
warnings.warn(depdoc, DeprecationWarning)
if ary.flags.f_contiguous:
order = 'F'
else:
order = 'C'
newarray = empty(ary.shape, dtype=new_dtype, order=order)
for name in fields:
newarray[name] = ary[name]
return newarray
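# For example (illustrative): given a structured array `arr` with fields
# 'x', 'y' and 'z', _index_fields(arr, ['x', 'z']) returns a new array whose
# dtype holds only 'x' and 'z', with the data copied field by field.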
# Given a string containing a PEP 3118 format specifier,
# construct a Numpy dtype
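# For example (illustrative, standard type sizes assumed): the specifier
# "T{<l:x:>d:y:}" describes a struct with a little-endian 4-byte integer field
# "x" followed by a big-endian double field "y".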
_pep3118_native_map = {
'?': '?',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
from numpy.core import dtype
fields = {}
offset = 0
explicit_name = False
this_explicit_name = False
common_alignment = 1
is_padding = False
last_offset = 0
dummy_name_index = [0]
def next_dummy_name():
dummy_name_index[0] += 1
def get_dummy_name():
while True:
name = 'f%d' % dummy_name_index[0]
if name not in fields:
return name
next_dummy_name()
# Parse spec
while spec:
value = None
# End of structure, bail out to upper level
if spec[0] == '}':
spec = spec[1:]
break
# Sub-arrays (1)
shape = None
if spec[0] == '(':
j = spec.index(')')
shape = tuple(map(int, spec[1:j].split(',')))
spec = spec[j+1:]
# Byte order
if spec[0] in ('@', '=', '<', '>', '^', '!'):
byteorder = spec[0]
if byteorder == '!':
byteorder = '>'
spec = spec[1:]
# Byte order characters also control native vs. standard type sizes
if byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize = 1
if spec[0].isdigit():
j = 1
for j in xrange(1, len(spec)):
if not spec[j].isdigit():
break
itemsize = int(spec[:j])
spec = spec[j:]
# Data types
is_padding = False
if spec[:2] == 'T{':
value, spec, align, next_byteorder = _dtype_from_pep3118(
spec[2:], byteorder=byteorder, is_subdtype=True)
elif spec[0] in type_map_chars:
next_byteorder = byteorder
if spec[0] == 'Z':
j = 2
else:
j = 1
typechar = spec[:j]
spec = spec[j:]
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(byteorder, byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % spec)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = (align*common_alignment
/ _gcd(align, common_alignment))
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
this_explicit_name = False
if spec and spec.startswith(':'):
i = spec[1:].index(':') + 1
name = spec[1:i]
spec = spec[i+1:]
explicit_name = True
this_explicit_name = True
else:
name = get_dummy_name()
if not is_padding or this_explicit_name:
if name in fields:
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
fields[name] = (value, offset)
last_offset = offset
if not this_explicit_name:
next_dummy_name()
byteorder = next_byteorder
offset += value.itemsize
offset += extra_offset
# Check if this was a simple 1-item type
if len(fields.keys()) == 1 and not explicit_name and fields['f0'][1] == 0 \
and not is_subdtype:
ret = fields['f0'][0]
else:
ret = dtype(fields)
# Trailing padding must be explicitly added
padding = offset - ret.itemsize
if byteorder == '@':
padding += (-offset) % common_alignment
if is_padding and not this_explicit_name:
ret = _add_trailing_padding(ret, padding)
# Finished
if is_subdtype:
return ret, spec, common_alignment, byteorder
else:
return ret
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
from numpy.core import dtype
if value.fields is None:
vfields = {'f0': (value, 0)}
else:
vfields = dict(value.fields)
if value.names and value.names[-1] == '' and \
value[''].char == 'V':
# A trailing padding field is already present
vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
vfields[''][1])
value = dtype(vfields)
else:
# Get a free name for the padding field
j = 0
while True:
name = 'pad%d' % j
if name not in vfields:
vfields[name] = ('V%d' % padding, value.itemsize)
break
j += 1
value = dtype(vfields)
if '' not in vfields:
# Strip out the name of the padding field
names = list(value.names)
names[-1] = ''
value.names = tuple(names)
return value
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a%b
return a
| gpl-3.0 | -3,881,838,203,730,782,000 | 28.205835 | 131 | 0.535461 | false |
neuroidss/nupic.research | projects/sequence_classification/run_encoder_with_union.py | 9 | 8995 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run the sequence classification experiment with
Input -> RDSE encoder -> Union model.
Search for the optimal union window.
One needs to run the script "run_encoder_only.py" first to get the
optimal encoder resolution.
"""
import pickle
import time
import matplotlib.pyplot as plt
import multiprocessing
from util_functions import *
from nupic.encoders.random_distributed_scalar import RandomDistributedScalarEncoder
plt.ion()
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams.update({'figure.autolayout': True})
def unionForOneSequence(activeColumns, unionLength=1):
activeColumnTrace = []
unionStepInBatch = 0
unionBatchIdx = 0
unionCols = set()
for t in range(len(activeColumns)):
unionCols = unionCols.union(activeColumns[t])
unionStepInBatch += 1
if unionStepInBatch == unionLength:
activeColumnTrace.append(unionCols)
unionStepInBatch = 0
unionBatchIdx += 1
unionCols = set()
if unionStepInBatch > 0:
activeColumnTrace.append(unionCols)
return activeColumnTrace
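# Illustrative sketch (not part of the original script): with unionLength=2 the
# per-timestep active columns [{1}, {2}, {3}, {4}, {5}] are merged pairwise into
# [{1, 2}, {3, 4}, {5}]; a trailing partial window is kept as its own union.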
def runUnionStep(activeColumns, unionLength=1):
"""
Apply a union window to the pre-computed encoder output of every sequence.
:param activeColumns: per-sequence lists of active-column sets, one set per timestep
:param unionLength: number of consecutive timesteps merged into each union
:return: per-sequence lists of unioned active-column sets
"""
numSequence = len(activeColumns)
activeColumnUnionTrace = []
for i in range(numSequence):
activeColumnTrace = unionForOneSequence(activeColumns[i], unionLength)
activeColumnUnionTrace.append(activeColumnTrace)
# print "{} out of {} done ".format(i, numSequence)
return activeColumnUnionTrace
def runEncoderOverDataset(encoder, dataset):
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
return activeColumnsData
def calcualteEncoderModelWorker(taskQueue, resultQueue, *args):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
nBuckets = nextTask["nBuckets"]
accuracyColumnOnly = calculateEncoderModelAccuracy(nBuckets, *args)
resultQueue.put({nBuckets: accuracyColumnOnly})
print "Column Only model, Resolution: {} Accuracy: {}".format(
nBuckets, accuracyColumnOnly)
return
def calculateEncoderModelAccuracy(nBuckets, numCols, w, trainData, trainLabel):
maxValue = np.max(trainData)
minValue = np.min(trainData)
resolution = (maxValue - minValue) / nBuckets
encoder = RandomDistributedScalarEncoder(resolution, w=w, n=numCols)
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnsTrain)
meanAccuracy, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel, trainLabel)
accuracyColumnOnly = np.mean(outcomeColumn)
return accuracyColumnOnly
def runDataSet(dataName, datasetName):
if not os.path.exists('results'):
os.makedirs('results')
trainData, trainLabel, testData, testLabel = loadDataset(dataName,
datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel).tolist()
numClass = len(classList)
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
if (max(numTrain, numTest) * sequenceLength < 600 * 600):
print "skip this small dataset for now"
return
try:
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
expResultTM = pickle.load(
open('results/modelPerformance/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'r'))
return
except:
print "run data set: ", dataName
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = calculateEuclideanModelAccuracy(trainData, trainLabel,
testData, testLabel)
accuracyEuclideanDist = np.mean(outcomeEuclidean)
print
print "Euclidean model accuracy: {}".format(accuracyEuclideanDist)
print
# # Use SDR overlap instead of Euclidean distance
print "Running Encoder model"
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
nBucketList = searchResolution['nBucketList']
accuracyVsResolution = searchResolution['accuracyVsResolution']
optNumBucket = nBucketList[smoothArgMax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue) / optNumBucket
except:
return
print "optimal bucket # {}".format((maxValue - minValue) / optimalResolution)
encoder = RandomDistributedScalarEncoder(optimalResolution, w=w, n=numCols)
print "encoding train data ..."
activeColumnsTrain = runEncoderOverDataset(encoder, trainData)
print "encoding test data ..."
activeColumnsTest = runEncoderOverDataset(encoder, testData)
print "calculate column distance matrix ..."
# run encoder -> union model, search for the optimal union window
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
activeColumnUnionTrain = runUnionStep(activeColumnsTrain, unionLength)
activeColumnUnionTest = runUnionStep(activeColumnsTest, unionLength)
distMatColumnTrain = calculateDistanceMatTrain(activeColumnUnionTrain)
distMatColumnTest = calculateDistanceMat(activeColumnUnionTest,
activeColumnUnionTrain)
trainAccuracyColumnOnly, outcomeColumn = calculateAccuracy(distMatColumnTrain,
trainLabel,
trainLabel)
testAccuracyColumnOnly, outcomeColumn = calculateAccuracy(distMatColumnTest,
trainLabel,
testLabel)
expResults = {'distMatColumnTrain': distMatColumnTrain,
'distMatColumnTest': distMatColumnTest,
'trainAccuracyColumnOnly': trainAccuracyColumnOnly,
'testAccuracyColumnOnly': testAccuracyColumnOnly}
if not os.path.exists('results/distanceMat'):
os.makedirs('results/distanceMat')
outputFile = open('results/distanceMat/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'w')
pickle.dump(expResults, outputFile)
outputFile.close()
print '--> wrote results to "results/distanceMat"'
def runDataSetWorker(taskQueue, datasetName):
while True:
nextTask = taskQueue.get()
print "Next task is : ", nextTask
if nextTask is None:
break
dataName = nextTask["dataName"]
runDataSet(dataName, datasetName)
return
if __name__ == "__main__":
datasetName = "SyntheticData"
dataSetList = listDataSets(datasetName)
datasetName = 'UCR_TS_Archive_2015'
dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
numCPU = multiprocessing.cpu_count()
numWorker = 2
# Establish communication queues
taskQueue = multiprocessing.JoinableQueue()
for dataName in dataSetList:
taskQueue.put({"dataName": dataName,
"datasetName": datasetName})
for _ in range(numWorker):
taskQueue.put(None)
jobs = []
for i in range(numWorker):
print "Start process ", i
p = multiprocessing.Process(target=runDataSetWorker,
args=(taskQueue, datasetName))
jobs.append(p)
p.daemon = True
p.start()
while not taskQueue.empty():
time.sleep(5)
| agpl-3.0 | 8,369,572,605,949,795,000 | 32.069853 | 83 | 0.68438 | false |
liushuaikobe/evermd | lib/thrift/transport/TTwisted.py | 74 | 6547 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from zope.interface import implements, Interface, Attribute
from twisted.internet.protocol import Protocol, ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.protocols import basic
from twisted.python import log
from twisted.web import server, resource, http
from thrift.transport import TTransport
from cStringIO import StringIO
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = StringIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = StringIO()
self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
for k,v in self.client._reqs.iteritems():
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
v.errback(tex)
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class ThriftServerFactory(ServerFactory):
implements(IThriftServerFactory)
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
class ThriftClientFactory(ClientFactory):
implements(IThriftClientFactory)
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
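# Illustrative usage sketch (assumption, not part of the original module;
# SomeService.Client and pfactory are hypothetical stand-ins):
#     from twisted.internet.protocol import ClientCreator
#     creator = ClientCreator(reactor, ThriftClientProtocol, SomeService.Client, pfactory)
#     d = creator.connectTCP('localhost', 9090)
#     d.addCallback(lambda proto: proto.started)  # fires with the ready client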
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
| gpl-2.0 | 8,268,285,665,951,202,000 | 28.894977 | 79 | 0.670536 | false |
azazel75/LasaurApp | docs/conf.py | 1 | 9717 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# LasaurApp documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 10 02:42:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.aafig'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'LasaurApp'
copyright = '2016, Stefan Hechenberger'
author = 'Stefan Hechenberger'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '14.11'
# The full version, including alpha/beta/rc tags.
release = '14.11b'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'LasaurApp v14.11b'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' users can customize the `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'LasaurAppdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LasaurApp.tex', 'LasaurApp Documentation',
'Stefan Hechenberger', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lasaurapp', 'LasaurApp Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LasaurApp', 'LasaurApp Documentation',
author, 'LasaurApp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| gpl-3.0 | 1,352,571,625,436,426,200 | 27.748521 | 80 | 0.692601 | false |
b-cannon/my_djae | djangae/db/utils.py | 1 | 10507 | #STANDARD LIB
from datetime import datetime
from decimal import Decimal
from itertools import chain
import warnings
#LIBRARIES
from django.conf import settings
from django.db import models
from django.db.backends.util import format_number
from django.db import IntegrityError
from django.utils import timezone
from google.appengine.api import datastore
from google.appengine.api.datastore import Key, Query
#DJANGAE
from djangae.utils import memoized
from djangae.indexing import special_indexes_for_column, REQUIRES_SPECIAL_INDEXES
from djangae.db.backends.appengine.dbapi import CouldBeSupportedError
def make_timezone_naive(value):
if value is None:
return None
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("Djangae backend does not support timezone-aware datetimes when USE_TZ is False.")
return value
@memoized
def get_model_from_db_table(db_table):
for model in models.get_models(include_auto_created=True, only_installed=False):
if model._meta.db_table == db_table:
return model
def decimal_to_string(value, max_digits=16, decimal_places=0):
"""
Converts decimal to a unicode string for storage / lookup by nonrel
databases that don't support decimals natively.
This is an extension to `django.db.backends.util.format_number`
that preserves order -- if one decimal is less than another, their
string representations should compare the same (as strings).
TODO: Can't this be done using string.format()?
Not in Python 2.5, str.format is backported to 2.6 only.
"""
# Handle sign separately.
if value.is_signed():
sign = u'-'
value = abs(value)
else:
sign = u''
# Let Django quantize and cast to a string.
value = format_number(value, max_digits, decimal_places)
# Pad with zeroes to a constant width.
n = value.find('.')
if n < 0:
n = len(value)
if n < max_digits - decimal_places:
value = u'0' * (max_digits - decimal_places - n) + value
return sign + value
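# Illustrative only (not part of the original module): with the default
# max_digits=16 and decimal_places=0, the zero padding keeps lexicographic
# string order consistent with numeric order, e.g.
#   decimal_to_string(Decimal('7'))   == u'0000000000000007'
#   decimal_to_string(Decimal('-7'))  == u'-0000000000000007'
#   u'0000000000000007' < u'0000000000000012'   # matches 7 < 12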
def normalise_field_value(value):
""" Converts a field value to a common type/format to make comparable to another. """
if isinstance(value, datetime):
return make_timezone_naive(value)
elif isinstance(value, Decimal):
return decimal_to_string(value)
return value
def get_datastore_kind(model):
return get_top_concrete_parent(model)._meta.db_table
def get_prepared_db_value(connection, instance, field, raw=False):
value = getattr(instance, field.attname) if raw else field.pre_save(instance, instance._state.adding)
if hasattr(value, "prepare_database_save"):
value = value.prepare_database_save(field)
else:
value = field.get_db_prep_save(
value,
connection=connection
)
value = connection.ops.value_for_db(value, field)
return value
def get_concrete_parents(model, ignore_leaf=False):
ret = [x for x in model.mro() if hasattr(x, "_meta") and not x._meta.abstract and not x._meta.proxy]
if ignore_leaf:
ret = [ x for x in ret if x != model ]
return ret
@memoized
def get_top_concrete_parent(model):
return get_concrete_parents(model)[-1]
def get_concrete_fields(model, ignore_leaf=False):
"""
Returns all the concrete fields for the model, including those
from parent models
"""
concrete_classes = get_concrete_parents(model, ignore_leaf)
fields = []
for klass in concrete_classes:
fields.extend(klass._meta.fields)
return fields
@memoized
def get_concrete_db_tables(model):
return [ x._meta.db_table for x in get_concrete_parents(model) ]
@memoized
def has_concrete_parents(model):
return get_concrete_parents(model) != [model]
def django_instance_to_entity(connection, model, fields, raw, instance):
# uses_inheritance = False
inheritance_root = get_top_concrete_parent(model)
db_table = get_datastore_kind(inheritance_root)
def value_from_instance(_instance, _field):
value = get_prepared_db_value(connection, _instance, _field, raw)
if (not _field.null and not _field.primary_key) and value is None:
raise IntegrityError("You can't set %s (a non-nullable "
"field) to None!" % _field.name)
is_primary_key = False
if _field.primary_key and _field.model == inheritance_root:
is_primary_key = True
return value, is_primary_key
field_values = {}
primary_key = None
for field in fields:
value, is_primary_key = value_from_instance(instance, field)
if is_primary_key:
primary_key = value
else:
field_values[field.column] = value
# Add special indexed fields
for index in special_indexes_for_column(model, field.column):
indexer = REQUIRES_SPECIAL_INDEXES[index]
values = indexer.prep_value_for_database(value)
if values is None:
continue
if not hasattr(values, "__iter__"):
values = [ values ]
for v in values:
column = indexer.indexed_column_name(field.column, v)
if column in field_values:
if not isinstance(field_values[column], list):
field_values[column] = [ field_values[column], v ]
else:
field_values[column].append(v)
else:
field_values[column] = v
kwargs = {}
if primary_key:
if isinstance(primary_key, (int, long)):
kwargs["id"] = primary_key
elif isinstance(primary_key, basestring):
if len(primary_key) > 500:
warnings.warn("Truncating primary key that is over 500 characters. "
"THIS IS AN ERROR IN YOUR PROGRAM.",
RuntimeWarning)
primary_key = primary_key[:500]
kwargs["name"] = primary_key
else:
raise ValueError("Invalid primary key value")
entity = datastore.Entity(db_table, **kwargs)
entity.update(field_values)
classes = get_concrete_db_tables(model)
if len(classes) > 1:
entity["class"] = classes
return entity
def get_datastore_key(model, pk):
""" Return a datastore.Key for the given model and primary key.
"""
kind = get_top_concrete_parent(model)._meta.db_table
return Key.from_path(kind, model._meta.pk.get_prep_value(pk))
class MockInstance(object):
"""
This creates a mock instance for use when passing a datastore entity
into get_prepared_db_value. This is used when performing updates to prevent a complete
conversion to a Django instance before writing back the entity
"""
def __init__(self, **kwargs):
is_adding = kwargs.pop('_is_adding', False)
class State:
adding = is_adding
self.fields = {}
for field_name, value in kwargs.items():
self.fields[field_name] = value
self._state = State()
def __getattr__(self, attr):
if attr in self.fields:
return self.fields[attr]
raise AttributeError(attr)
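# Illustrative usage (not part of the original module): values pulled from a
# datastore entity can be wrapped so get_prepared_db_value() works on updates.
#   instance = MockInstance(title=u'example', _is_adding=True)
#   instance.title           # -> u'example'
#   instance._state.adding   # -> True
#   instance.missing         # -> raises AttributeError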
def key_exists(key):
qry = Query(keys_only=True)
qry.Ancestor(key)
return qry.Count(limit=1) > 0
def django_ordering_comparison(ordering, lhs, rhs):
if not ordering:
return -1 # Really doesn't matter
ASCENDING = 1
DESCENDING = 2
for order, direction in ordering:
lhs_value = lhs.key() if order == "__key__" else lhs[order]
rhs_value = rhs.key() if order == "__key__" else rhs[order]
if direction == ASCENDING and lhs_value != rhs_value:
return -1 if lhs_value < rhs_value else 1
elif direction == DESCENDING and lhs_value != rhs_value:
return 1 if lhs_value < rhs_value else -1
return 0
def entity_matches_query(entity, query):
"""
Return True if the entity would potentially be returned by the datastore
query
"""
OPERATORS = {
"=": lambda x, y: x == y,
"<": lambda x, y: x < y,
">": lambda x, y: x > y,
"<=": lambda x, y: x <= y,
">=": lambda x, y: x >= y
}
queries = [query]
if isinstance(query, datastore.MultiQuery):
raise CouldBeSupportedError("We just need to separate the multiquery "
"into 'queries' then everything should work")
for query in queries:
comparisons = chain(
[("kind", "=", "_Query__kind") ],
[tuple(x.split(" ") + [ x ]) for x in query.keys()]
)
for ent_attr, op, query_attr in comparisons:
if ent_attr == "__key__":
continue
op = OPERATORS[op] # We want this to throw if there's some op we don't know about
if ent_attr == "kind":
ent_attr = entity.kind()
else:
ent_attr = entity.get(ent_attr)
if callable(ent_attr):
# entity.kind() is a callable, so we need this to save special casing it in a more
# ugly way
ent_attr = ent_attr()
if not isinstance(query_attr, (list, tuple)):
query_attrs = [query_attr]
else:
# The query value can be a list of ANDed values
query_attrs = query_attr
query_attrs = [ getattr(query, x) if x == "_Query__kind" else query.get(x) for x in query_attrs ]
if not isinstance(ent_attr, (list, tuple)):
ent_attr = [ ent_attr ]
matches = False
for query_attr in query_attrs: # [22, 23]
#If any of the values don't match then this query doesn't match
if not any([op(attr, query_attr) for attr in ent_attr]):
matches = False
break
else:
# One of the ent_attrs matches the query_attrs
matches = True
if not matches:
# One of the AND values didn't match
break
else:
# If we got through the loop without breaking, then the entity matches
return True
return False
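# Illustrative only (not part of the original module; assumes the old App
# Engine datastore API): an entity of kind 'test_user' with {'name': 'bob'}
# matches datastore.Query('test_user', {'name =': 'bob'}), while a query
# filtering on {'name =': 'alice'} or on a different kind does not match.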
| bsd-3-clause | 7,902,433,080,726,686,000 | 30.364179 | 111 | 0.599029 | false |
abramhindle/marsyas-fork | scripts/sfplugin-test-wrapper.py | 5 | 1546 | #!/usr/bin/env python
import os
import sys
import subprocess
import shutil
import commands
def runCommand(workingDir, bextractLocation, sfpluginLocation, extraBextractCmdArgs,
mfFileLocation, outputMplFile, testFile, goodFilename):
os.chdir(workingDir)
bextractCmd = ("%s %s -t %s -p %s") % (bextractLocation, extraBextractCmdArgs, mfFileLocation, outputMplFile)
a = commands.getoutput(bextractCmd)
sfpluginCmd = ("%s -pl %s %s") % (sfpluginLocation, outputMplFile, testFile)
a = commands.getoutput(sfpluginCmd)
returnCode = compareSfpluginOutput(a, goodFilename)
return returnCode
def compareSfpluginOutput(oneData, twoFilename):
oneLines = oneData.split("\n")
twoLines = open(twoFilename).readlines()
for a, b in zip(oneLines, twoLines):
if a.rstrip() != b.rstrip():
return False
return True
if __name__ == "__main__":
try:
workingDir = sys.argv[1]
bextractLocation = sys.argv[2]
sfpluginLocation = sys.argv[3]
extraBextractCmdArgs = sys.argv[4]
mfFileLocation = sys.argv[5]
outputMplFile = sys.argv[6]
testFile = sys.argv[7]
goodFilename = sys.argv[8]
except:
print "Syntax: cmake-test-wrapper.py WORKING_DIR ENV_VARS CMD ARG1 ARG2 ... ARGn "
sys.exit(0)
returnCode = runCommand(workingDir, bextractLocation, sfpluginLocation, extraBextractCmdArgs,
mfFileLocation, outputMplFile, testFile, goodFilename)
sys.exit(returnCode)
| gpl-2.0 | -9,078,033,573,374,746,000 | 30.55102 | 113 | 0.665589 | false |
stefan-andritoiu/upm | examples/python/oled_ssd1308.py | 6 | 5876 | #!/usr/bin/env python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Load lcd display module
from __future__ import print_function
import time, signal, sys
from upm import pyupm_lcd as upmLCD
def main():
myLCD = upmLCD.SSD1308(0, 0x3C);
logoArr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 192, 192, 192, 224,
224, 224, 224, 240, 240, 248, 248, 120, 120, 120, 120, 60, 60, 60, 60, 60,
62, 30, 30, 30, 30, 30, 30, 30, 31, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 31, 31, 31, 31, 31,
30, 62, 62, 62, 62, 126, 126, 124, 124, 252, 252, 248, 248, 240, 240, 240,
224, 224, 224, 192, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128,
128, 0, 56, 56, 28, 30, 14, 15, 15, 7, 7, 7, 7, 3, 3, 1, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
192, 192, 192, 192, 192, 192, 192, 192, 0, 0, 0, 0, 192, 193, 195, 195,
195, 7, 15, 15, 63, 127, 255, 255, 255, 254, 252, 252, 240, 192, 0, 0, 0,
0, 0, 0, 0, 0, 128, 192, 192, 240, 248, 124, 124, 60, 0, 0, 0, 0, 159, 159,
159, 159, 159, 159, 159, 159, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0,
0, 0, 0, 0, 254, 254, 254, 254, 254, 254, 254, 254, 128, 128, 128, 128,
128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 192, 192, 192, 192, 192, 192, 128,
128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255,
0, 0, 0, 0, 3, 7, 3, 3, 3, 0, 0, 0, 0, 0, 1, 1, 255, 255, 255, 255, 255,
255, 255, 0, 0, 224, 248, 252, 252, 255, 127, 15, 15, 3, 1, 0, 0, 0, 0, 0,
0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255,
255, 255, 255, 255, 255, 15, 15, 15, 15, 15, 15, 255, 255, 255, 255, 255,
255, 255, 252, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 15, 15,
15, 15, 15, 224, 224, 252, 254, 255, 255, 255, 255, 159, 159, 143, 143,
135, 135, 143, 159, 255, 255, 255, 255, 255, 255, 252, 248, 0, 0, 0, 255,
255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128,
224, 248, 248, 255, 255, 255, 255, 255, 127, 15, 255, 255, 255, 255, 255,
255, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255,
255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0,
0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 255,
255, 255, 255, 255, 255, 192, 192, 192, 192, 192, 31, 31, 255, 255, 255,
255, 255, 255, 231, 231, 199, 199, 199, 199, 199, 199, 199, 199, 231, 231,
231, 231, 199, 135, 0, 0, 0, 63, 255, 255, 255, 255, 255, 255, 255, 0, 0,
0, 0, 224, 240, 248, 248, 252, 254, 255, 255, 255, 127, 63, 63, 31, 15, 7,
7, 1, 0, 0, 63, 63, 255, 255, 255, 255, 255, 240, 192, 192, 128, 0, 0, 0,
0, 0, 0, 0, 0, 1, 3, 3, 7, 7, 7, 7, 7, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 7,
0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 7, 0, 0, 0, 0, 0, 0, 3, 3, 7, 7, 7,
7, 7, 7, 7, 7, 7, 0, 0, 0, 1, 3, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 7, 7, 7,
7, 7, 3, 3, 3, 1, 0, 0, 0, 0, 1, 3, 3, 7, 135, 135, 135, 192, 192, 0, 0, 7,
7, 3, 3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 15, 15,
31, 127, 127, 127, 255, 255, 252, 252, 252, 248, 240, 240, 240, 224, 224,
224, 192, 192, 192, 192, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 192, 192, 192, 192, 192,
224, 224, 224, 224, 240, 240, 240, 240, 248, 248, 248, 248, 252, 252, 252,
254, 254, 255, 255, 255, 255, 255, 255, 127, 127, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
3, 3, 3, 7, 7, 7, 15, 15, 31, 31, 31, 63, 63, 63, 63, 63, 127, 127, 127,
127, 127, 255, 255, 255, 255, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
255, 255, 255, 255, 255, 255, 255, 127, 127, 127, 127, 127, 127, 127, 127,
63, 63, 63, 63, 63, 31, 31, 31, 31, 31, 15, 15, 15, 15, 7, 7, 7, 7, 3, 3,
3, 3, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0]
intelLogo = upmLCD.uint8Array(len(logoArr))
for x in range(len(logoArr)):
intelLogo.__setitem__(x, logoArr[x])
myLCD.clear()
myLCD.draw(intelLogo, 1024)
del intelLogo
del myLCD
print("Exiting")
if __name__ == '__main__':
main()
| mit | 3,160,959,485,128,185,300 | 57.76 | 89 | 0.541184 | false |
wubr2000/googleads-python-lib | examples/adxbuyer/v201502/basic_operations/remove_placement.py | 4 | 2203 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deletes an ad group criterion using the 'REMOVE' operator.
To get ad group criteria, run get_placements.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
CRITERION_ID = 'INSERT_CRITERION_ID_HERE'
def main(client, ad_group_id, criterion_id):
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201502')
# Construct operations and delete ad group criteria.
operations = [
{
'operator': 'REMOVE',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'id': criterion_id
}
}
}
]
result = ad_group_criterion_service.mutate(operations)
# Display results.
for criterion in result['value']:
print ('Ad group criterion with ad group id \'%s\', criterion id \'%s\', '
'and type \'%s\' was deleted.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['criterion']['Criterion.Type']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID, CRITERION_ID)
| apache-2.0 | -4,210,179,938,968,221,000 | 30.927536 | 78 | 0.672265 | false |
hrjn/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause | -9,206,692,442,729,150,000 | 34.864078 | 81 | 0.65647 | false |
paulrouget/servo | tests/wpt/web-platform-tests/webdriver/tests/close_window/user_prompts.py | 22 | 4182 | # META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def close(session):
return session.transport.send(
"DELETE", "session/{session_id}/window".format(**vars(session)))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, create_window):
def check_user_prompt_closed_without_exception(dialog_type, retval):
original_handle = session.window_handle
new_handle = create_window()
session.window_handle = new_handle
create_dialog(dialog_type, text=dialog_type)
response = close(session)
assert_success(response)
# Asserting that the dialog was handled requires valid top-level browsing
# context, so we must switch to the original window.
session.window_handle = original_handle
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert new_handle not in session.handles
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, create_window):
def check_user_prompt_closed_with_exception(dialog_type, retval):
new_handle = create_window()
session.window_handle = new_handle
create_dialog(dialog_type, text=dialog_type)
response = close(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert new_handle in session.handles
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, create_window):
def check_user_prompt_not_closed_but_exception(dialog_type):
new_handle = create_window()
session.window_handle = new_handle
create_dialog(dialog_type, text=dialog_type)
response = close(session)
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert new_handle in session.handles
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_closed_without_exception, dialog_type):
# retval not testable for confirm and prompt because window is gone
check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
# retval not testable for confirm and prompt because window is gone
check_user_prompt_closed_without_exception(dialog_type, None)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 | -9,217,744,106,945,975,000 | 34.142857 | 90 | 0.716404 | false |
PsychoTV/PsychoTeam.repository | plugin.video.PsychoTV/resources/lib/sources/wsonline_tv.py | 6 | 4721 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.libraries import cleantitle
from resources.lib.libraries import client
from resources.lib import resolvers
class source:
def __init__(self):
self.base_link = 'http://watchseries-online.ch'
self.search_link = 'index'
def get_show(self, imdb, tvdb, tvshowtitle, year):
try:
tvshowtitle = cleantitle.tv(tvshowtitle)
query = urlparse.urljoin(self.base_link, self.search_link)
result = client.source(query)
result = re.compile('(<li>.+?</li>)').findall(result)
result = [re.compile('href="(.+?)">(.+?)<').findall(i) for i in result]
result = [i[0] for i in result if len(i[0]) > 0]
result = [i for i in result if tvshowtitle == cleantitle.tv(i[1])]
result = [i[0] for i in result][0]
try: url = re.compile('//.+?(/.+)').findall(result)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_episode(self, url, imdb, tvdb, title, date, season, episode):
try:
if url == None: return
year, month = re.compile('(\d{4})-(\d{2})').findall(date)[-1]
if int(year) <= 2008: raise Exception()
cat = urlparse.urljoin(self.base_link, url)
cat = cat.split('category/', 1)[-1].rsplit('/')[0]
url = urlparse.urljoin(self.base_link, '/episode/%s-s%02de%02d' % (cat, int(season), int(episode)))
result = client.source(url, output='response', error=True)
if '404' in result[0]:
url = urlparse.urljoin(self.base_link, '/%s/%s/%s-s%02de%02d' % (year, month, cat, int(season), int(episode)))
result = client.source(url, output='response', error=True)
if '404' in result[0]:
url = urlparse.urljoin(self.base_link, '/%s/%s/%s-%01dx%01d' % (year, month, cat, int(season), int(episode)))
result = client.source(url, output='response', error=True)
if '404' in result[0]: raise Exception()
try: url = re.compile('//.+?(/.+)').findall(url)[0]
except: url = result
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def get_sources(self, url, hosthdDict, hostDict, locDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
links = client.parseDOM(result, 'td', attrs = {'class': 'even tdhost'})
links += client.parseDOM(result, 'td', attrs = {'class': 'odd tdhost'})
for i in links:
try:
host = client.parseDOM(i, 'a')[0]
host = host.split('<', 1)[0]
host = host.rsplit('.', 1)[0].split('.', 1)[-1]
host = host.strip().lower()
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
url = client.parseDOM(i, 'a', ret='href')[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
sources.append({'source': host, 'quality': 'SD', 'provider': 'WSOnline', 'url': url})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
result = client.request(url)
try: url = client.parseDOM(result, 'a', ret='href', attrs = {'class': 'wsoButton'})[0]
except: pass
url = resolvers.request(url)
return url
except:
return
| gpl-2.0 | 7,275,365,600,453,348,000 | 33.210145 | 126 | 0.542046 | false |
sam-tsai/django-old | tests/regressiontests/utils/datastructures.py | 27 | 1212 | """
>>> from django.utils.datastructures import SortedDict
>>> d = SortedDict()
>>> d[7] = 'seven'
>>> d[1] = 'one'
>>> d[9] = 'nine'
>>> d.keys()
[7, 1, 9]
>>> d.values()
['seven', 'one', 'nine']
>>> d.items()
[(7, 'seven'), (1, 'one'), (9, 'nine')]
# Overwriting an item keeps its place.
>>> d[1] = 'ONE'
>>> d.values()
['seven', 'ONE', 'nine']
# New items go to the end.
>>> d[0] = 'nil'
>>> d.keys()
[7, 1, 9, 0]
# Deleting an item, then inserting the same key again will place it at the end.
>>> del d[7]
>>> d.keys()
[1, 9, 0]
>>> d[7] = 'lucky number 7'
>>> d.keys()
[1, 9, 0, 7]
# Changing the keys won't do anything, it's only a copy of the keys dict.
>>> k = d.keys()
>>> k.remove(9)
>>> d.keys()
[1, 9, 0, 7]
# Initialising a SortedDict with two keys will just take the first one. A real
# dict will actually take the second value so we will too, but we'll keep the
# ordering from the first key found.
>>> tuples = ((2, 'two'), (1, 'one'), (2, 'second-two'))
>>> d = SortedDict(tuples)
>>> d.keys()
[2, 1]
>>> real_dict = dict(tuples)
>>> sorted(real_dict.values())
['one', 'second-two']
>>> d.values() # Here the order of SortedDict values *is* what we are testing
['second-two', 'one']
"""
| bsd-3-clause | -2,609,876,074,755,356,000 | 22.307692 | 79 | 0.574257 | false |
fldc/CouchPotatoServer | couchpotato/core/plugins/category/main.py | 61 | 3963 | import traceback
from couchpotato import get_db
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from .index import CategoryIndex, CategoryMediaIndex
log = CPLog(__name__)
class CategoryPlugin(Plugin):
_database = {
'category': CategoryIndex,
'category_media': CategoryMediaIndex,
}
def __init__(self):
addApiView('category.save', self.save)
addApiView('category.save_order', self.saveOrder)
addApiView('category.delete', self.delete)
addApiView('category.list', self.allView, docs = {
'desc': 'List all available categories',
'return': {'type': 'object', 'example': """{
'success': True,
'categories': array, categories
}"""}
})
addEvent('category.all', self.all)
def allView(self, **kwargs):
return {
'success': True,
'categories': self.all()
}
def all(self):
db = get_db()
categories = db.all('category', with_doc = True)
return [x['doc'] for x in categories]
def save(self, **kwargs):
try:
db = get_db()
category = {
'_t': 'category',
'order': kwargs.get('order', 999),
'label': toUnicode(kwargs.get('label', '')),
'ignored': toUnicode(kwargs.get('ignored', '')),
'preferred': toUnicode(kwargs.get('preferred', '')),
'required': toUnicode(kwargs.get('required', '')),
'destination': toUnicode(kwargs.get('destination', '')),
}
try:
c = db.get('id', kwargs.get('id'))
category['order'] = c.get('order', category['order'])
c.update(category)
db.update(c)
except:
c = db.insert(category)
c.update(category)
return {
'success': True,
'category': c
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False,
'category': None
}
def saveOrder(self, **kwargs):
try:
db = get_db()
order = 0
for category_id in kwargs.get('ids', []):
c = db.get('id', category_id)
c['order'] = order
db.update(c)
order += 1
return {
'success': True
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def delete(self, id = None, **kwargs):
try:
db = get_db()
success = False
message = ''
try:
c = db.get('id', id)
db.delete(c)
# Force defaults on all empty category movies
self.removeFromMovie(id)
success = True
except:
message = log.error('Failed deleting category: %s', traceback.format_exc())
return {
'success': success,
'message': message
}
except:
log.error('Failed: %s', traceback.format_exc())
return {
'success': False
}
def removeFromMovie(self, category_id):
try:
db = get_db()
movies = [x['doc'] for x in db.get_many('category_media', category_id, with_doc = True)]
if len(movies) > 0:
for movie in movies:
movie['category_id'] = None
db.update(movie)
except:
log.error('Failed: %s', traceback.format_exc())
| gpl-3.0 | -3,093,676,944,332,690,000 | 25.42 | 100 | 0.482463 | false |
aviweit/libcloud | libcloud/test/test_utils.py | 42 | 13018 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import socket
import codecs
import unittest
import warnings
import os.path
from itertools import chain
# In Python > 2.7 DeprecationWarnings are disabled by default
warnings.simplefilter('default')
import libcloud.utils.files
from libcloud.utils.misc import get_driver, set_driver
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import b
from libcloud.utils.py3 import bchr
from libcloud.utils.py3 import hexadigits
from libcloud.utils.py3 import urlquote
from libcloud.compute.types import Provider
from libcloud.compute.providers import DRIVERS
from libcloud.utils.misc import get_secure_random_string
from libcloud.utils.networking import is_public_subnet
from libcloud.utils.networking import is_private_subnet
from libcloud.utils.networking import is_valid_ip_address
from libcloud.utils.networking import join_ipv4_segments
from libcloud.utils.networking import increment_ipv4_segments
from libcloud.storage.drivers.dummy import DummyIterator
WARNINGS_BUFFER = []
if PY3:
from io import FileIO as file
def show_warning(msg, cat, fname, lno, line=None):
WARNINGS_BUFFER.append((msg, cat, fname, lno))
original_func = warnings.showwarning
class TestUtils(unittest.TestCase):
def setUp(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
def tearDown(self):
global WARNINGS_BUFFER
WARNINGS_BUFFER = []
warnings.showwarning = original_func
def test_guess_file_mime_type(self):
file_path = os.path.abspath(__file__)
mimetype, encoding = libcloud.utils.files.guess_file_mime_type(
file_path=file_path)
self.assertTrue(mimetype.find('python') != -1)
def test_get_driver(self):
driver = get_driver(drivers=DRIVERS, provider=Provider.DUMMY)
self.assertTrue(driver is not None)
try:
driver = get_driver(drivers=DRIVERS, provider='fooba')
except AttributeError:
pass
else:
self.fail('Invalid provider, but an exception was not thrown')
def test_set_driver(self):
# Set an existing driver
try:
driver = set_driver(DRIVERS, Provider.DUMMY,
'libcloud.storage.drivers.dummy',
'DummyStorageDriver')
except AttributeError:
pass
# Register a new driver
driver = set_driver(DRIVERS, 'testingset',
'libcloud.storage.drivers.dummy',
'DummyStorageDriver')
self.assertTrue(driver is not None)
# Register it again
try:
set_driver(DRIVERS, 'testingset',
'libcloud.storage.drivers.dummy',
'DummyStorageDriver')
except AttributeError:
pass
# Register an invalid module
try:
set_driver(DRIVERS, 'testingnew',
'libcloud.storage.drivers.dummy1',
'DummyStorageDriver')
except ImportError:
pass
# Register an invalid class
try:
set_driver(DRIVERS, 'testingnew',
'libcloud.storage.drivers.dummy',
'DummyStorageDriver1')
except AttributeError:
pass
def test_deprecated_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_DEPRECATION_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_DEPRECATION_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.deprecated_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_in_development_warning(self):
warnings.showwarning = show_warning
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = False
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.SHOW_IN_DEVELOPMENT_WARNING = True
self.assertEqual(len(WARNINGS_BUFFER), 0)
libcloud.utils.in_development_warning('test_module')
self.assertEqual(len(WARNINGS_BUFFER), 1)
def test_read_in_chunks_iterator_no_data(self):
iterator = DummyIterator()
generator1 = libcloud.utils.files.read_in_chunks(iterator=iterator,
yield_empty=False)
generator2 = libcloud.utils.files.read_in_chunks(iterator=iterator,
yield_empty=True)
# yield_empty=False
count = 0
for data in generator1:
count += 1
self.assertEqual(data, b(''))
self.assertEqual(count, 0)
# yield_empty=True
count = 0
for data in generator2:
count += 1
self.assertEqual(data, b(''))
self.assertEqual(count, 1)
def test_read_in_chunks_iterator(self):
def iterator():
for x in range(0, 1000):
yield 'aa'
for result in libcloud.utils.files.read_in_chunks(iterator(),
chunk_size=10,
fill_size=False):
self.assertEqual(result, b('aa'))
for result in libcloud.utils.files.read_in_chunks(iterator(), chunk_size=10,
fill_size=True):
self.assertEqual(result, b('aaaaaaaaaa'))
def test_read_in_chunks_filelike(self):
class FakeFile(file):
def __init__(self):
self.remaining = 500
def read(self, size):
self.remaining -= 1
if self.remaining == 0:
return ''
return 'b' * (size + 1)
for index, result in enumerate(libcloud.utils.files.read_in_chunks(
FakeFile(), chunk_size=10,
fill_size=False)):
self.assertEqual(result, b('b' * 11))
self.assertEqual(index, 498)
for index, result in enumerate(libcloud.utils.files.read_in_chunks(
FakeFile(), chunk_size=10,
fill_size=True)):
if index != 548:
self.assertEqual(result, b('b' * 10))
else:
self.assertEqual(result, b('b' * 9))
self.assertEqual(index, 548)
def test_exhaust_iterator(self):
def iterator_func():
for x in range(0, 1000):
yield 'aa'
data = b('aa' * 1000)
iterator = libcloud.utils.files.read_in_chunks(iterator=iterator_func())
result = libcloud.utils.files.exhaust_iterator(iterator=iterator)
self.assertEqual(result, data)
result = libcloud.utils.files.exhaust_iterator(iterator=iterator_func())
self.assertEqual(result, data)
data = '12345678990'
iterator = StringIO(data)
result = libcloud.utils.files.exhaust_iterator(iterator=iterator)
self.assertEqual(result, b(data))
def test_exhaust_iterator_empty_iterator(self):
data = ''
iterator = StringIO(data)
result = libcloud.utils.files.exhaust_iterator(iterator=iterator)
self.assertEqual(result, b(data))
def test_unicode_urlquote(self):
# Regression tests for LIBCLOUD-429
if PY3:
# Note: this is a unicode literal
val = '\xe9'
else:
val = codecs.unicode_escape_decode('\xe9')[0]
uri = urlquote(val)
self.assertEqual(b(uri), b('%C3%A9'))
# Unicode without unicode characters
uri = urlquote('~abc')
self.assertEqual(b(uri), b('%7Eabc'))
# Already-encoded bytestring without unicode characters
uri = urlquote(b('~abc'))
self.assertEqual(b(uri), b('%7Eabc'))
def test_get_secure_random_string(self):
for i in range(1, 500):
value = get_secure_random_string(size=i)
self.assertEqual(len(value), i)
def test_hexadigits(self):
self.assertEqual(hexadigits(b('')), [])
self.assertEqual(hexadigits(b('a')), ['61'])
self.assertEqual(hexadigits(b('AZaz09-')),
['41', '5a', '61', '7a', '30', '39', '2d'])
def test_bchr(self):
if PY3:
self.assertEqual(bchr(0), b'\x00')
self.assertEqual(bchr(97), b'a')
else:
self.assertEqual(bchr(0), '\x00')
self.assertEqual(bchr(97), 'a')
class NetworkingUtilsTestCase(unittest.TestCase):
def test_is_public_and_is_private_subnet(self):
public_ips = [
'213.151.0.8',
'86.87.86.1',
'8.8.8.8',
'8.8.4.4'
]
private_ips = [
'192.168.1.100',
'10.0.0.1',
'172.16.0.0'
]
for address in public_ips:
is_public = is_public_subnet(ip=address)
is_private = is_private_subnet(ip=address)
self.assertTrue(is_public)
self.assertFalse(is_private)
for address in private_ips:
is_public = is_public_subnet(ip=address)
is_private = is_private_subnet(ip=address)
self.assertFalse(is_public)
self.assertTrue(is_private)
def test_is_valid_ip_address(self):
valid_ipv4_addresses = [
'192.168.1.100',
'10.0.0.1',
'213.151.0.8',
'77.77.77.77'
]
invalid_ipv4_addresses = [
'10.1',
'256.256.256.256',
'0.567.567.567',
'192.168.0.257'
]
valid_ipv6_addresses = [
'fe80::200:5aee:feaa:20a2',
'2607:f0d0:1002:51::4',
'2607:f0d0:1002:0051:0000:0000:0000:0004',
'::1'
]
invalid_ipv6_addresses = [
'2607:f0d',
'2607:f0d0:0004',
]
for address in valid_ipv4_addresses:
status = is_valid_ip_address(address=address,
family=socket.AF_INET)
self.assertTrue(status)
for address in valid_ipv6_addresses:
status = is_valid_ip_address(address=address,
family=socket.AF_INET6)
self.assertTrue(status)
for address in chain(invalid_ipv4_addresses, invalid_ipv6_addresses):
status = is_valid_ip_address(address=address,
family=socket.AF_INET)
self.assertFalse(status)
for address in chain(invalid_ipv4_addresses, invalid_ipv6_addresses):
status = is_valid_ip_address(address=address,
family=socket.AF_INET6)
self.assertFalse(status)
def test_join_ipv4_segments(self):
values = [
(('127', '0', '0', '1'), '127.0.0.1'),
(('255', '255', '255', '0'), '255.255.255.0'),
]
for segments, joined_ip in values:
result = join_ipv4_segments(segments=segments)
self.assertEqual(result, joined_ip)
def test_increment_ipv4_segments(self):
values = [
(('127', '0', '0', '1'), '127.0.0.2'),
(('255', '255', '255', '0'), '255.255.255.1'),
(('254', '255', '255', '255'), '255.0.0.0'),
(('100', '1', '0', '255'), '100.1.1.0'),
]
for segments, incremented_ip in values:
result = increment_ipv4_segments(segments=segments)
result = join_ipv4_segments(segments=result)
self.assertEqual(result, incremented_ip)
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | 6,754,832,451,209,640,000 | 32.81039 | 84 | 0.565107 | false |
django-nonrel/djangoappengine | docs/conf.py | 3 | 7964 | # -*- coding: utf-8 -*-
#
# Django App Engine documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 20 20:01:39 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django App Engine'
copyright = u'2011, AllButtonsPressed, Potato London, Wilfred Hughes'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6.0'
# The full version, including alpha/beta/rc tags.
release = '1.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoAppEnginedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'DjangoAppEngine.tex', u'Django App Engine Documentation',
u'AllButtonsPressed, Potato London, Wilfred Hughes', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangoappengine', u'Django App Engine Documentation',
[u'AllButtonsPressed, Potato London, Wilfred Hughes'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoAppEngine', u'Django App Engine Documentation',
u'AllButtonsPressed, Potato London, Wilfred Hughes', 'DjangoAppEngine',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| bsd-3-clause | 4,284,537,228,714,997,000 | 31.773663 | 80 | 0.708061 | false |
funbaker/astropy | astropy/io/misc/asdf/__init__.py | 2 | 1456 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
The **asdf** subpackage contains code that is used to serialize astropy types
so that they can be represented and stored using the Advanced Scientific Data
Format (ASDF). This subpackage defines classes, referred to as **tags**, that
implement the logic for serialization and deserialization.
ASDF makes use of abstract data type definitions called **schemas**. The tags
provided here are simply specific implementations of particular schemas.
Currently astropy only implements tags for a subset of schemas that are defined
externally by the ASDF Standard. However, it is likely that astropy will
eventually define schemas of its own.
Astropy currently has no ability to read or write ASDF files itself. In order
to process ASDF files it is necessary to make use of the standalone **asdf**
package. Users should never need to refer to tag implementations directly.
Their presence should be entirely transparent when processing ASDF files.
If both **asdf** and **astropy** are installed, no further configuration is
required in order to process ASDF files. The **asdf** package has been designed
to automatically detect the presence of the tags defined by **astropy**.
Documentation on the ASDF Standard can be found `here
<https://asdf-standard.readthedocs.io>`__. Documentation on the ASDF Python
module can be found `here <https://asdf.readthedocs.io>`__.
"""
| bsd-3-clause | -5,625,127,473,037,531,000 | 52.925926 | 79 | 0.782967 | false |
ingkebil/GMDBot | page_exists.py | 1 | 1236 | #!/usr/bin/env python
"""Check if a '(data page)' for a biomolecule exists"""
__author__ = "Kenny Billiau"
__copyright__ = "2014, GMD"
__license__ = "GPL v2"
__version__ = "0.1"
import sys
import urllib2
import re
import downloadinchi as inchi
import openpyxl as px
import urllib
def get_molecules_from_xlsx(fn):
workbook = px.load_workbook(fn)
page = workbook.get_sheet_by_name(name='Wikipedia')
res = []
for row in page.range('A7:E208'):
if row[4].value not in ('#N/A', None):
res.append(row[0].value)
return res
def main(argv):
links = []
if len(argv) == 0:
lines = inchi.get_page('https://en.wikipedia.org/wiki/List_of_biomolecules')
links = inchi.get_molecule_links(lines)
else:
links = get_molecules_from_xlsx(argv[0])
pageid_re = re.compile('pageid')
for title in links:
print(title + ' '),
url = urllib2.urlopen("https://en.wikipedia.org/w/api.php?action=query&format=yaml&titles=%s" % urllib.quote(title + '_(data_page)'))
lines = url.read()
if len(pageid_re.findall(lines)) > 0:
print 'found'
else:
print 'NOT FOUND'
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-2.0 | -8,409,803,177,533,417,000 | 24.22449 | 141 | 0.59466 | false |