repo_name
stringlengths 6
61
| path
stringlengths 4
230
| copies
stringlengths 1
3
| size
stringlengths 4
6
| text
stringlengths 1.01k
850k
| license
stringclasses 15
values | hash
int64 -9,220,477,234,079,998,000
9,219,060,020B
| line_mean
float64 11.6
96.6
| line_max
int64 32
939
| alpha_frac
float64 0.26
0.9
| autogenerated
bool 1
class | ratio
float64 1.62
6.1
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tatsy/bssrdf-estimate | bssrdf_estimate/interface/control_widget.py | 1 | 2883 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class ParameterWidget(QWidget):
def __init__(self, parent=None):
super(ParameterWidget, self).__init__(parent)
self.formLayout = QFormLayout()
self.widthLineEdit = QLineEdit()
self.widthLineEdit.setText('800')
self.formLayout.addRow('width', self.widthLineEdit)
self.heightLineEdit = QLineEdit()
self.heightLineEdit.setText('600')
self.formLayout.addRow('height', self.heightLineEdit)
self.sppLineEdit = QLineEdit()
self.sppLineEdit.setText('1')
self.formLayout.addRow('samples', self.sppLineEdit)
self.nphotonLineEdit = QLineEdit()
self.nphotonLineEdit.setText('1000000')
self.formLayout.addRow('photons', self.nphotonLineEdit)
self.scaleLineEdit = QLineEdit()
self.scaleLineEdit.setText('0.01')
self.formLayout.addRow('scale', self.scaleLineEdit)
self.setLayout(self.formLayout)
class ControlWidget(QWidget):
def __init__(self, parent=None):
super(ControlWidget, self).__init__(parent)
self.paramWidget = ParameterWidget()
self.loadPushButton = QPushButton()
self.loadPushButton.setText('Load')
self.estimatePushButton = QPushButton()
self.estimatePushButton.setText('Estimate')
self.renderPushButton = QPushButton()
self.renderPushButton.setText('Render')
self.boxLayout = QVBoxLayout()
self.boxLayout.addWidget(self.paramWidget)
self.boxLayout.addWidget(self.loadPushButton)
self.boxLayout.addWidget(self.estimatePushButton)
self.boxLayout.addWidget(self.renderPushButton)
self.setLayout(self.boxLayout)
@property
def width_value(self):
return int(self.paramWidget.widthLineEdit.text())
@width_value.setter
def width_value(self, value):
self.paramWidget.widthLineEdit.setText(str(value))
@property
def height_value(self):
return int(self.paramWidget.heightLineEdit.text())
@height_value.setter
def height_value(self, value):
self.paramWidget.heightLineEdit.setText(str(value))
@property
def sample_per_pixel(self):
return int(self.paramWidget.sppLineEdit.text())
@sample_per_pixel.setter
def sample_per_pixel(self, value):
self.paramWidget.sppLineEdit.setText(str(value))
@property
def num_photons(self):
return int(self.paramWidget.nphotonLineEdit.text())
@num_photons.setter
def num_photons(self, value):
self.paramWidget.nphotonLineEdit.setText(str(value))
@property
def bssrdf_scale(self):
return float(self.paramWidget.scaleLineEdit.text())
@bssrdf_scale.setter
def bssrdf_scale(self, value):
self.paramWidget.scaleLineEdit.setText(str(value))
| mit | 1,971,967,807,924,753,700 | 29.670213 | 63 | 0.674298 | false | 3.844 | false | false | false |
Darkdadaah/pywikibot-core | tests/wikidataquery_tests.py | 1 | 9750 | # -*- coding: utf-8 -*-
"""Test cases for the WikidataQuery query syntax and API."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import os
import time
import pywikibot
import pywikibot.data.wikidataquery as query
from pywikibot.page import ItemPage, PropertyPage, Claim
from tests.aspects import unittest, WikidataTestCase, TestCase
class TestDryApiFunctions(TestCase):
"""Test WikiDataQuery API functions."""
net = False
def testQueries(self):
"""
Test Queries and check whether they're behaving correctly.
Check that we produce the expected query strings and that
invalid inputs are rejected correctly
"""
q = query.HasClaim(99)
self.assertEqual(str(q), "claim[99]")
q = query.HasClaim(99, 100)
self.assertEqual(str(q), "claim[99:100]")
q = query.HasClaim(99, [100])
self.assertEqual(str(q), "claim[99:100]")
q = query.HasClaim(99, [100, 101])
self.assertEqual(str(q), "claim[99:100,101]")
q = query.NoClaim(99, [100, 101])
self.assertEqual(str(q), "noclaim[99:100,101]")
q = query.StringClaim(99, "Hello")
self.assertEqual(str(q), 'string[99:"Hello"]')
q = query.StringClaim(99, ["Hello"])
self.assertEqual(str(q), 'string[99:"Hello"]')
q = query.StringClaim(99, ["Hello", "world"])
self.assertEqual(str(q), 'string[99:"Hello","world"]')
self.assertRaises(TypeError, lambda: query.StringClaim(99, 2))
q = query.Tree(92, [1], 2)
self.assertEqual(str(q), 'tree[92][1][2]')
# missing third arg
q = query.Tree(92, 1)
self.assertEqual(str(q), 'tree[92][1][]')
# missing second arg
q = query.Tree(92, reverse=3)
self.assertEqual(str(q), 'tree[92][][3]')
q = query.Tree([92, 93], 1, [2, 7])
self.assertEqual(str(q), 'tree[92,93][1][2,7]')
# bad tree arg types
self.assertRaises(TypeError, lambda: query.Tree(99, "hello"))
q = query.Link("enwiki")
self.assertEqual(str(q), 'link[enwiki]')
q = query.NoLink(["enwiki", "frwiki"])
self.assertEqual(str(q), 'nolink[enwiki,frwiki]')
# bad link arg types
self.assertRaises(TypeError, lambda: query.Link(99))
self.assertRaises(TypeError, lambda: query.Link([99]))
# HasClaim with tree as arg
q = query.HasClaim(99, query.Tree(1, 2, 3))
self.assertEqual(str(q), "claim[99:(tree[1][2][3])]")
q = query.HasClaim(99, query.Tree(1, [2, 5], [3, 90]))
self.assertEqual(str(q), "claim[99:(tree[1][2,5][3,90])]")
class TestLiveApiFunctions(WikidataTestCase):
"""Test WikiDataQuery API functions."""
cached = True
def testQueriesWDStructures(self):
"""Test queries using Wikibase page structures like ItemPage."""
q = query.HasClaim(PropertyPage(self.repo, "P99"))
self.assertEqual(str(q), "claim[99]")
q = query.HasClaim(PropertyPage(self.repo, "P99"),
ItemPage(self.repo, "Q100"))
self.assertEqual(str(q), "claim[99:100]")
q = query.HasClaim(99, [100, PropertyPage(self.repo, "P101")])
self.assertEqual(str(q), "claim[99:100,101]")
q = query.StringClaim(PropertyPage(self.repo, "P99"), "Hello")
self.assertEqual(str(q), 'string[99:"Hello"]')
q = query.Tree(ItemPage(self.repo, "Q92"), [1], 2)
self.assertEqual(str(q), 'tree[92][1][2]')
q = query.Tree(ItemPage(self.repo, "Q92"), [PropertyPage(self.repo, "P101")], 2)
self.assertEqual(str(q), 'tree[92][101][2]')
self.assertRaises(TypeError, lambda: query.Tree(PropertyPage(self.repo, "P92"),
[PropertyPage(self.repo, "P101")],
2))
c = pywikibot.Coordinate(50, 60)
q = query.Around(PropertyPage(self.repo, "P625"), c, 23.4)
self.assertEqual(str(q), 'around[625,50,60,23.4]')
begin = pywikibot.WbTime(site=self.repo, year=1999)
end = pywikibot.WbTime(site=self.repo, year=2010, hour=1)
# note no second comma
q = query.Between(PropertyPage(self.repo, "P569"), begin)
self.assertEqual(str(q), 'between[569,+00000001999-01-01T00:00:00Z]')
q = query.Between(PropertyPage(self.repo, "P569"), end=end)
self.assertEqual(str(q), 'between[569,,+00000002010-01-01T01:00:00Z]')
q = query.Between(569, begin, end)
self.assertEqual(str(q),
'between[569,+00000001999-01-01T00:00:00Z,+00000002010-01-01T01:00:00Z]')
# try negative year
begin = pywikibot.WbTime(site=self.repo, year=-44)
q = query.Between(569, begin, end)
self.assertEqual(str(q),
'between[569,-00000000044-01-01T00:00:00Z,+00000002010-01-01T01:00:00Z]')
def testQueriesDirectFromClaim(self):
"""Test construction of the right Query from a page.Claim."""
# Datatype: item
claim = Claim(self.repo, 'P17')
claim.setTarget(pywikibot.ItemPage(self.repo, 'Q35'))
q = query.fromClaim(claim)
self.assertEqual(str(q), 'claim[17:35]')
# Datatype: string
claim = Claim(self.repo, 'P225')
claim.setTarget('somestring')
q = query.fromClaim(claim)
self.assertEqual(str(q), 'string[225:"somestring"]')
# Datatype: external-id
claim = Claim(self.repo, 'P268')
claim.setTarget('somestring')
q = query.fromClaim(claim)
self.assertEqual(str(q), 'string[268:"somestring"]')
# Datatype: commonsMedia
claim = Claim(self.repo, 'P18')
claim.setTarget(
pywikibot.FilePage(
pywikibot.Site(self.family, self.code),
'Foo.jpg'))
q = query.fromClaim(claim)
self.assertEqual(str(q), 'string[18:"Foo.jpg"]')
def testQuerySets(self):
"""Test that we can join queries together correctly."""
# construct via queries
qs = query.HasClaim(99, 100).AND(query.HasClaim(99, 101))
self.assertEqual(str(qs), 'claim[99:100] AND claim[99:101]')
self.assertEqual(repr(qs), 'QuerySet(claim[99:100] AND claim[99:101])')
qs = query.HasClaim(99, 100).AND(query.HasClaim(99, 101)).AND(query.HasClaim(95))
self.assertEqual(str(qs), 'claim[99:100] AND claim[99:101] AND claim[95]')
# construct via queries
qs = query.HasClaim(99, 100).AND([query.HasClaim(99, 101), query.HasClaim(95)])
self.assertEqual(str(qs), 'claim[99:100] AND claim[99:101] AND claim[95]')
qs = query.HasClaim(99, 100).OR([query.HasClaim(99, 101), query.HasClaim(95)])
self.assertEqual(str(qs), 'claim[99:100] OR claim[99:101] OR claim[95]')
q1 = query.HasClaim(99, 100)
q2 = query.HasClaim(99, 101)
# different joiners get explicit grouping parens (the api also allows
# implicit, but we don't do that)
qs1 = q1.AND(q2)
qs2 = q1.OR(qs1).AND(query.HasClaim(98))
self.assertEqual(str(qs2),
'(claim[99:100] OR (claim[99:100] AND claim[99:101])) AND claim[98]')
# if the joiners are the same, no need to group
qs1 = q1.AND(q2)
qs2 = q1.AND(qs1).AND(query.HasClaim(98))
self.assertEqual(str(qs2),
'claim[99:100] AND claim[99:100] AND claim[99:101] AND claim[98]')
qs1 = query.HasClaim(100).AND(query.HasClaim(101))
qs2 = qs1.OR(query.HasClaim(102))
self.assertEqual(str(qs2), '(claim[100] AND claim[101]) OR claim[102]')
qs = query.Link("enwiki").AND(query.NoLink("dewiki"))
self.assertEqual(str(qs), 'link[enwiki] AND nolink[dewiki]')
def testQueryApiSyntax(self):
"""Test that we can generate the API query correctly."""
w = query.WikidataQuery("http://example.com")
qs = w.getQueryString(query.Link("enwiki"))
self.assertEqual(qs, "q=link%5Benwiki%5D")
self.assertEqual(w.getUrl(qs), "http://example.com/api?q=link%5Benwiki%5D")
# check labels and props work OK
qs = w.getQueryString(query.Link("enwiki"), ['en', 'fr'], ['prop'])
self.assertEqual(qs, "q=link%5Benwiki%5D&labels=en,fr&props=prop")
class TestApiSlowFunctions(TestCase):
"""Test slow WikiDataQuery API functions."""
hostname = 'https://wdq.wmflabs.org/api'
def testQueryApiGetter(self):
"""Test that we can actually retreive data and that caching works."""
w = query.WikidataQuery(cacheMaxAge=0)
# this query doesn't return any items, save a bit of bandwidth!
q = query.HasClaim(105).AND([query.NoClaim(225), query.HasClaim(100)])
# check that the cache file is created
cacheFile = w.getCacheFilename(w.getQueryString(q, [], []))
# remove existing cache file
try:
os.remove(cacheFile)
except OSError:
pass
data = w.query(q)
self.assertFalse(os.path.exists(cacheFile))
w = query.WikidataQuery(cacheMaxAge=0.1)
data = w.query(q)
self.assertTrue(os.path.exists(cacheFile))
self.assertIn('status', data)
self.assertIn('items', data)
t1 = time.time()
data = w.query(q)
t2 = time.time()
# check that the cache access is fast
self.assertLess(t2 - t1, 0.2)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
| mit | -838,298,079,280,254,500 | 32.62069 | 98 | 0.591897 | false | 3.325375 | true | false | false |
sixty-north/cosmic-ray | src/cosmic_ray/operators/number_replacer.py | 1 | 1175 | """Implementation of the NumberReplacer operator.
"""
import parso
from ..ast import is_number
from .operator import Operator
# List of offsets that we apply to numbers in the AST. Each index into the list
# corresponds to single mutation.
OFFSETS = [
+1,
-1,
]
class NumberReplacer(Operator):
"""An operator that modifies numeric constants."""
def mutation_positions(self, node):
if is_number(node):
for _ in OFFSETS:
yield (node.start_pos, node.end_pos)
def mutate(self, node, index):
"""Modify the numeric value on `node`."""
assert index < len(OFFSETS), 'received count with no associated offset'
assert isinstance(node, parso.python.tree.Number)
val = eval(node.value) + OFFSETS[index] # pylint: disable=W0123
return parso.python.tree.Number(' ' + str(val), node.start_pos)
@classmethod
def examples(cls):
return (
('x = 1', 'x = 2'),
('x = 1', 'x = 0', 1),
('x = 4.2', 'x = 5.2'),
('x = 4.2', 'x = 3.2', 1),
('x = 1j', 'x = (1+1j)'),
('x = 1j', 'x = (-1+1j)', 1),
)
| mit | 266,275,105,423,719,900 | 26.325581 | 79 | 0.550638 | false | 3.476331 | false | false | false |
nico-izo/kobato | kobato/commands/config.py | 1 | 2869 | from kobato.plugin import KobatoBasePlugin, kobato_plugin_register
import sys
class KobatoConfig(KobatoBasePlugin):
def prepare(self, parser):
subparsers = parser.add_subparsers(help='sub-command help')
set_ = subparsers.add_parser('set', help='Set config values')
set_.add_argument('name')
set_.add_argument('value')
set_.set_defaults(func=self.set)
show = subparsers.add_parser('show', help='Show config values')
show.add_argument('name')
show.set_defaults(func=self.show)
reset = subparsers.add_parser('reset', help='Remove setting from config')
reset.add_argument('name')
reset.set_defaults(func=self.reset)
def run(self, args):
# TODO FIXME
raise NotImplementedError('TODO: SHOW HELP')
def set(self, args):
try:
(group, name) = args['name'].split('.')
except ValueError:
print('Please provide path in format: <group>.<name>')
sys.exit(1)
val = args['value']
if self._config.get(group) is None:
self._config[group] = {}
val = {
'true': True,
'false': False,
'True': True,
'False': False
}.get(val, val)
self._config[group][name] = val
self._config.dump()
def show(self, args):
path = args['name']
def print_group(name):
if self._config.get(name) is None:
print('No such group in config:', name)
return
for line in self._config.get(name):
print(' {}.{} => {}'.format(name, line, self._config.get(name).get(line)))
def print_line(group, name):
if self._config.get(group) is None:
print('No such group in config:', name)
return
if self._config.get(group).get(name) is None:
print('No such entry found in config')
return
print(' {}.{} => {}'.format(group, name, self._config.get(group).get(name)))
def error(*args, **kwargs):
print('Invalid format')
sys.exit(1)
{
1: print_group,
2: print_line
}.get(len(path.split('.')), error)(*path.split('.'))
def reset(self, args):
try:
(group, name) = args['name'].split('.')
except ValueError:
print('You can\'t reset config group. Reset individual entries.'
' Please provide path in format: <group>.<name>')
sys.exit(1)
try:
del self._config[group][name]
except KeyError:
pass
self._config.dump()
kobato_plugin_register(
'config',
KobatoConfig,
aliases=['cfg'],
description='Set, reset and view kobato config'
)
| gpl-3.0 | -7,846,888,021,279,062,000 | 27.127451 | 92 | 0.53015 | false | 4.040845 | true | false | false |
samcaulfield/PML_IDE | Test/kbdShortcutToSave.py | 2 | 2599 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class Kbrd(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost/"
self.verificationErrors = []
self.accept_next_alert = True
def test_kbrd(self):
driver = self.driver
f = open('property')
project_name = f.read()
f.close()
driver.get(self.base_url + "/" + project_name + "/")
driver.find_element_by_css_selector("div.ace_content").click()
driver.find_element_by_class_name("ace_text-input").send_keys("process w {}")
driver.find_element_by_link_text("Edit").click()
driver.find_element_by_link_text("Preferences").click()
Select(driver.find_element_by_id("setKeyboardHandler")).select_by_visible_text("emacs")
driver.find_element_by_xpath("//div[10]").click()
driver.find_element_by_class_name("ace_text-input").send_keys(Keys.CONTROL,"x","s")
time.sleep(1)
driver.find_element_by_css_selector("#fileSaveToDiskModal > div.modal-dialog > div.modal-content > div.modal-header > button.close").click()
time.sleep(3)
driver.find_element_by_link_text("Edit").click()
driver.find_element_by_link_text("Preferences").click()
Select(driver.find_element_by_id("setKeyboardHandler")).select_by_visible_text("vim")
driver.find_element_by_xpath("//div[10]").click()
driver.find_element_by_class_name("ace_text-input").send_keys(":","w", Keys.ENTER)
time.sleep(1)
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException as e: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException as e: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| mit | 5,959,334,172,456,122,000 | 37.791045 | 141 | 0.672566 | false | 3.44695 | false | false | false |
RadioRevolt/SlackBot | plugins/dab.py | 1 | 5676 | from urllib.request import urlopen
from json import loads
from rtmbot.core import Plugin
def get_elements(studio):
valid_studio_values = ['studio', 'teknikerrom']
studio = studio.strip().lower()
if studio not in valid_studio_values:
return False
elements_url = urlopen('http://pappagorg.radiorevolt.no/v1/sendinger/currentelements/' + studio).read().decode()
elements = loads(elements_url)
if elements['current']:
current_class = elements['current']['class'].lower()
if current_class == 'music':
return 'Låt: {0} - {1}'.format(elements['current']['title'], elements['current']['artist'])
elif current_class == 'audio':
return 'Lydsak: {0}'.format(elements['current']['title'])
elif current_class == 'promotion':
return 'Jingle: {0}'.format(elements['current']['title'])
else:
return 'Unknown ({0}): {1}'.format(current_class, elements['current']['title'])
elif elements['previous'] or elements['next']:
return 'Stikk'
return False
def has_elements(studio):
valid_studio_values = ['studio', 'teknikerrom', 'autoavvikler']
studio = studio.strip().lower()
if studio not in valid_studio_values:
return False
elements_url = urlopen('http://pappagorg.radiorevolt.no/v1/sendinger/currentelements/' + studio).read().decode()
elements = loads(elements_url)
return elements['current'] or elements['next'] or elements['previous']
def debug():
elements_url = urlopen('http://pappagorg.radiorevolt.no/v1/sendinger/currentelements/autoavvikler').read().decode()
elements = loads(elements_url)
warnings = list()
if scheduled_replay():
if not elements['current']:
if elements['previous']:
warnings.append('Reprisen i autoavvikler er for kort, og har sluttet!')
else:
warnings.append('Planlagt reprise i autoavvikler, men ingen elementer i autoavvikler!')
elif elements['next']:
warnings.append('Det ligger mer enn ett element i autoavvikler. Nå: {0}({1}), neste: {2}({3}'.format(
elements['current']['title'], get_type(elements['current']['class']), elements['next']['title'],
get_type(elements['next']['class'])))
elif elements['previous']:
warnings.append(
'Det lå et element før gjelende element i autoavvikler. Nå: {0}({1}), forige: {2}({3}'.format(
elements['current']['title'], get_type(elements['current']['class']), elements['previous']['title'],
get_type(elements['previous']['class'])
))
else:
studio = has_elements('studio')
tekrom = has_elements('teknikerrom')
if studio:
if elements['current'] or elements['previous']:
warnings.append('Ligger elementer i både autoavvikler og i studio.')
if tekrom:
if elements['current'] or elements['previous']:
warnings.append('Ligger elementer i både autoavvikler og i teknikerrom.')
if not tekrom and not studio:
if elements['current']:
warnings.append('Ser ut som noen har slunteret unna og lagt inn reprise.')
if elements['next']:
warnings.append('Det ligger mer enn ett element i autoavvikler. Nå: {0}({1}), neste: {2}({3}'.format(
elements['current']['title'], get_type(elements['current']['class']), elements['next']['title'],
get_type(elements['next']['class'])))
if not elements['current']:
if elements['previous']:
warnings.append(
'Det er ingen elementer som spiller noe sted! (det lå et i autoavvikler, men det stoppet)')
else:
warnings.append('Det er inten elementer som spiller noe sted!')
return warnings
def get_type(class_type):
if class_type == 'music':
return 'låt'
if class_type == 'audio':
return 'lyd'
if class_type == 'promotion':
return 'jingle'
return 'ukjent'
def scheduled_replay():
current_shows_url = urlopen('http://pappagorg.radiorevolt.no/v1/sendinger/currentshows').read().decode()
current_shows_data = loads(current_shows_url)
return '(R)' in current_shows_data['current']['title']
def get_show():
current_shows_url = urlopen('http://pappagorg.radiorevolt.no/v1/sendinger/currentshows').read().decode()
current_shows_data = loads(current_shows_url)
show_end = current_shows_data['current']['endtime'].split(' ')[-1]
show_start = current_shows_data['current']['starttime'].split(' ')[-1]
show_now = current_shows_data['current']['title']
show_next = current_shows_data['next']['title']
return 'Nå: {0} ({1} - {2}), Neste: {3}'.format(show_now, show_start, show_end, show_next)
class DabPlugin(Plugin):
def process_message(self, data):
if data['text'] == '.dab':
for warning in debug():
self.outputs.append([data['channel'], warning])
if scheduled_replay():
self.outputs.append([data['channel'], get_show()])
else:
self.outputs.append([data['channel'], get_show()])
studio = get_elements('studio')
tekrom = get_elements('teknikerrom')
if studio:
self.outputs.append([data['channel'], studio + ' i studio 1.'])
if tekrom:
self.outputs.append([data['channel'], tekrom + ' i teknikerrom.'])
| mit | -2,632,871,222,282,662,000 | 39.177305 | 120 | 0.590468 | false | 3.756631 | false | false | false |
radio-astro-tools/spectral-cube | spectral_cube/stokes_spectral_cube.py | 5 | 5517 | from __future__ import print_function, absolute_import, division
import six
import numpy as np
from astropy.io.registry import UnifiedReadWriteMethod
from .io.core import StokesSpectralCubeRead, StokesSpectralCubeWrite
from .spectral_cube import SpectralCube, BaseSpectralCube
from . import wcs_utils
from .masks import BooleanArrayMask, is_broadcastable_and_smaller
__all__ = ['StokesSpectalCube']
VALID_STOKES = ['I', 'Q', 'U', 'V', 'RR', 'LL', 'RL', 'LR']
class StokesSpectralCube(object):
"""
A class to store a spectral cube with multiple Stokes parameters.
The individual Stokes cubes can share a common mask in addition to having
component-specific masks.
"""
def __init__(self, stokes_data, mask=None, meta=None, fill_value=None):
self._stokes_data = stokes_data
self._meta = meta or {}
self._fill_value = fill_value
reference = tuple(stokes_data.keys())[0]
for component in stokes_data:
if not isinstance(stokes_data[component], BaseSpectralCube):
raise TypeError("stokes_data should be a dictionary of "
"SpectralCube objects")
if not wcs_utils.check_equality(stokes_data[component].wcs,
stokes_data[reference].wcs):
raise ValueError("All spectral cubes in stokes_data "
"should have the same WCS")
if component not in VALID_STOKES:
raise ValueError("Invalid Stokes component: {0} - should be "
"one of I, Q, U, V, RR, LL, RL, LR".format(component))
if stokes_data[component].shape != stokes_data[reference].shape:
raise ValueError("All spectral cubes should have the same shape")
self._wcs = stokes_data[reference].wcs
self._shape = stokes_data[reference].shape
if isinstance(mask, BooleanArrayMask):
if not is_broadcastable_and_smaller(mask.shape, self._shape):
raise ValueError("Mask shape is not broadcastable to data shape:"
" {0} vs {1}".format(mask.shape, self._shape))
self._mask = mask
@property
def shape(self):
return self._shape
@property
def mask(self):
"""
The underlying mask
"""
return self._mask
@property
def wcs(self):
return self._wcs
def __dir__(self):
if six.PY2:
return self.components + dir(type(self)) + list(self.__dict__)
else:
return self.components + super(StokesSpectralCube, self).__dir__()
@property
def components(self):
return list(self._stokes_data.keys())
def __getattr__(self, attribute):
"""
Descriptor to return the Stokes cubes
"""
if attribute in self._stokes_data:
if self.mask is not None:
return self._stokes_data[attribute].with_mask(self.mask)
else:
return self._stokes_data[attribute]
else:
raise AttributeError("StokesSpectralCube has no attribute {0}".format(attribute))
def with_mask(self, mask, inherit_mask=True):
"""
Return a new StokesSpectralCube instance that contains a composite mask
of the current StokesSpectralCube and the new ``mask``.
Parameters
----------
mask : :class:`MaskBase` instance, or boolean numpy array
The mask to apply. If a boolean array is supplied,
it will be converted into a mask, assuming that
`True` values indicate included elements.
inherit_mask : bool (optional, default=True)
If True, combines the provided mask with the
mask currently attached to the cube
Returns
-------
new_cube : :class:`StokesSpectralCube`
A cube with the new mask applied.
Notes
-----
This operation returns a view into the data, and not a copy.
"""
if isinstance(mask, np.ndarray):
if not is_broadcastable_and_smaller(mask.shape, self.shape):
raise ValueError("Mask shape is not broadcastable to data shape: "
"%s vs %s" % (mask.shape, self.shape))
mask = BooleanArrayMask(mask, self.wcs)
if self._mask is not None:
return self._new_cube_with(mask=self.mask & mask if inherit_mask else mask)
else:
return self._new_cube_with(mask=mask)
def _new_cube_with(self, stokes_data=None,
mask=None, meta=None, fill_value=None):
data = self._stokes_data if stokes_data is None else stokes_data
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
fill_value = self._fill_value if fill_value is None else fill_value
cube = StokesSpectralCube(stokes_data=data, mask=mask,
meta=meta, fill_value=fill_value)
return cube
def with_spectral_unit(self, unit, **kwargs):
stokes_data = {k: self._stokes_data[k].with_spectral_unit(unit, **kwargs)
for k in self._stokes_data}
return self._new_cube_with(stokes_data=stokes_data)
read = UnifiedReadWriteMethod(StokesSpectralCubeRead)
write = UnifiedReadWriteMethod(StokesSpectralCubeWrite)
| bsd-3-clause | -5,200,881,096,275,218,000 | 33.917722 | 93 | 0.591988 | false | 4.157498 | false | false | false |
cs-au-dk/Artemis | WebKit/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py | 2 | 3757 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import sys
import os
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.gtk import GtkPort
from webkitpy.layout_tests.port import port_testcase
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.thirdparty.mock import Mock
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.tool.mocktool import MockOptions
class GtkPortTest(port_testcase.PortTestCase):
port_name = 'gtk'
port_maker = GtkPort
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
expected_stderr = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
def assertLinesEqual(self, a, b):
if hasattr(self, 'assertMultiLineEqual'):
self.assertMultiLineEqual(a, b)
else:
self.assertEqual(a.splitlines(), b.splitlines())
def test_get_crash_log(self):
core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
mock_empty_crash_log = """\
Crash log for DumpRenderTree (pid 28529):
Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
- enable core dumps: ulimit -c unlimited
- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
STDERR: <empty>""" % locals()
def _mock_gdb_output(coredump_path):
return (mock_empty_crash_log, [])
port = self.make_port()
port._get_gdb_output = mock_empty_crash_log
log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
self.assertLinesEqual(log, mock_empty_crash_log)
log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
self.assertLinesEqual(log, mock_empty_crash_log)
| gpl-3.0 | -4,762,748,133,023,776,000 | 44.817073 | 140 | 0.733032 | false | 3.877193 | true | false | false |
pajaco/spaces | spaces_config/config.py | 1 | 4045 | """
Regular ini rules, except:
special settings:
_use: those will be marked as dependencies of the current section
_provider: the python provider that creates shell commands
keys can have no value
values can be lists (whitespace is separator)
values can contain references to other sections and particular keys in them
"""
from ConfigParser import (ConfigParser, NoOptionError, Error,
NoSectionError, MAX_INTERPOLATION_DEPTH)
import re
class SpacesConfigParser(ConfigParser):
_USES_OPT = "_uses"
_PROVIDER_OPT = "_provider"
def gettuple(self, section, option):
value = self.get(section, option)
return list(filter(None, (x.strip() for x in value.splitlines())))
def getuses(self, section):
out = []
try:
for uses in self.gettuple(section, self._USES_OPT):
if uses[0] == '[':
uses = uses[1:]
if uses[-1] == ']':
uses = uses[:-1]
if not self.has_section(uses):
raise NoSectionError(uses)
out.append(uses)
except NoOptionError:
pass
# now those used for interpolation
for o, v in self.items(section, raw=True):
m = self._KEYCRE.match(v)
if m.group(1):
if not self.has_section(m.group(1)):
raise NoSectionError(m.group(1))
out.append(m.group(1))
return set(out)
def getprovider(self, section):
return self.get(section, self._PROVIDER_OPT)
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "[" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"\[([^\]]*)\]:(\S+)|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
elif self.has_section(s):
o = match.group(2)
if o is None:
return match.group()
# try exact match
if self.has_option(s, o):
return self.get(s, o)
# try partial; longest first
for option in reversed(sorted(self.options(s))):
if o.startswith(option):
v = self.get(s, option, raw=True)
return v + o[len(option):]
raise NoOptionError(s, o)
else:
raise NoSectionError(s)
if __name__ == "__main__":
from StringIO import StringIO
cfg = """
[test section 1]
testkeya: 1
testkeyb: a
b
_provider: BlahProvider
[test section 2]
#_uses: [test section 1]
testkeya: [test section 1]:testkeyafoo
testkeyb: [test section 1]:testkeyb
_provider: FooProvider
"""
config = SpacesConfigParser(allow_no_value=True)
config.readfp(StringIO(cfg), 'cfg')
#print config.sections()
#print config.items('test section 1')
#print config.items('test section 2')
#print config.gettuple('test section 2', 'testkeya')
#print config.gettuple('test section 2', 'testkeyb')
#print config.gettuple('test section 1', 'testkeyb')
#print config.gettuple('test section 1', 'testkeya')
print config.getuses('test section 1')
print config.getuses('test section 2')
#print config.getprovider('test section 1')
#print config.getprovider('test section 2')
| mit | 3,264,266,853,195,560,400 | 31.36 | 79 | 0.56267 | false | 4.127551 | true | false | false |
digris/openbroadcast.org | website/apps/statistics/label_statistics.py | 2 | 2149 | # -*- coding: utf-8 -*-
import datetime
import logging
from django.utils import timezone
from atracker.models import EventType
from .utils.queries import get_media_for_label
from .utils.xls_output_label import label_statistics_as_xls
TITLE_MAP = {
"playout": "Airplay statistics",
"download": "Download statistics",
"stream": "Stream statistics",
}
log = logging.getLogger(__name__)
def yearly_summary_for_label_as_xls(year, label, event_type_id, output=None):
log.debug("generating {} statistics for {} - {}".format(event_type_id, label, year))
year = int(year)
event_type = EventType.objects.get(pk=event_type_id)
title = "{}: open broadcast radio".format(TITLE_MAP.get(event_type.title))
start = datetime.datetime.combine(datetime.date(year, 1, 1), datetime.time.min)
end = datetime.datetime.combine(datetime.date(year, 12, 31), datetime.time.max)
objects = get_media_for_label(
label=label, start=start, end=end, event_type_id=event_type_id
)
years = [{"start": start, "end": end, "objects": objects}]
label_statistics_as_xls(label=label, years=years, title=title, output=output)
return output
def summary_for_label_as_xls(label, event_type_id, output=None):
log.debug(
"generating {} statistics for {} - since created".format(event_type_id, label)
)
event_type = EventType.objects.get(pk=event_type_id)
title = "{}: open broadcast radio".format(TITLE_MAP.get(event_type.title))
years = []
year_start = label.created.year if label.created.year >= 2014 else 2014
year_end = timezone.now().year
for year in range(year_end, year_start - 1, -1):
start = datetime.datetime.combine(datetime.date(year, 1, 1), datetime.time.min)
end = datetime.datetime.combine(datetime.date(year, 12, 31), datetime.time.max)
objects = get_media_for_label(
label=label, start=start, end=end, event_type_id=event_type_id
)
years.append({"start": start, "end": end, "objects": objects})
label_statistics_as_xls(label=label, years=years, title=title, output=output)
return output
| gpl-3.0 | 7,539,881,283,366,182,000 | 28.847222 | 88 | 0.67194 | false | 3.394945 | false | false | false |
sagiss/txrm2nexus | txm2nexuslib/scripts/manynorm.py | 1 | 3458 | #!/usr/bin/python
"""
(C) Copyright 2018 ALBA-CELLS
Authors: Marc Rosanes, Carlos Falcon, Zbigniew Reszela, Carlos Pascual
The program is distributed under the terms of the
GNU General Public License (or the Lesser GPL).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from argparse import RawTextHelpFormatter
from txm2nexuslib.images.multiplenormalization import normalize_images
def main():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
description = ('Normalize images located in different hdf5 files\n'
'Each file containing one of the images to be normalized')
parser = argparse.ArgumentParser(description=description,
formatter_class=RawTextHelpFormatter)
parser.register('type', 'bool', str2bool)
parser.add_argument('file_index_fn', metavar='file_index_fn',
type=str, help='DB index json filename of hdf5 data '
'files to be normalized')
parser.add_argument('-d', '--date', type=int,
default=None,
help='Date of files to be normalized\n'
'If None, no filter is applied\n'
'(default: None)')
parser.add_argument('-s', '--sample', type=str,
default=None,
help='Sample name of files to be normalized\n'
'If None, all sample names are normalized\n'
'(default: None)')
parser.add_argument('-e', '--energy', type=float,
default=None,
help='Energy of files to be normalized\n'
'If None, no filter is applied\n'
'(default: None)')
parser.add_argument('-t', '--table_h5', type=str,
default="hdf5_proc",
help='DB table of hdf5 to be normalized\n'
'If None, default tinyDB table is used\n'
'(default: hdf5_proc)')
parser.add_argument('-a', '--average_ff', type='bool',
default=True,
help='Compute average FF and normalize using it\n'
'(default: True)')
parser.add_argument('-c', '--cores', type=int,
default=-1,
help='Number of cores used for the format conversion\n'
'(default is max of available CPUs: -1)')
args = parser.parse_args()
normalize_images(args.file_index_fn, table_name=args.table_h5,
date=args.date, sample=args.sample, energy=args.energy,
average_ff=args.average_ff, cores=args.cores)
if __name__ == "__main__":
main()
| gpl-3.0 | -2,847,785,818,340,875,300 | 38.747126 | 79 | 0.569115 | false | 4.456186 | false | false | false |
Julioocz/SIMNAV | simnav/gui/base/resultadosTorre.py | 1 | 1492 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/julio/Desktop/SIMNAV/simnav/gui/base/ui/resultadosTorre.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(494, 339)
self.gridLayout_2 = QtWidgets.QGridLayout(Form)
self.gridLayout_2.setObjectName("gridLayout_2")
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.label = QtWidgets.QLabel(Form)
font = QtGui.QFont()
font.setPointSize(12)
font.setItalic(True)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.resultadosSimulacion = QtWidgets.QTextBrowser(Form)
self.resultadosSimulacion.setObjectName("resultadosSimulacion")
self.gridLayout.addWidget(self.resultadosSimulacion, 1, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label.setText(_translate("Form", "Resultados internos columna de destilación"))
| mit | 1,173,775,051,875,866,600 | 38.236842 | 119 | 0.685446 | false | 3.681481 | false | false | false |
mathstuf/bodhi | bodhi/mail.py | 1 | 15521 | # This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import smtplib
from textwrap import wrap
from kitchen.text.converters import to_unicode, to_bytes
from kitchen.iterutils import iterate
from . import log
from .util import get_rpm_header
from .config import config
#
# All of the email messages that bodhi is going to be sending around.
#
MESSAGES = {
'new': {
'body': u"""\
%(email)s has submitted a new update for %(release)s\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'email': agent,
'release': x.release.long_name,
'updatestr': unicode(x)
}
},
'deleted': {
'body': u"""\
%(email)s has deleted the %(package)s update for %(release)s\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'package': x.title,
'email': agent,
'release': '%s %s' % (x.release.long_name, x.status),
'updatestr': unicode(x)
}
},
'edited': {
'body': u"""\
%(email)s has edited the %(package)s update for %(release)s\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'package': x.title,
'email': agent,
'release': '%s %s' % (x.release.long_name, x.status),
'updatestr': unicode(x)
}
},
'pushed': {
'body': u"""\
%(package)s has been successfully pushed for %(release)s.\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'package': x.title,
'release': '%s %s' % (x.release.long_name, x.status),
'updatestr': unicode(x)
}
},
'testing': {
'body': u"""\
%(submitter)s has requested the pushing of the following update to testing:\n
%(updatestr)s
""",
'fields': lambda agent, x: {
'submitter': agent,
'updatestr': unicode(x)
}
},
'unpush': {
'body': u"""\
%(submitter)s has requested the unpushing of the following update:\n
%(updatestr)s
""",
'fields': lambda agent, x: {
'submitter': agent,
'updatestr': unicode(x)
}
},
'obsolete': {
'body': u"""\
%(submitter)s has obsoleted the following update:\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'submitter': agent,
'updatestr': unicode(x)
}
},
'unpushed': {
'body': u"""\
The following update has been unpushed\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'updatestr': unicode(x)
}
},
'revoke': {
'body': u"""\
%(submitter)s has revoked the request of the following update:\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'submitter': agent,
'updatestr': unicode(x)
}
},
'stable': {
'body': u"""\
%(submitter)s has requested the pushing of the following update stable:\n
%(updatestr)s
""",
'fields': lambda agent, x: {
'submitter': agent,
'updatestr': unicode(x)
}
},
'moved': {
'body': u"""\
The following update has been moved from Testing to Stable:\n\n%(updatestr)s
""",
'fields': lambda agent, x: {
'updatestr': unicode(x)
}
},
'stablekarma': {
'body': u"""\
The following update has reached a karma of %(karma)d and is being
automatically marked as stable.\n
%(updatestr)s
""",
'fields': lambda agent, x: {
'karma': x.karma,
'updatestr': unicode(x)
}
},
'unstable': {
'body': u"""\
The following update has reached a karma of %(karma)d and is being
automatically marked as unstable. This update will be unpushed from the
repository.\n
%(updatestr)s
""",
'fields': lambda agent, x: {
'karma': x.karma,
'updatestr': unicode(x)
}
},
'comment': {
'body': u"""\
The following comment has been added to the %(package)s update:
%(comment)s
To reply to this comment, please visit the URL at the bottom of this mail
%(updatestr)s
""",
'fields': lambda agent, x: {
'package': x.title,
'comment': x.comments[-1],
'updatestr': unicode(x)
}
},
'old_testing': {
'body': u"""\
The update for %(package)s has been in 'testing' status for over 2 weeks.
This update can be marked as stable after it achieves a karma of
%(stablekarma)d or by clicking 'Push to Stable'.
This is just a courtesy nagmail. Updates may reside in the testing repository
for more than 2 weeks if you deem it necessary.
You can submit this update to be pushed to the stable repository by going to
the following URL:
https://admin.fedoraproject.org/updates/request/stable/%(package)s
or by running the following command with the bodhi-client:
bodhi -R stable %(package)s
%(updatestr)s
""",
'fields': lambda agent, x: {
'package': x.title,
'stablekarma': x.stable_karma,
'updatestr': unicode(x)
}
},
'security': {
'body': u"""\
%(submitter)s has submitted the following update.
%(updatestr)s
To approve this update and request that it be pushed to stable, you can use the
link below:
https://admin.fedoraproject.org/updates/approve/%(package)s
""",
'fields': lambda agent, x: {
'package': x.title,
'submitter': agent,
'updatestr': unicode(x)
}
},
}
fedora_errata_template = u"""\
--------------------------------------------------------------------------------
Fedora%(testing)s Update Notification
%(updateid)s
%(date)s
--------------------------------------------------------------------------------
Name : %(name)s
Product : %(product)s
Version : %(version)s
Release : %(release)s
URL : %(url)s
Summary : %(summary)s
Description :
%(description)s
--------------------------------------------------------------------------------
%(notes)s%(changelog)s%(references)s
This update can be installed with the "yum" update program. Use
su -c 'yum%(yum_repository)s update %(name)s' at the command line.
For more information, refer to "Managing Software with yum",
available at https://docs.fedoraproject.org/yum/.
All packages are signed with the Fedora Project GPG key. More details on the
GPG keys used by the Fedora Project can be found at
https://fedoraproject.org/keys
--------------------------------------------------------------------------------
"""
fedora_epel_errata_template = u"""\
--------------------------------------------------------------------------------
Fedora EPEL%(testing)s Update Notification
%(updateid)s
%(date)s
--------------------------------------------------------------------------------
Name : %(name)s
Product : %(product)s
Version : %(version)s
Release : %(release)s
URL : %(url)s
Summary : %(summary)s
Description :
%(description)s
--------------------------------------------------------------------------------
%(notes)s%(changelog)s%(references)s
This update can be installed with the "yum" update programs. Use
su -c 'yum%(yum_repository)s update %(name)s' at the command line.
For more information, refer to "Managing Software with yum",
available at https://docs.fedoraproject.org/yum/.
All packages are signed with the Fedora EPEL GPG key. More details on the
GPG keys used by the Fedora Project can be found at
https://fedoraproject.org/keys
--------------------------------------------------------------------------------
"""
maillist_template = u"""\
================================================================================
%(name)s-%(version)s-%(release)s (%(updateid)s)
%(summary)s
--------------------------------------------------------------------------------
%(notes)s%(changelog)s%(references)s
"""
def get_template(update, use_template='fedora_errata_template'):
"""
Build the update notice for a given update.
@param use_template: the template to generate this notice with
"""
from bodhi.models import UpdateStatus, UpdateType
use_template = globals()[use_template]
line = unicode('-' * 80) + '\n'
templates = []
for build in update.builds:
h = get_rpm_header(build.nvr)
info = {}
info['date'] = str(update.date_pushed)
info['name'] = h['name']
info['summary'] = h['summary']
info['version'] = h['version']
info['release'] = h['release']
info['url'] = h['url']
if update.status is UpdateStatus.testing:
info['testing'] = ' Test'
info['yum_repository'] = ' --enablerepo=updates-testing'
else:
info['testing'] = ''
info['yum_repository'] = ''
info['subject'] = u"%s%s%s Update: %s" % (
update.type is UpdateType.security and '[SECURITY] ' or '',
update.release.long_name, info['testing'], build.nvr)
info['updateid'] = update.alias
info['description'] = h['description']
info['product'] = update.release.long_name
info['notes'] = ""
if update.notes and len(update.notes):
info['notes'] = u"Update Information:\n\n%s\n" % \
'\n'.join(wrap(update.notes, width=80))
info['notes'] += line
# Add this updates referenced Bugzillas and CVEs
i = 1
info['references'] = ""
if len(update.bugs) or len(update.cves):
info['references'] = u"References:\n\n"
parent = True in [bug.parent for bug in update.bugs]
for bug in update.bugs:
# Don't show any tracker bugs for security updates
if update.type is UpdateType.security:
# If there is a parent bug, don't show trackers
if parent and not bug.parent:
log.debug("Skipping tracker bug %s" % bug)
continue
title = (bug.title != 'Unable to fetch title' and
bug.title != 'Invalid bug number') and \
' - %s' % bug.title or ''
info['references'] += u" [ %d ] Bug #%d%s\n %s\n" % \
(i, bug.bug_id, title, bug.url)
i += 1
for cve in update.cves:
info['references'] += u" [ %d ] %s\n %s\n" % \
(i, cve.cve_id, cve.url)
i += 1
info['references'] += line
# Find the most recent update for this package, other than this one
lastpkg = build.get_latest()
# Grab the RPM header of the previous update, and generate a ChangeLog
info['changelog'] = u""
if lastpkg:
oldh = get_rpm_header(lastpkg)
oldtime = oldh['changelogtime']
text = oldh['changelogtext']
del oldh
if not text:
oldtime = 0
elif len(text) != 1:
oldtime = oldtime[0]
info['changelog'] = u"ChangeLog:\n\n%s%s" % \
(to_unicode(build.get_changelog(oldtime)), line)
try:
templates.append((info['subject'], use_template % info))
except UnicodeDecodeError:
# We can't trust the strings we get from RPM
log.debug("UnicodeDecodeError! Will try again after decoding")
for (key, value) in info.items():
if value:
info[key] = to_unicode(value)
templates.append((info['subject'], use_template % info))
return templates
def _send_mail(from_addr, to_addr, body):
"""A lower level function to send emails with smtplib"""
smtp_server = config.get('smtp_server')
if not smtp_server:
log.info('Not sending email: No smtp_server defined')
return
smtp = None
try:
log.debug('Connecting to %s', smtp_server)
smtp = smtplib.SMTP(smtp_server)
smtp.sendmail(from_addr, [to_addr], body)
except:
log.exception('Unable to send mail')
finally:
if smtp:
smtp.quit()
def send_mail(from_addr, to_addr, subject, body_text, headers=None):
if not from_addr:
from_addr = config.get('bodhi_email')
if not from_addr:
log.warn('Unable to send mail: bodhi_email not defined in the config')
return
if to_addr in config.get('exclude_mail'):
return
from_addr = to_bytes(from_addr)
to_addr = to_bytes(to_addr)
subject = to_bytes(subject)
body_text = to_bytes(body_text)
msg = ['From: %s' % from_addr, 'To: %s' % to_addr]
if headers:
for key, value in headers.items():
msg.append('%s: %s' % (key, to_bytes(value)))
msg += ['Subject: %s' % subject, '', body_text]
body = '\r\n'.join(msg)
log.info('Sending mail to %s: %s', to_addr, subject)
_send_mail(from_addr, to_addr, body)
def send(to, msg_type, update, sender=None, agent=None):
""" Send an update notification email to a given recipient """
assert agent, 'No agent given'
critpath = getattr(update, 'critpath', False) and '[CRITPATH] ' or ''
headers = {}
if msg_type != 'buildroot_override':
headers = {
"X-Bodhi-Update-Type": update.type.description,
"X-Bodhi-Update-Release": update.release.name,
"X-Bodhi-Update-Status": update.status.description,
"X-Bodhi-Update-Builds": ",".join([b.nvr for b in update.builds]),
"X-Bodhi-Update-Title": update.title,
"X-Bodhi-Update-Pushed": update.pushed,
"X-Bodhi-Update-Submitter": update.user.name,
}
if update.request:
headers["X-Bodhi-Update-Request"] = update.request.description
initial_message_id = "<bodhi-update-%s-%s-%s@%s>" % (
update.id, update.user.name, update.release.name,
config.get('message_id_email_domain'))
if msg_type == 'new':
headers["Message-ID"] = initial_message_id
else:
headers["References"] = initial_message_id
headers["In-Reply-To"] = initial_message_id
for person in iterate(to):
send_mail(sender, person, '[Fedora Update] %s[%s] %s' % (critpath,
msg_type, update.title), MESSAGES[msg_type]['body'] %
MESSAGES[msg_type]['fields'](agent, update), headers)
def send_releng(subject, body):
""" Send the Release Engineering team a message """
send_mail(config.get('bodhi_email'), config.get('release_team_address'),
subject, body)
def send_admin(msg_type, update, sender=None):
""" Send an update notification to the admins/release team. """
send(config.get('release_team_address'), msg_type, update, sender)
| gpl-2.0 | 5,604,852,034,062,969,000 | 31.134576 | 80 | 0.54481 | false | 3.818204 | true | false | false |
bacaldwell/ironic | ironic/drivers/modules/iscsi_deploy.py | 1 | 23264 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ironic_lib import disk_utils
from ironic_lib import utils as ironic_utils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from six.moves.urllib import parse
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
LOG = logging.getLogger(__name__)
# NOTE(rameshg87): This file now registers some of opts in pxe group.
# This is acceptable for now as a future refactoring into
# separate boot and deploy interfaces is planned, and moving config
# options twice is not recommended. Hence we would move the parameters
# to the appropriate place in the final refactoring.
pxe_opts = [
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help=_('Additional append parameters for baremetal PXE boot.')),
cfg.StrOpt('default_ephemeral_format',
default='ext4',
help=_('Default file system format for ephemeral partition, '
'if one is created.')),
cfg.StrOpt('images_path',
default='/var/lib/ironic/images/',
help=_('On the ironic-conductor node, directory where images '
'are stored on disk.')),
cfg.StrOpt('instance_master_path',
default='/var/lib/ironic/master_images',
help=_('On the ironic-conductor node, directory where master '
'instance images are stored on disk. '
'Setting to <None> disables image caching.')),
cfg.IntOpt('image_cache_size',
default=20480,
help=_('Maximum size (in MiB) of cache for master images, '
'including those in use.')),
# 10080 here is 1 week - 60*24*7. It is entirely arbitrary in the absence
# of a facility to disable the ttl entirely.
cfg.IntOpt('image_cache_ttl',
default=10080,
help=_('Maximum TTL (in minutes) for old master images in '
'cache.')),
cfg.StrOpt('disk_devices',
default='cciss/c0d0,sda,hda,vda',
help=_('The disk devices to scan while doing the deploy.')),
]
iscsi_opts = [
cfg.PortOpt('portal_port',
default=3260,
help=_('The port number on which the iSCSI portal listens '
'for incoming connections.')),
]
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
CONF.register_opts(iscsi_opts, group='iscsi')
DISK_LAYOUT_PARAMS = ('root_gb', 'swap_mb', 'ephemeral_gb')
@image_cache.cleanup(priority=50)
class InstanceImageCache(image_cache.ImageCache):
def __init__(self):
super(self.__class__, self).__init__(
CONF.pxe.instance_master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60)
def _get_image_dir_path(node_uuid):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.pxe.images_path, node_uuid)
def _get_image_file_path(node_uuid):
"""Generate the full path for an instances disk."""
return os.path.join(_get_image_dir_path(node_uuid), 'disk')
def _save_disk_layout(node, i_info):
"""Saves the disk layout.
The disk layout used for deployment of the node, is saved.
:param node: the node of interest
:param i_info: instance information (a dictionary) for the node, containing
disk layout information
"""
driver_internal_info = node.driver_internal_info
driver_internal_info['instance'] = {}
for param in DISK_LAYOUT_PARAMS:
driver_internal_info['instance'][param] = i_info[param]
node.driver_internal_info = driver_internal_info
node.save()
def check_image_size(task):
"""Check if the requested image is larger than the root partition size.
:param task: a TaskManager instance containing the node to act on.
:raises: InstanceDeployFailure if size of the image is greater than root
partition.
"""
i_info = deploy_utils.parse_instance_info(task.node)
image_path = _get_image_file_path(task.node.uuid)
image_mb = disk_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. Image '
'virtual size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
def cache_instance_image(ctx, node):
"""Fetch the instance's image from Glance
This method pulls the AMI and writes them to the appropriate place
on local disk.
:param ctx: context
:param node: an ironic node object
:returns: a tuple containing the uuid of the image and the path in
the filesystem where image is cached.
"""
i_info = deploy_utils.parse_instance_info(node)
fileutils.ensure_tree(_get_image_dir_path(node.uuid))
image_path = _get_image_file_path(node.uuid)
uuid = i_info['image_source']
LOG.debug("Fetching image %(ami)s for node %(uuid)s",
{'ami': uuid, 'uuid': node.uuid})
deploy_utils.fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)],
CONF.force_raw_images)
return (uuid, image_path)
def destroy_images(node_uuid):
"""Delete instance's image file.
:param node_uuid: the uuid of the ironic node.
"""
ironic_utils.unlink_without_raise(_get_image_file_path(node_uuid))
utils.rmtree_without_raise(_get_image_dir_path(node_uuid))
InstanceImageCache().clean_up()
def get_deploy_info(node, address, iqn, port=None, lun='1'):
"""Returns the information required for doing iSCSI deploy in a dictionary.
:param node: ironic node object
:param address: iSCSI address
:param iqn: iSCSI iqn for the target disk
:param port: iSCSI port, defaults to one specified in the configuration
:param lun: iSCSI lun, defaults to '1'
:raises: MissingParameterValue, if some required parameters were not
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
i_info = deploy_utils.parse_instance_info(node)
params = {
'address': address,
'port': port or CONF.iscsi.portal_port,
'iqn': iqn,
'lun': lun,
'image_path': _get_image_file_path(node.uuid),
'node_uuid': node.uuid}
is_whole_disk_image = node.driver_internal_info['is_whole_disk_image']
if not is_whole_disk_image:
params.update({'root_mb': i_info['root_mb'],
'swap_mb': i_info['swap_mb'],
'ephemeral_mb': i_info['ephemeral_mb'],
'preserve_ephemeral': i_info['preserve_ephemeral'],
'boot_option': deploy_utils.get_boot_option(node),
'boot_mode': _get_boot_mode(node)})
# Append disk label if specified
disk_label = deploy_utils.get_disk_label(node)
if disk_label is not None:
params['disk_label'] = disk_label
missing = [key for key in params if params[key] is None]
if missing:
raise exception.MissingParameterValue(
_("Parameters %s were not passed to ironic"
" for deploy.") % missing)
if is_whole_disk_image:
return params
# configdrive and ephemeral_format are nullable
params['ephemeral_format'] = i_info.get('ephemeral_format')
params['configdrive'] = i_info.get('configdrive')
return params
def continue_deploy(task, **kwargs):
"""Resume a deployment upon getting POST data from deploy ramdisk.
This method raises no exceptions because it is intended to be
invoked asynchronously as a callback from the deploy ramdisk.
:param task: a TaskManager instance containing the node to act on.
:param kwargs: the kwargs to be passed to deploy.
:raises: InvalidState if the event is not allowed by the associated
state machine.
:returns: a dictionary containing the following keys:
For partition image:
'root uuid': UUID of root partition
'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
NOTE: If key exists but value is None, it means partition doesn't
exist.
For whole disk image:
'disk identifier': ID of the disk to which image was deployed.
"""
node = task.node
params = get_deploy_info(node, **kwargs)
def _fail_deploy(task, msg):
"""Fail the deploy after logging and setting error states."""
LOG.error(msg)
deploy_utils.set_failed_state(task, msg)
destroy_images(task.node.uuid)
raise exception.InstanceDeployFailure(msg)
# NOTE(lucasagomes): Let's make sure we don't log the full content
# of the config drive here because it can be up to 64MB in size,
# so instead let's log "***" in case config drive is enabled.
if LOG.isEnabledFor(logging.logging.DEBUG):
log_params = {
k: params[k] if k != 'configdrive' else '***'
for k in params
}
LOG.debug('Continuing deployment for node %(node)s, params %(params)s',
{'node': node.uuid, 'params': log_params})
uuid_dict_returned = {}
try:
if node.driver_internal_info['is_whole_disk_image']:
uuid_dict_returned = deploy_utils.deploy_disk_image(**params)
else:
uuid_dict_returned = deploy_utils.deploy_partition_image(**params)
except Exception as e:
msg = (_('Deploy failed for instance %(instance)s. '
'Error: %(error)s') %
{'instance': node.instance_uuid, 'error': e})
_fail_deploy(task, msg)
root_uuid_or_disk_id = uuid_dict_returned.get(
'root uuid', uuid_dict_returned.get('disk identifier'))
if not root_uuid_or_disk_id:
msg = (_("Couldn't determine the UUID of the root "
"partition or the disk identifier after deploying "
"node %s") % node.uuid)
_fail_deploy(task, msg)
if params.get('preserve_ephemeral', False):
# Save disk layout information, to check that they are unchanged
# for any future rebuilds
_save_disk_layout(node, deploy_utils.parse_instance_info(node))
destroy_images(node.uuid)
return uuid_dict_returned
def do_agent_iscsi_deploy(task, agent_client):
"""Method invoked when deployed with the agent ramdisk.
This method is invoked by drivers for doing iSCSI deploy
using agent ramdisk. This method assumes that the agent
is booted up on the node and is heartbeating.
:param task: a TaskManager object containing the node.
:param agent_client: an instance of agent_client.AgentClient
which will be used during iscsi deploy (for exposing node's
target disk via iSCSI, for install boot loader, etc).
:returns: a dictionary containing the following keys:
For partition image:
'root uuid': UUID of root partition
'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
NOTE: If key exists but value is None, it means partition doesn't
exist.
For whole disk image:
'disk identifier': ID of the disk to which image was deployed.
:raises: InstanceDeployFailure, if it encounters some error
during the deploy.
"""
node = task.node
i_info = deploy_utils.parse_instance_info(node)
wipe_disk_metadata = not i_info['preserve_ephemeral']
iqn = 'iqn.2008-10.org.openstack:%s' % node.uuid
portal_port = CONF.iscsi.portal_port
result = agent_client.start_iscsi_target(
node, iqn,
portal_port,
wipe_disk_metadata=wipe_disk_metadata)
if result['command_status'] == 'FAILED':
msg = (_("Failed to start the iSCSI target to deploy the "
"node %(node)s. Error: %(error)s") %
{'node': node.uuid, 'error': result['command_error']})
deploy_utils.set_failed_state(task, msg)
raise exception.InstanceDeployFailure(reason=msg)
address = parse.urlparse(node.driver_internal_info['agent_url'])
address = address.hostname
uuid_dict_returned = continue_deploy(task, iqn=iqn, address=address)
root_uuid_or_disk_id = uuid_dict_returned.get(
'root uuid', uuid_dict_returned.get('disk identifier'))
# TODO(lucasagomes): Move this bit saving the root_uuid to
# continue_deploy()
driver_internal_info = node.driver_internal_info
driver_internal_info['root_uuid_or_disk_id'] = root_uuid_or_disk_id
node.driver_internal_info = driver_internal_info
node.save()
return uuid_dict_returned
def _get_boot_mode(node):
"""Gets the boot mode.
:param node: A single Node.
:returns: A string representing the boot mode type. Defaults to 'bios'.
"""
boot_mode = deploy_utils.get_boot_mode_for_deploy(node)
if boot_mode:
return boot_mode
return "bios"
def validate(task):
"""Validates the pre-requisites for iSCSI deploy.
Validates whether node in the task provided has some ports enrolled.
This method validates whether conductor url is available either from CONF
file or from keystone.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if the URL of the Ironic API service is not
configured in config file and is not accessible via Keystone
catalog.
:raises: MissingParameterValue if no ports are enrolled for the given node.
"""
try:
# TODO(lucasagomes): Validate the format of the URL
CONF.conductor.api_url or keystone.get_service_url()
except (exception.KeystoneFailure,
exception.CatalogNotFound,
exception.KeystoneUnauthorized) as e:
raise exception.InvalidParameterValue(_(
"Couldn't get the URL of the Ironic API service from the "
"configuration file or keystone catalog. Keystone error: %s") % e)
# Validate the root device hints
deploy_utils.parse_root_device_hints(task.node)
deploy_utils.parse_instance_info(task.node)
class ISCSIDeploy(base.DeployInterface):
"""iSCSI Deploy Interface for deploy-related actions."""
def get_properties(self):
return {}
def validate(self, task):
"""Validate the deployment information for the task's node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue.
:raises: MissingParameterValue
"""
task.driver.boot.validate(task)
node = task.node
# Check the boot_mode, boot_option and disk_label capabilities values.
deploy_utils.validate_capabilities(node)
# TODO(rameshg87): iscsi_ilo driver uses this method. Remove
# and copy-paste it's contents here once iscsi_ilo deploy driver
# broken down into separate boot and deploy implementations.
validate(task)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Start deployment of the task's node.
Fetches instance image, updates the DHCP port options for next boot,
and issues a reboot request to the power driver.
This causes the node to boot into the deployment ramdisk and triggers
the next phase of PXE-based deployment via agent heartbeats.
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DEPLOYWAIT.
"""
node = task.node
cache_instance_image(task.context, node)
check_image_size(task)
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node.
Power off the node. All actual clean-up is done in the clean_up()
method which should be called separately.
:param task: a TaskManager instance containing the node to act on.
:returns: deploy state DELETED.
:raises: NetworkError if the cleaning ports cannot be removed.
:raises: InvalidParameterValue when the wrong state is specified
or the wrong driver info is specified.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
"""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.unconfigure_tenant_networks(task)
return states.DELETED
@task_manager.require_exclusive_lock
def prepare(self, task):
"""Prepare the deployment environment for this task's node.
Generates the TFTP configuration for PXE-booting both the deployment
and user images, fetches the TFTP image from Glance and add it to the
local cache.
:param task: a TaskManager instance containing the node to act on.
:raises: NetworkError: if the previous cleaning ports cannot be removed
or if new cleaning ports cannot be created.
:raises: InvalidParameterValue when the wrong power state is specified
or the wrong driver info is specified for power management.
:raises: other exceptions by the node's power driver if something
wrong occurred during the power action.
:raises: any boot interface's prepare_ramdisk exceptions.
"""
node = task.node
if node.provision_state == states.ACTIVE:
task.driver.boot.prepare_instance(task)
else:
if node.provision_state == states.DEPLOYING:
# Adding the node to provisioning network so that the dhcp
# options get added for the provisioning port.
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.network.add_provisioning_network(task)
deploy_opts = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, deploy_opts)
def clean_up(self, task):
"""Clean up the deployment environment for the task's node.
Unlinks TFTP and instance images and triggers image cache cleanup.
Removes the TFTP configuration files for this node.
:param task: a TaskManager instance containing the node to act on.
"""
destroy_images(task.node.uuid)
task.driver.boot.clean_up_ramdisk(task)
task.driver.boot.clean_up_instance(task)
provider = dhcp_factory.DHCPFactory()
provider.clean_dhcp(task)
def take_over(self, task):
pass
def get_clean_steps(self, task):
"""Get the list of clean steps from the agent.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the clean steps are not yet
available (cached), for example, when a node has just been
enrolled and has not been cleaned yet.
:returns: A list of clean step dictionaries.
"""
steps = deploy_utils.agent_get_clean_steps(
task, interface='deploy',
override_priorities={
'erase_devices': CONF.deploy.erase_devices_priority})
return steps
def execute_clean_step(self, task, step):
"""Execute a clean step asynchronously on the agent.
:param task: a TaskManager object containing the node
:param step: a clean step dictionary to execute
:raises: NodeCleaningFailure if the agent does not return a command
status
:returns: states.CLEANWAIT to signify the step will be completed
asynchronously.
"""
return deploy_utils.agent_execute_clean_step(task, step)
def prepare_cleaning(self, task):
"""Boot into the agent to prepare for cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the previous cleaning ports cannot
be removed or if new cleaning ports cannot be created
:returns: states.CLEANWAIT to signify an asynchronous prepare.
"""
return deploy_utils.prepare_inband_cleaning(
task, manage_boot=True)
def tear_down_cleaning(self, task):
"""Clean up the PXE and DHCP files after cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the cleaning ports cannot be
removed
"""
deploy_utils.tear_down_inband_cleaning(
task, manage_boot=True)
class VendorPassthru(agent_base_vendor.BaseAgentVendor):
"""Interface to mix IPMI and PXE vendor-specific interfaces."""
@task_manager.require_exclusive_lock
def continue_deploy(self, task, **kwargs):
"""Method invoked when deployed using iSCSI.
This method is invoked during a heartbeat from an agent when
the node is in wait-call-back state. This deploys the image on
the node and then configures the node to boot according to the
desired boot option (netboot or localboot).
:param task: a TaskManager object containing the node.
:param kwargs: the kwargs passed from the heartbeat method.
:raises: InstanceDeployFailure, if it encounters some error during
the deploy.
"""
task.process_event('resume')
node = task.node
LOG.debug('Continuing the deployment on node %s', node.uuid)
uuid_dict_returned = do_agent_iscsi_deploy(task, self._client)
root_uuid = uuid_dict_returned.get('root uuid')
efi_sys_uuid = uuid_dict_returned.get('efi system partition uuid')
self.prepare_instance_to_boot(task, root_uuid, efi_sys_uuid)
self.reboot_and_finish_deploy(task)
| apache-2.0 | -4,818,179,978,091,737,000 | 38.430508 | 79 | 0.649716 | false | 4.15058 | true | false | false |
SunDwarf/taciturn | taciturn/processing.py | 1 | 1207 | import logging
from celery import group
from app import app, celery, client
from taciturn import registry
logger = logging.getLogger("taciturn")
from celery.utils.log import get_task_logger
task_logger = get_task_logger(__name__)
# Synchronous tasks.
def verify(request):
"""
Verifies a request against the config.
"""
if request is None:
return False
if len(request) < 1:
return False
if not isinstance(request[0], str):
return False
if request[0] != client.api_key:
logger.error("Failed to validate API key! Key provided was: {}".format(request[0]))
return False
return True
# Async stuff
@celery.task
def process(data, ptype):
task_logger.info("Entered processor, type {}".format(ptype))
if data[2]["username"] == app.config["API_USERNAME"]:
return
if ptype == 0:
g = group(func.s(data) for func in registry.topic_created_registry.values())
elif ptype == 1:
g = group(func.s(data) for func in registry.post_created_registry.values())
else:
task_logger.error("Unable to handle event of type {}".format(ptype))
return
# Call the group.
g.apply_async()
| mit | -6,951,728,969,206,534,000 | 22.666667 | 91 | 0.645402 | false | 3.81962 | false | false | false |
pombredanne/grr | lib/timeseries_test.py | 1 | 3727 | #!/usr/bin/env python
"""Tests for grr.lib.timeseries."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import timeseries
class TimeseriesTest(test_lib.GRRBaseTest):
def makeSeries(self):
s = timeseries.Timeseries()
for i in range(1, 101):
s.Append(i, (i+5) * 10000)
return s
def testAppendFilterRange(self):
s = self.makeSeries()
self.assertEqual(100, len(s.data))
self.assertEqual([1, 60000], s.data[0])
self.assertEqual([100, 1050000], s.data[-1])
s.FilterRange(100000, 200000)
self.assertEqual(10, len(s.data))
self.assertEqual([5, 100000], s.data[0])
self.assertEqual([14, 190000], s.data[-1])
def testNormalize(self):
s = self.makeSeries()
s.Normalize(10 * 10000, 100000, 600000)
self.assertEqual(5, len(s.data))
self.assertEqual([9.5, 100000], s.data[0])
self.assertEqual([49.5, 500000], s.data[-1])
s = timeseries.Timeseries()
for i in range(0, 1000):
s.Append(0.5, i * 10)
s.Normalize(200, 5000, 10000)
self.assertEqual(25, len(s.data))
self.assertListEqual(s.data[0], [0.5, 5000])
self.assertListEqual(s.data[24], [0.5, 9800])
s = timeseries.Timeseries()
for i in range(0, 1000):
s.Append(i, i * 10)
s.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_COUNTER)
self.assertEqual(25, len(s.data))
self.assertListEqual(s.data[0], [519, 5000])
self.assertListEqual(s.data[24], [999, 9800])
def testToDeltas(self):
s = self.makeSeries()
self.assertEqual(100, len(s.data))
s.ToDeltas()
self.assertEqual(99, len(s.data))
self.assertEqual([1, 60000], s.data[0])
self.assertEqual([1, 1040000], s.data[-1])
s = timeseries.Timeseries()
for i in range(0, 1000):
s.Append(i, i * 1e6)
s.Normalize(20 * 1e6,
500 * 1e6,
1000 * 1e6,
mode=timeseries.NORMALIZE_MODE_COUNTER)
self.assertEqual(25, len(s.data))
self.assertListEqual(s.data[0], [519, int(500 * 1e6)])
s.ToDeltas()
self.assertEqual(24, len(s.data))
self.assertListEqual(s.data[0], [20, int(500 * 1e6)])
self.assertListEqual(s.data[23], [20, int(960 * 1e6)])
def testNormalizeFillsGapsWithNone(self):
s = timeseries.Timeseries()
for i in range(21, 51):
s.Append(i, (i+5) * 10000)
for i in range(81, 101):
s.Append(i, (i+5) * 10000)
s.Normalize(10 * 10000, 10 * 10000, 120 * 10000)
self.assertEqual(11, len(s.data))
self.assertEqual([None, 100000], s.data[0])
self.assertEqual([22.5, 200000], s.data[1])
self.assertEqual([None, 600000], s.data[5])
self.assertEqual([None, 1100000], s.data[-1])
def testMakeIncreasing(self):
s = timeseries.Timeseries()
for i in range(0, 5):
s.Append(i, i * 1000)
for i in range(0, 5):
s.Append(i, (i+6) * 1000)
self.assertEqual(10, len(s.data))
self.assertEqual([4, 10000], s.data[-1])
s.MakeIncreasing()
self.assertEqual(10, len(s.data))
self.assertEqual([8, 10000], s.data[-1])
def testAddRescale(self):
s1 = timeseries.Timeseries()
for i in range(0, 5):
s1.Append(i, i * 1000)
s2 = timeseries.Timeseries()
for i in range(0, 5):
s2.Append(2*i, i * 1000)
s1.Add(s2)
for i in range(0, 5):
self.assertEqual(3 * i, s1.data[i][0])
s1.Rescale(1/3.0)
for i in range(0, 5):
self.assertEqual(i, s1.data[i][0])
def testMean(self):
s = timeseries.Timeseries()
self.assertEqual(None, s.Mean())
s = self.makeSeries()
self.assertEqual(100, len(s.data))
self.assertEqual(50, s.Mean())
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 | -4,190,545,593,282,263,600 | 28.117188 | 73 | 0.61524 | false | 2.918559 | true | false | false |
rafaelmartins/distpatch | distpatch/helpers.py | 1 | 2633 | # -*- coding: utf-8 -*-
"""
distpatch.helpers
~~~~~~~~~~~~~~~~~
Helper functions for distpatch.
:copyright: (c) 2011 by Rafael Goncalves Martins
:license: GPL-2, see LICENSE for more details.
"""
import atexit
import os
import shutil
import tempfile
from subprocess import call
def tempdir(*args, **kwargs):
def cleanup(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
dirname = tempfile.mkdtemp(*args, **kwargs)
atexit.register(cleanup, dirname)
return dirname
def uncompressed_filename_and_compressor(tarball):
'''returns the filename of the given tarball uncompressed and the compressor.
'''
compressors = {
'.gz': ('gzip', ''),
'.bz2': ('bzip2', ''),
'.xz': ('xz', ''),
'.lzma': ('lzma', ''),
'.tgz': ('gzip', '.tar'),
'.tbz2': ('bzip2', '.tar'),
}
dest, ext = os.path.splitext(tarball)
compressor = compressors.get(ext.lower(), None)
if compressor is None:
return tarball, None
return dest + compressor[1], compressor[0]
def uncompress(fname, output_dir=None):
# extract to a temporary directory and move back, to keep both files:
# compressed and uncompressed.
base_src = os.path.basename(fname)
base_dest, compressor = uncompressed_filename_and_compressor(base_src)
tmp_dir = tempdir()
tmp_src = os.path.join(tmp_dir, base_src)
tmp_dest = os.path.join(tmp_dir, base_dest)
local_dir = os.path.dirname(os.path.abspath(fname))
local_src = os.path.join(local_dir, base_src)
if output_dir is None:
local_dest = os.path.join(local_dir, base_dest)
else:
local_dest = os.path.join(output_dir, base_dest)
shutil.copy2(local_src, tmp_src)
if compressor is not None:
rv = call([compressor, '-fd', tmp_src])
if rv is not os.EX_OK:
raise RuntimeError('Failed to decompress file: %d' % rv)
if not os.path.exists(tmp_dest):
raise RuntimeError('Decompressed file not found: %s' % tmp_dest)
shutil.move(tmp_dest, local_dest)
# we do automatic cleanup, but we should remove it here to save disk space
shutil.rmtree(tmp_dir)
return local_dest
def format_size(size):
KB = 1024
MB = KB * 1024
GB = MB * 1024
TB = GB * 1024
size = float(size)
if size > TB:
return '%.3f TB' % (size / TB)
elif size > GB:
return '%.3f GB' % (size / GB)
elif size > MB:
return '%.3f MB' % (size / MB)
elif size > KB:
return '%.3f KB' % (size / KB)
else:
return '%.0f B' % size
| gpl-2.0 | 7,718,130,299,848,889,000 | 27.010638 | 81 | 0.597797 | false | 3.478203 | false | false | false |
unioslo/cerebrum | contrib/no/Indigo/create_users.py | 1 | 3492 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2006 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Process changelog entries, create user for persons registered
by ABC-import
"""
import sys
import cereconf
from Cerebrum import Errors
from Cerebrum.Utils import Factory
from Cerebrum.modules import CLHandler
def build_account(person_id):
person.clear()
try:
person.find(person_id)
except Errors.NotFoundError:
logger.error("Could not find person %s.", person_id)
return None
person_aff = person.get_affiliations()
acc_id = account.list_accounts_by_owner_id(person_id)
if acc_id == []:
unames = account.suggest_unames(person)
if unames[0] is None:
logger.error('Could not generate user name for %s.', person_id)
return None
account.clear()
account.populate(unames[0], constants.entity_person, person_id,
None, default_creator_id, default_expire_date)
account.write_db()
for s in cereconf.BOFHD_NEW_USER_SPREADS:
account.add_spread(constants.Spread(s))
account.write_db()
if person_aff:
for row in person_aff:
account.set_account_type(row['ou_id'], row['affiliation'])
account.write_db()
return account.entity_id
def main():
global db, constants, logger, person, account
global default_creator_id, default_expire_date
db = Factory.get('Database')()
db.cl_init(change_program='auto_create')
acc = Factory.get('Account')(db)
constants = Factory.get('Constants')(db)
clconstants = Factory.get('CLConstants')(db)
cl_handler = CLHandler.CLHandler(db)
logger = Factory.get_logger('cronjob')
person = Factory.get('Person')(db)
account = Factory.get('Account')(db)
acc.find_by_name(cereconf.INITIAL_ACCOUNTNAME)
default_creator_id = acc.entity_id
default_expire_date = None
try:
cl_events = cl_handler.get_events('auto_create',
(clconstants.person_create,))
if cl_events == []:
logger.info("Nothing to do.")
sys.exit(0)
for event in cl_events:
if event['change_type_id'] == clconstants.person_create:
new_acc_id = build_account(event['subject_entity'])
if new_acc_id is None:
logger.error('Could not create an account for %s',
event['subject_entity'])
continue
cl_handler.confirm_event(event)
except TypeError as e:
logger.warn("No such event, %s" % e)
return None
cl_handler.commit_confirmations()
if __name__ == '__main__':
main()
| gpl-2.0 | 8,037,125,126,642,669,000 | 31.036697 | 75 | 0.632302 | false | 3.799782 | false | false | false |
pajlada/tyggbot | pajbot/modules/basic/permaban.py | 2 | 4621 | import logging
from pajbot.managers.adminlog import AdminLogManager
from pajbot.managers.db import DBManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.models.user import User
from pajbot.modules import BaseModule
from pajbot.modules import ModuleType
from pajbot.modules.basic import BasicCommandsModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class PermabanModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Permaban"
DESCRIPTION = "Permabans a user (re-bans them if unbanned by a mod)"
CATEGORY = "Moderation"
ENABLED_DEFAULT = True
MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED
PARENT_MODULE = BasicCommandsModule
SETTINGS = [
ModuleSetting(
key="unban_from_chat",
label="Unban the user from chat when the unpermaban command is used",
type="boolean",
required=True,
default=False,
),
ModuleSetting(
key="enable_send_timeout",
label="Timeout the user for one second to note the unban reason in the mod logs",
type="boolean",
required=True,
default=True,
),
ModuleSetting(
key="timeout_reason",
label="Timeout Reason | Available arguments: {source}",
type="text",
required=False,
placeholder="",
default="Un-permabanned by {source}",
constraints={},
),
]
@staticmethod
def permaban_command(bot, source, message, **rest):
if not message:
return
username = message.split(" ")[0]
with DBManager.create_session_scope() as db_session:
user = User.find_by_user_input(db_session, username)
if not user:
bot.whisper(source, "No user with that name found.")
return False
if user.banned:
bot.whisper(source, "User is already permabanned.")
return False
user.banned = True
bot.ban(
user,
reason=f"User has been added to the {bot.nickname} banlist. Contact a moderator level 1000 or higher for unban.",
)
log_msg = f"{user} has been permabanned"
bot.whisper(source, log_msg)
AdminLogManager.add_entry("Permaban added", source, log_msg)
def unpermaban_command(self, bot, source, message, **rest):
if not message:
return
username = message.split(" ")[0]
with DBManager.create_session_scope() as db_session:
user = User.find_by_user_input(db_session, username)
if not user:
bot.whisper(source, "No user with that name found.")
return False
if user.banned is False:
bot.whisper(source, "User is not permabanned.")
return False
user.banned = False
log_msg = f"{user} is no longer permabanned"
bot.whisper(source, log_msg)
AdminLogManager.add_entry("Permaban remove", source, log_msg)
if self.settings["unban_from_chat"] is True:
bot.unban(user)
if self.settings["enable_send_timeout"] is True:
bot.timeout(user, 1, self.settings["timeout_reason"].format(source=source), once=True)
def load_commands(self, **options):
self.commands["permaban"] = Command.raw_command(
self.permaban_command,
level=1000,
description="Permanently ban a user. Every time the user types in chat, he will be permanently banned again",
examples=[
CommandExample(
None,
"Default usage",
chat="user:!permaban Karl_Kons\n" "bot>user:Karl_Kons has now been permabanned",
description="Permanently ban Karl_Kons from the chat",
).parse()
],
)
self.commands["unpermaban"] = Command.raw_command(
self.unpermaban_command,
level=1000,
description="Remove a permanent ban from a user",
examples=[
CommandExample(
None,
"Default usage",
chat="user:!unpermaban Karl_Kons\n" "bot>user:Karl_Kons is no longer permabanned",
description="Remove permanent ban from Karl_Kons",
).parse()
],
)
| mit | -5,795,660,723,985,454,000 | 34.274809 | 129 | 0.567842 | false | 4.208561 | false | false | false |
Freeseer/freeseer | src/freeseer/frontend/configtool/GeneralWidget.py | 1 | 4557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
freeseer - vga/presentation capture software
Copyright (C) 2011 Free and Open Source Software Learning Centre
http://fosslc.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For support, questions, suggestions or any other inquiries, visit:
http://wiki.github.com/Freeseer/freeseer/
@author: Thanh Ha
'''
from PyQt4 import QtCore, QtGui
from freeseer.frontend.qtcommon.dpi_adapt_qtgui import QGroupBoxWithDpi
from freeseer.frontend.qtcommon.dpi_adapt_qtgui import QWidgetWithDpi
class GeneralWidget(QWidgetWithDpi):
'''
classdocs
'''
def __init__(self, parent=None):
'''
Constructor
'''
super(GeneralWidget, self).__init__(parent)
self.mainLayout = QtGui.QVBoxLayout()
self.mainLayout.addStretch(0)
self.setLayout(self.mainLayout)
fontSize = self.font().pixelSize()
fontUnit = "px"
if fontSize == -1: # Font is set as points, not pixels.
fontUnit = "pt"
fontSize = self.font().pointSize()
boxStyle = "QGroupBox {{ font-weight: bold; font-size: {}{} }}".format(fontSize + 1, fontUnit)
BOX_WIDTH = 400
BOX_HEIGHT = 60
#
# Heading
#
self.title = QtGui.QLabel(u"{0} General {1}".format(u'<h1>', u'</h1>'))
self.mainLayout.insertWidget(0, self.title)
self.mainLayout.insertSpacerItem(1, QtGui.QSpacerItem(0, fontSize * 2))
#
# Language
#
languageBoxLayout = QtGui.QVBoxLayout()
self.languageGroupBox = QGroupBoxWithDpi("Language")
self.languageGroupBox.setLayout(languageBoxLayout)
self.languageGroupBox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.languageGroupBox.setFixedSize(BOX_WIDTH, BOX_HEIGHT)
self.languageGroupBox.setStyleSheet(boxStyle)
self.mainLayout.insertWidget(2, self.languageGroupBox)
languageLayout = QtGui.QHBoxLayout()
languageBoxLayout.addLayout(languageLayout)
self.translateButton = QtGui.QPushButton("Help us translate")
self.languageComboBox = QtGui.QComboBox()
self.languageComboBox.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
languageLayout.addWidget(self.languageComboBox, 2)
languageLayout.addSpacerItem(self.qspacer_item_with_dpi(40, 0))
languageLayout.addWidget(self.translateButton, 1)
#
# Appearance
#
appearanceBoxLayout = QtGui.QVBoxLayout()
self.appearanceGroupBox = QGroupBoxWithDpi("Appearance")
self.appearanceGroupBox.setLayout(appearanceBoxLayout)
self.appearanceGroupBox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.appearanceGroupBox.setFixedSize(BOX_WIDTH, BOX_HEIGHT)
self.appearanceGroupBox.setStyleSheet(boxStyle)
self.mainLayout.insertWidget(3, self.appearanceGroupBox)
self.autoHideCheckBox = QtGui.QCheckBox("Auto-Hide to system tray on record")
appearanceBoxLayout.addWidget(self.autoHideCheckBox)
#
# Reset
#
resetBoxLayout = QtGui.QVBoxLayout()
self.resetGroupBox = QGroupBoxWithDpi("Reset")
self.resetGroupBox.setLayout(resetBoxLayout)
self.resetGroupBox.setSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
self.resetGroupBox.setFixedSize(BOX_WIDTH / 2, BOX_HEIGHT)
self.resetGroupBox.setStyleSheet(boxStyle)
self.mainLayout.addWidget(self.resetGroupBox)
self.mainLayout.addSpacerItem(self.qspacer_item_with_dpi(0, 20))
resetLayout = QtGui.QHBoxLayout()
resetBoxLayout.addLayout(resetLayout)
self.resetButton = QtGui.QPushButton("Reset settings to defaults")
resetLayout.addWidget(self.resetButton)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
main = GeneralWidget()
main.show()
sys.exit(app.exec_())
| gpl-3.0 | -8,458,313,585,973,517,000 | 34.88189 | 102 | 0.692122 | false | 3.868421 | false | false | false |
plq/spyne | examples/queue.py | 2 | 7848 | #!/usr/bin/env python
# encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""This is a simple db-backed persistent task queue implementation.
The producer (client) writes requests to a database table. The consumer (server)
polls the database every 10 seconds and processes new requests.
"""
import time
import logging
from sqlalchemy import MetaData
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from spyne import MethodContext, Application, rpc, TTableModel, Integer32, \
UnsignedInteger, ByteArray, Mandatory as M
# client stuff
from spyne import RemoteService, RemoteProcedureBase, ClientBase
# server stuff
from spyne import ServerBase, Service
from spyne.protocol.soap import Soap11
db = create_engine('sqlite:///:memory:')
TableModel = TTableModel(MetaData(bind=db))
#
# The database tables used to store tasks and worker status
#
class TaskQueue(TableModel):
__tablename__ = 'task_queue'
id = Integer32(primary_key=True)
data = ByteArray(nullable=False)
class WorkerStatus(TableModel):
__tablename__ = 'worker_status'
worker_id = Integer32(pk=True, autoincrement=False)
task = TaskQueue.customize(store_as='table')
#
# The consumer (server) implementation
#
class Consumer(ServerBase):
transport = 'http://sqlalchemy.persistent.queue/'
def __init__(self, db, app, consumer_id):
super(Consumer, self).__init__(app)
self.session = sessionmaker(bind=db)()
self.id = consumer_id
if self.session.query(WorkerStatus).get(self.id) is None:
self.session.add(WorkerStatus(worker_id=self.id))
self.session.commit()
def serve_forever(self):
while True:
# get the id of the last processed job
last = self.session.query(WorkerStatus).with_lockmode("update") \
.filter_by(worker_id=self.id).one()
# get new tasks
task_id = 0
if last.task is not None:
task_id = last.task.id
task_queue = self.session.query(TaskQueue) \
.filter(TaskQueue.id > task_id) \
.order_by(TaskQueue.id)
for task in task_queue:
initial_ctx = MethodContext(self)
# this is the critical bit, where the request bytestream is put
# in the context so that the protocol can deserialize it.
initial_ctx.in_string = [task.data]
# these two lines are purely for logging
initial_ctx.transport.consumer_id = self.id
initial_ctx.transport.task_id = task.id
# The ``generate_contexts`` call parses the incoming stream and
# splits the request into header and body parts.
# There will be only one context here because no auxiliary
# methods are defined.
for ctx in self.generate_contexts(initial_ctx, 'utf8'):
# This is standard boilerplate for invoking services.
self.get_in_object(ctx)
if ctx.in_error:
self.get_out_string(ctx)
logging.error(''.join(ctx.out_string))
continue
self.get_out_object(ctx)
if ctx.out_error:
self.get_out_string(ctx)
logging.error(''.join(ctx.out_string))
continue
self.get_out_string(ctx)
logging.debug(''.join(ctx.out_string))
last.task_id = task.id
self.session.commit()
time.sleep(10)
#
# The producer (client) implementation
#
class RemoteProcedure(RemoteProcedureBase):
def __init__(self, db, app, name, out_header):
super(RemoteProcedure, self).__init__(db, app, name, out_header)
self.Session = sessionmaker(bind=db)
def __call__(self, *args, **kwargs):
session = self.Session()
for ctx in self.contexts:
self.get_out_object(ctx, args, kwargs)
self.get_out_string(ctx)
out_string = ''.join(ctx.out_string)
print(out_string)
session.add(TaskQueue(data=out_string))
session.commit()
session.close()
class Producer(ClientBase):
def __init__(self, db, app):
super(Producer, self).__init__(db, app)
self.service = RemoteService(RemoteProcedure, db, app)
#
# The service to call.
#
class AsyncService(Service):
@rpc(M(UnsignedInteger))
def sleep(ctx, integer):
print("Sleeping for %d seconds..." % (integer))
time.sleep(integer)
def _on_method_call(ctx):
print("This is worker id %d, processing task id %d." % (
ctx.transport.consumer_id, ctx.transport.task_id))
AsyncService.event_manager.add_listener('method_call', _on_method_call)
if __name__ == '__main__':
# set up logging
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('sqlalchemy.engine.base.Engine').setLevel(logging.DEBUG)
# Setup colorama and termcolor, if they are there
try:
from termcolor import colored
from colorama import init
init()
except ImportError, e:
logging.error("Install 'termcolor' and 'colorama' packages to get "
"colored log output")
def colored(s, *args, **kwargs):
return s
logging.info(colored("Creating database tables...", 'yellow', attrs=['bold']))
TableModel.Attributes.sqla_metadata.create_all()
logging.info(colored("Creating Application...", 'blue'))
application = Application([AsyncService], 'spyne.async',
in_protocol=Soap11(), out_protocol=Soap11())
logging.info(colored("Making requests...", 'yellow', attrs=['bold']))
producer = Producer(db, application)
for i in range(10):
producer.service.sleep(i)
logging.info(colored("Spawning consumer...", 'blue'))
# process requests. it'd make most sense if this was in another process.
consumer = Consumer(db, application, consumer_id=1)
consumer.serve_forever()
| lgpl-2.1 | -899,054,871,729,292,200 | 32.969697 | 82 | 0.636804 | false | 4.162865 | false | false | false |
mapzen/vector-datasource | vectordatasource/transform.py | 1 | 293983 | # -*- encoding: utf-8 -*-
# transformation functions to apply to features
from collections import defaultdict, namedtuple
from math import ceil
from numbers import Number
from shapely.geometry.collection import GeometryCollection
from shapely.geometry import box as Box
from shapely.geometry import LinearRing
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
from shapely.geometry.multilinestring import MultiLineString
from shapely.geometry.multipoint import MultiPoint
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.polygon import orient
from shapely.ops import linemerge
from shapely.strtree import STRtree
from sort import pois as sort_pois
from StreetNames import short_street_name
from sys import float_info
from tilequeue.process import _make_valid_if_necessary
from tilequeue.process import _visible_shape
from tilequeue.tile import calc_meters_per_pixel_area
from tilequeue.tile import normalize_geometry_type
from tilequeue.tile import tolerance_for_zoom
from tilequeue.transform import calculate_padded_bounds
from util import to_float
from util import safe_int
from zope.dottedname.resolve import resolve
import csv
import pycountry
import re
import shapely.errors
import shapely.wkb
import shapely.ops
import kdtree
feet_pattern = re.compile('([+-]?[0-9.]+)\'(?: *([+-]?[0-9.]+)")?')
number_pattern = re.compile('([+-]?[0-9.]+)')
# pattern to detect numbers with units.
# PLEASE: keep this in sync with the conversion factors below.
unit_pattern = re.compile('([+-]?[0-9.]+) *(mi|km|m|nmi|ft)')
# multiplicative conversion factor from the unit into meters.
# PLEASE: keep this in sync with the unit_pattern above.
unit_conversion_factor = {
'mi': 1609.3440,
'km': 1000.0000,
'm': 1.0000,
'nmi': 1852.0000,
'ft': 0.3048
}
# used to detect if the "name" of a building is
# actually a house number.
digits_pattern = re.compile('^[0-9-]+$')
# used to detect station names which are followed by a
# parenthetical list of line names.
station_pattern = re.compile(r'([^(]*)\(([^)]*)\).*')
# used to detect if an airport's IATA code is the "short"
# 3-character type. there are also longer codes, and ones
# which include numbers, but those seem to be used for
# less important airports.
iata_short_code_pattern = re.compile('^[A-Z]{3}$')
def _to_float_meters(x):
if x is None:
return None
as_float = to_float(x)
if as_float is not None:
return as_float
# trim whitespace to simplify further matching
x = x.strip()
# try looking for a unit
unit_match = unit_pattern.match(x)
if unit_match is not None:
value = unit_match.group(1)
units = unit_match.group(2)
value_as_float = to_float(value)
if value_as_float is not None:
return value_as_float * unit_conversion_factor[units]
# try if it looks like an expression in feet via ' "
feet_match = feet_pattern.match(x)
if feet_match is not None:
feet = feet_match.group(1)
inches = feet_match.group(2)
feet_as_float = to_float(feet)
inches_as_float = to_float(inches)
total_inches = 0.0
parsed_feet_or_inches = False
if feet_as_float is not None:
total_inches = feet_as_float * 12.0
parsed_feet_or_inches = True
if inches_as_float is not None:
total_inches += inches_as_float
parsed_feet_or_inches = True
if parsed_feet_or_inches:
# international inch is exactly 25.4mm
meters = total_inches * 0.0254
return meters
# try and match the first number that can be parsed
for number_match in number_pattern.finditer(x):
potential_number = number_match.group(1)
as_float = to_float(potential_number)
if as_float is not None:
return as_float
return None
def _to_int_degrees(x):
if x is None:
return None
as_int = safe_int(x)
if as_int is not None:
# always return within range of 0 to 360
return as_int % 360
# trim whitespace to simplify further matching
x = x.strip()
cardinals = {
'north': 0, 'N': 0, 'NNE': 22, 'NE': 45, 'ENE': 67,
'east': 90, 'E': 90, 'ESE': 112, 'SE': 135, 'SSE': 157,
'south': 180, 'S': 180, 'SSW': 202, 'SW': 225, 'WSW': 247,
'west': 270, 'W': 270, 'WNW': 292, 'NW': 315, 'NNW': 337
}
# protect against bad cardinal notations
return cardinals.get(x)
def _coalesce(properties, *property_names):
for prop in property_names:
val = properties.get(prop)
if val:
return val
return None
def _remove_properties(properties, *property_names):
for prop in property_names:
properties.pop(prop, None)
return properties
def _is_name(key):
"""
Return True if this key looks like a name.
This isn't as simple as testing if key == 'name', as there are alternative
name-like tags such as 'official_name', translated names such as 'name:en',
and left/right names for boundaries. This function aims to match all of
those variants.
"""
# simplest and most common case first
if key == 'name':
return True
# translations next
if key.startswith('name:'):
return True
# then any of the alternative forms of name
return any(key.startswith(p) for p in tag_name_alternates)
def _remove_names(props):
"""
Remove entries in the props dict for which the key looks like a name.
Modifies the props dict in-place and also returns it.
"""
for k in props.keys():
if _is_name(k):
props.pop(k)
return props
def _has_name(props):
"""
Return true if any of the props look like a name.
"""
for k in props.keys():
if _is_name(k):
return True
return False
def _building_calc_levels(levels):
levels = max(levels, 1)
levels = (levels * 3) + 2
return levels
def _building_calc_min_levels(min_levels):
min_levels = max(min_levels, 0)
min_levels = min_levels * 3
return min_levels
# slightly bigger than the tallest structure in the world. at the time
# of writing, the Burj Khalifa at 829.8m. this is used as a check to make
# sure that nonsense values (e.g: buildings a million meters tall) don't
# make it into the data.
TALLEST_STRUCTURE_METERS = 1000.0
def _building_calc_height(height_val, levels_val, levels_calc_fn):
height = _to_float_meters(height_val)
if height is not None and 0 <= height <= TALLEST_STRUCTURE_METERS:
return height
levels = _to_float_meters(levels_val)
if levels is None:
return None
levels = levels_calc_fn(levels)
if 0 <= levels <= TALLEST_STRUCTURE_METERS:
return levels
return None
def add_id_to_properties(shape, properties, fid, zoom):
properties['id'] = fid
return shape, properties, fid
def detect_osm_relation(shape, properties, fid, zoom):
# Assume all negative ids indicate the data was a relation. At the
# moment, this is true because only osm contains negative
# identifiers. Should this change, this logic would need to become
# more robust
if isinstance(fid, Number) and fid < 0:
properties['osm_relation'] = True
return shape, properties, fid
def remove_feature_id(shape, properties, fid, zoom):
return shape, properties, None
def building_height(shape, properties, fid, zoom):
height = _building_calc_height(
properties.get('height'), properties.get('building_levels'),
_building_calc_levels)
if height is not None:
properties['height'] = height
else:
properties.pop('height', None)
return shape, properties, fid
def building_min_height(shape, properties, fid, zoom):
min_height = _building_calc_height(
properties.get('min_height'), properties.get('building_min_levels'),
_building_calc_min_levels)
if min_height is not None:
properties['min_height'] = min_height
else:
properties.pop('min_height', None)
return shape, properties, fid
def synthesize_volume(shape, props, fid, zoom):
area = props.get('area')
height = props.get('height')
if area is not None and height is not None:
props['volume'] = int(area * height)
return shape, props, fid
def building_trim_properties(shape, properties, fid, zoom):
properties = _remove_properties(
properties,
'building', 'building_part',
'building_levels', 'building_min_levels')
return shape, properties, fid
def road_classifier(shape, properties, fid, zoom):
source = properties.get('source')
assert source, 'Missing source in road query'
if source == 'naturalearthdata.com':
return shape, properties, fid
properties.pop('is_link', None)
properties.pop('is_tunnel', None)
properties.pop('is_bridge', None)
kind_detail = properties.get('kind_detail', '')
tunnel = properties.get('tunnel', '')
bridge = properties.get('bridge', '')
if kind_detail.endswith('_link'):
properties['is_link'] = True
if tunnel in ('yes', 'true'):
properties['is_tunnel'] = True
if bridge and bridge != 'no':
properties['is_bridge'] = True
return shape, properties, fid
def add_road_network_from_ncat(shape, properties, fid, zoom):
"""
Many South Korean roads appear to have an "ncat" tag, which seems to
correspond to the type of road network (perhaps "ncat" = "national
category"?)
This filter carries that through into "network", unless it is already
populated.
"""
if properties.get('network') is None:
tags = properties.get('tags', {})
ncat = _make_unicode_or_none(tags.get('ncat'))
if ncat == u'국도':
# national roads - gukdo
properties['network'] = 'KR:national'
elif ncat == u'광역시도로':
# metropolitan city roads - gwangyeoksido
properties['network'] = 'KR:metropolitan'
elif ncat == u'특별시도':
# special city (Seoul) roads - teukbyeolsido
properties['network'] = 'KR:metropolitan'
elif ncat == u'고속도로':
# expressways - gosokdoro
properties['network'] = 'KR:expressway'
elif ncat == u'지방도':
# local highways - jibangdo
properties['network'] = 'KR:local'
return shape, properties, fid
def road_trim_properties(shape, properties, fid, zoom):
properties = _remove_properties(properties, 'bridge', 'tunnel')
return shape, properties, fid
def _reverse_line_direction(shape):
if shape.type != 'LineString':
return False
shape.coords = shape.coords[::-1]
return True
def road_oneway(shape, properties, fid, zoom):
oneway = properties.get('oneway')
if oneway in ('-1', 'reverse'):
did_reverse = _reverse_line_direction(shape)
if did_reverse:
properties['oneway'] = 'yes'
elif oneway in ('true', '1'):
properties['oneway'] = 'yes'
elif oneway in ('false', '0'):
properties['oneway'] = 'no'
return shape, properties, fid
def road_abbreviate_name(shape, properties, fid, zoom):
name = properties.get('name', None)
if not name:
return shape, properties, fid
short_name = short_street_name(name)
properties['name'] = short_name
return shape, properties, fid
def route_name(shape, properties, fid, zoom):
rn = properties.get('route_name')
if rn:
name = properties.get('name')
if not name:
properties['name'] = rn
del properties['route_name']
elif rn == name:
del properties['route_name']
return shape, properties, fid
def place_population_int(shape, properties, fid, zoom):
population_str = properties.pop('population', None)
population = to_float(population_str)
if population is not None:
properties['population'] = int(population)
return shape, properties, fid
def population_rank(shape, properties, fid, zoom):
population = properties.get('population')
pop_breaks = [
1000000000,
100000000,
50000000,
20000000,
10000000,
5000000,
1000000,
500000,
200000,
100000,
50000,
20000,
10000,
5000,
2000,
1000,
200,
0,
]
for i, pop_break in enumerate(pop_breaks):
if population >= pop_break:
rank = len(pop_breaks) - i
break
else:
rank = 0
properties['population_rank'] = rank
return (shape, properties, fid)
def pois_capacity_int(shape, properties, fid, zoom):
pois_capacity_str = properties.pop('capacity', None)
capacity = to_float(pois_capacity_str)
if capacity is not None:
properties['capacity'] = int(capacity)
return shape, properties, fid
def pois_direction_int(shape, props, fid, zoom):
direction = props.get('direction')
if not direction:
return shape, props, fid
props['direction'] = _to_int_degrees(direction)
return shape, props, fid
def water_tunnel(shape, properties, fid, zoom):
tunnel = properties.pop('tunnel', None)
if tunnel in (None, 'no', 'false', '0'):
properties.pop('is_tunnel', None)
else:
properties['is_tunnel'] = True
return shape, properties, fid
def admin_level_as_int(shape, properties, fid, zoom):
admin_level_str = properties.pop('admin_level', None)
if admin_level_str is None:
return shape, properties, fid
try:
admin_level_int = int(admin_level_str)
except ValueError:
return shape, properties, fid
properties['admin_level'] = admin_level_int
return shape, properties, fid
def tags_create_dict(shape, properties, fid, zoom):
tags_hstore = properties.get('tags')
if tags_hstore:
tags = dict(tags_hstore)
properties['tags'] = tags
return shape, properties, fid
def tags_remove(shape, properties, fid, zoom):
properties.pop('tags', None)
return shape, properties, fid
tag_name_alternates = (
'int_name',
'loc_name',
'nat_name',
'official_name',
'old_name',
'reg_name',
'short_name',
'name_left',
'name_right',
'name:short',
)
def _alpha_2_code_of(lang):
try:
alpha_2_code = lang.alpha_2.encode('utf-8')
except AttributeError:
return None
return alpha_2_code
# a structure to return language code lookup results preserving the priority
# (lower is better) of the result for use in situations where multiple inputs
# can map to the same output.
LangResult = namedtuple('LangResult', ['code', 'priority'])
def _convert_wof_l10n_name(x):
lang_str_iso_639_3 = x[:3]
if len(lang_str_iso_639_3) != 3:
return None
try:
lang = pycountry.languages.get(alpha_3=lang_str_iso_639_3)
except KeyError:
return None
return LangResult(code=_alpha_2_code_of(lang), priority=0)
def _convert_ne_l10n_name(x):
if len(x) != 2:
return None
try:
lang = pycountry.languages.get(alpha_2=x)
except KeyError:
return None
return LangResult(code=_alpha_2_code_of(lang), priority=0)
def _normalize_osm_lang_code(x):
# first try an alpha-2 code
try:
lang = pycountry.languages.get(alpha_2=x)
except KeyError:
# next, try an alpha-3 code
try:
lang = pycountry.languages.get(alpha_3=x)
except KeyError:
# finally, try a "bibliographic" code
try:
lang = pycountry.languages.get(bibliographic=x)
except KeyError:
return None
return _alpha_2_code_of(lang)
def _normalize_country_code(x):
x = x.upper()
try:
c = pycountry.countries.get(alpha_2=x)
except KeyError:
try:
c = pycountry.countries.get(alpha_3=x)
except KeyError:
try:
c = pycountry.countries.get(numeric=x)
except KeyError:
return None
alpha2_code = c.alpha_2
return alpha2_code
osm_l10n_lookup = set([
'zh-min-nan',
'zh-yue'
])
def _convert_osm_l10n_name(x):
if x in osm_l10n_lookup:
return LangResult(code=x, priority=0)
if '_' not in x:
lang_code_candidate = x
country_candidate = None
else:
fields_by_underscore = x.split('_', 1)
lang_code_candidate, country_candidate = fields_by_underscore
lang_code_result = _normalize_osm_lang_code(lang_code_candidate)
if lang_code_result is None:
return None
priority = 0
if country_candidate:
country_result = _normalize_country_code(country_candidate)
if country_result is None:
result = lang_code_result
priority = 1
else:
result = '%s_%s' % (lang_code_result, country_result)
else:
result = lang_code_result
return LangResult(code=result, priority=priority)
def tags_name_i18n(shape, properties, fid, zoom):
tags = properties.get('tags')
if not tags:
return shape, properties, fid
name = properties.get('name')
if not name:
return shape, properties, fid
source = properties.get('source')
is_wof = source == 'whosonfirst.org'
is_osm = source == 'openstreetmap.org'
is_ne = source == 'naturalearthdata.com'
if is_osm:
alt_name_prefix_candidates = [
'name:left:', 'name:right:', 'name:', 'alt_name:', 'old_name:'
]
convert_fn = _convert_osm_l10n_name
elif is_wof:
alt_name_prefix_candidates = ['name:']
convert_fn = _convert_wof_l10n_name
elif is_ne:
# replace name_xx with name:xx in tags
for k in tags.keys():
if k.startswith('name_'):
value = tags.pop(k)
tag_k = k.replace('_', ':')
tags[tag_k] = value
alt_name_prefix_candidates = ['name:']
convert_fn = _convert_ne_l10n_name
else:
# conversion function only implemented for things which come from OSM,
# NE or WOF - implement more cases here when more localized named
# sources become available.
return shape, properties, fid
langs = {}
for k, v in tags.items():
for candidate in alt_name_prefix_candidates:
if k.startswith(candidate):
lang_code = k[len(candidate):]
normalized_lang_code = convert_fn(lang_code)
if normalized_lang_code:
code = normalized_lang_code.code
priority = normalized_lang_code.priority
lang_key = '%s%s' % (candidate, code)
if lang_key not in langs or \
priority < langs[lang_key][0].priority:
langs[lang_key] = (normalized_lang_code, v)
for lang_key, (lang, v) in langs.items():
properties[lang_key] = v
for alt_tag_name_candidate in tag_name_alternates:
alt_tag_name_value = tags.get(alt_tag_name_candidate)
if alt_tag_name_value and alt_tag_name_value != name:
properties[alt_tag_name_candidate] = alt_tag_name_value
return shape, properties, fid
def _no_none_min(a, b):
"""
Usually, `min(None, a)` will return None. This isn't
what we want, so this one will return a non-None
argument instead. This is basically the same as
treating None as greater than any other value.
"""
if a is None:
return b
elif b is None:
return a
else:
return min(a, b)
def _sorted_attributes(features, attrs, attribute):
"""
When the list of attributes is a dictionary, use the
sort key parameter to order the feature attributes.
evaluate it as a function and return it. If it's not
in the right format, attrs isn't a dict then returns
None.
"""
sort_key = attrs.get('sort_key')
reverse = attrs.get('reverse')
assert sort_key is not None, "Configuration " + \
"parameter 'sort_key' is missing, please " + \
"check your configuration."
# first, we find the _minimum_ ordering over the
# group of key values. this is because we only do
# the intersection in groups by the cutting
# attribute, so can only sort in accordance with
# that.
group = dict()
for feature in features:
val = feature[1].get(sort_key)
key = feature[1].get(attribute)
val = _no_none_min(val, group.get(key))
group[key] = val
# extract the sorted list of attributes from the
# grouped (attribute, order) pairs, ordering by
# the order.
all_attrs = sorted(group.iteritems(),
key=lambda x: x[1], reverse=bool(reverse))
# strip out the sort key in return
return [x[0] for x in all_attrs]
# the table of geometry dimensions indexed by geometry
# type name. it would be better to use geometry type ID,
# but it seems like that isn't exposed.
#
# each of these is a bit-mask, so zero dimentions is
# represented by 1, one by 2, etc... this is to support
# things like geometry collections where the type isn't
# statically known.
_NULL_DIMENSION = 0
_POINT_DIMENSION = 1
_LINE_DIMENSION = 2
_POLYGON_DIMENSION = 4
_GEOMETRY_DIMENSIONS = {
'Point': _POINT_DIMENSION,
'LineString': _LINE_DIMENSION,
'LinearRing': _LINE_DIMENSION,
'Polygon': _POLYGON_DIMENSION,
'MultiPoint': _POINT_DIMENSION,
'MultiLineString': _LINE_DIMENSION,
'MultiPolygon': _POLYGON_DIMENSION,
'GeometryCollection': _NULL_DIMENSION,
}
# returns the dimensionality of the object. so points have
# zero dimensions, lines one, polygons two. multi* variants
# have the same as their singular variant.
#
# geometry collections can hold many different types, so
# we use a bit-mask of the dimensions and recurse down to
# find the actual dimensionality of the stored set.
#
# returns a bit-mask, with these bits ORed together:
# 1: contains a point / zero-dimensional object
# 2: contains a linestring / one-dimensional object
# 4: contains a polygon / two-dimensional object
def _geom_dimensions(g):
dim = _GEOMETRY_DIMENSIONS.get(g.geom_type)
assert dim is not None, "Unknown geometry type " + \
"%s in transform._geom_dimensions." % \
repr(g.geom_type)
# recurse for geometry collections to find the true
# dimensionality of the geometry.
if dim == _NULL_DIMENSION:
for part in g.geoms:
dim = dim | _geom_dimensions(part)
return dim
def _flatten_geoms(shape):
"""
Flatten a shape so that it is returned as a list
of single geometries.
>>> [g.wkt for g in _flatten_geoms(shapely.wkt.loads('GEOMETRYCOLLECTION (MULTIPOINT(-1 -1, 0 0), GEOMETRYCOLLECTION (POINT(1 1), POINT(2 2), GEOMETRYCOLLECTION (POINT(3 3))), LINESTRING(0 0, 1 1))'))]
['POINT (-1 -1)', 'POINT (0 0)', 'POINT (1 1)', 'POINT (2 2)', 'POINT (3 3)', 'LINESTRING (0 0, 1 1)']
>>> _flatten_geoms(Polygon())
[]
>>> _flatten_geoms(MultiPolygon())
[]
""" # noqa
if shape.geom_type.startswith('Multi'):
return shape.geoms
elif shape.is_empty:
return []
elif shape.type == 'GeometryCollection':
geoms = []
for g in shape.geoms:
geoms.extend(_flatten_geoms(g))
return geoms
else:
return [shape]
def _filter_geom_types(shape, keep_dim):
"""
Return a geometry which consists of the geometries in
the input shape filtered so that only those of the
given dimension remain. Collapses any structure (e.g:
of geometry collections) down to a single or multi-
geometry.
>>> _filter_geom_types(GeometryCollection(), _POINT_DIMENSION).wkt
'GEOMETRYCOLLECTION EMPTY'
>>> _filter_geom_types(Point(0,0), _POINT_DIMENSION).wkt
'POINT (0 0)'
>>> _filter_geom_types(Point(0,0), _LINE_DIMENSION).wkt
'GEOMETRYCOLLECTION EMPTY'
>>> _filter_geom_types(Point(0,0), _POLYGON_DIMENSION).wkt
'GEOMETRYCOLLECTION EMPTY'
>>> _filter_geom_types(LineString([(0,0),(1,1)]), _LINE_DIMENSION).wkt
'LINESTRING (0 0, 1 1)'
>>> _filter_geom_types(Polygon([(0,0),(1,1),(1,0),(0,0)],[]), _POLYGON_DIMENSION).wkt
'POLYGON ((0 0, 1 1, 1 0, 0 0))'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (POINT(0 0), LINESTRING(0 0, 1 1))'), _POINT_DIMENSION).wkt
'POINT (0 0)'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (POINT(0 0), LINESTRING(0 0, 1 1))'), _LINE_DIMENSION).wkt
'LINESTRING (0 0, 1 1)'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (POINT(0 0), LINESTRING(0 0, 1 1))'), _POLYGON_DIMENSION).wkt
'GEOMETRYCOLLECTION EMPTY'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (POINT(0 0), GEOMETRYCOLLECTION (POINT(1 1), LINESTRING(0 0, 1 1)))'), _POINT_DIMENSION).wkt
'MULTIPOINT (0 0, 1 1)'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (MULTIPOINT(-1 -1, 0 0), GEOMETRYCOLLECTION (POINT(1 1), POINT(2 2), GEOMETRYCOLLECTION (POINT(3 3))), LINESTRING(0 0, 1 1))'), _POINT_DIMENSION).wkt
'MULTIPOINT (-1 -1, 0 0, 1 1, 2 2, 3 3)'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (LINESTRING(-1 -1, 0 0), GEOMETRYCOLLECTION (LINESTRING(1 1, 2 2), GEOMETRYCOLLECTION (POINT(3 3))), LINESTRING(0 0, 1 1))'), _LINE_DIMENSION).wkt
'MULTILINESTRING ((-1 -1, 0 0), (1 1, 2 2), (0 0, 1 1))'
>>> _filter_geom_types(shapely.wkt.loads('GEOMETRYCOLLECTION (POLYGON((-2 -2, -2 2, 2 2, 2 -2, -2 -2)), GEOMETRYCOLLECTION (LINESTRING(1 1, 2 2), GEOMETRYCOLLECTION (POLYGON((3 3, 0 0, 1 0, 3 3)))), LINESTRING(0 0, 1 1))'), _POLYGON_DIMENSION).wkt
'MULTIPOLYGON (((-2 -2, -2 2, 2 2, 2 -2, -2 -2)), ((3 3, 0 0, 1 0, 3 3)))'
""" # noqa
# flatten the geometries, and keep the parts with the
# dimension that we want. each item in the parts list
# should be a single (non-multi) geometry.
parts = []
for g in _flatten_geoms(shape):
if _geom_dimensions(g) == keep_dim:
parts.append(g)
# figure out how to construct a multi-geometry of the
# dimension wanted.
if keep_dim == _POINT_DIMENSION:
constructor = MultiPoint
elif keep_dim == _LINE_DIMENSION:
constructor = MultiLineString
elif keep_dim == _POLYGON_DIMENSION:
constructor = MultiPolygon
else:
raise ValueError('Unknown dimension %d in _filter_geom_types'
% keep_dim)
if len(parts) == 0:
return constructor()
elif len(parts) == 1:
# return the singular geometry
return parts[0]
else:
if keep_dim == _POINT_DIMENSION:
# not sure why the MultiPoint constructor wants
# its coordinates differently from MultiPolygon
# and MultiLineString...
coords = []
for p in parts:
coords.extend(p.coords)
return MultiPoint(coords)
else:
return constructor(parts)
# creates a list of indexes, each one for a different cut
# attribute value, in priority order.
#
# STRtree stores geometries and returns these from the query,
# but doesn't appear to allow any other attributes to be
# stored along with the geometries. this means we have to
# separate the index out into several "layers", each having
# the same attribute value. which isn't all that much of a
# pain, as we need to cut the shapes in a certain order to
# ensure priority anyway.
#
# intersect_func is a functor passed in to control how an
# intersection is performed. it is passed
class _Cutter:
def __init__(self, features, attrs, attribute,
target_attribute, keep_geom_type,
intersect_func):
group = defaultdict(list)
for feature in features:
shape, props, fid = feature
attr = props.get(attribute)
group[attr].append(shape)
# if the user didn't supply any options for controlling
# the cutting priority, then just make some up based on
# the attributes which are present in the dataset.
if attrs is None:
all_attrs = set()
for feature in features:
all_attrs.add(feature[1].get(attribute))
attrs = list(all_attrs)
# alternatively, the user can specify an ordering
# function over the attributes.
elif isinstance(attrs, dict):
attrs = _sorted_attributes(features, attrs,
attribute)
cut_idxs = list()
for attr in attrs:
if attr in group:
cut_idxs.append((attr, STRtree(group[attr])))
self.attribute = attribute
self.target_attribute = target_attribute
self.cut_idxs = cut_idxs
self.keep_geom_type = keep_geom_type
self.intersect_func = intersect_func
self.new_features = []
# cut up the argument shape, projecting the configured
# attribute to the properties of the intersecting parts
# of the shape. adds all the selected bits to the
# new_features list.
def cut(self, shape, props, fid):
original_geom_dim = _geom_dimensions(shape)
for cutting_attr, cut_idx in self.cut_idxs:
cutting_shapes = cut_idx.query(shape)
for cutting_shape in cutting_shapes:
if cutting_shape.intersects(shape):
shape = self._intersect(
shape, props, fid, cutting_shape,
cutting_attr, original_geom_dim)
# if there's no geometry left outside the
# shape, then we can exit the function
# early, as nothing else will intersect.
if shape.is_empty:
return
# if there's still geometry left outside, then it
# keeps the old, unaltered properties.
self._add(shape, props, fid, original_geom_dim)
# only keep geometries where either the type is the
# same as the original, or we're not trying to keep the
# same type.
def _add(self, shape, props, fid, original_geom_dim):
# if keeping the same geometry type, then filter
# out anything that's different.
if self.keep_geom_type:
shape = _filter_geom_types(
shape, original_geom_dim)
# don't add empty shapes, they're completely
# useless. the previous step may also have created
# an empty geometry if there weren't any items of
# the type we're looking for.
if shape.is_empty:
return
# add the shape as-is unless we're trying to keep
# the geometry type or the geometry dimension is
# identical.
self.new_features.append((shape, props, fid))
# intersects the shape with the cutting shape and
# handles attribute projection. anything "inside" is
# kept as it must have intersected the highest
# priority cutting shape already. the remainder is
# returned.
def _intersect(self, shape, props, fid, cutting_shape,
cutting_attr, original_geom_dim):
inside, outside = \
self.intersect_func(shape, cutting_shape)
# intersections are tricky, and it seems that the geos
# library (perhaps only certain versions of it) don't
# handle intersection of a polygon with its boundary
# very well. for example:
#
# >>> import shapely.geometry as g
# >>> p = g.Point(0,0).buffer(1.0, resolution=2)
# >>> b = p.boundary
# >>> b.intersection(p).wkt
# 'MULTILINESTRING ((1 0, 0.7071067811865481 -0.7071067811865469), (0.7071067811865481 -0.7071067811865469, 1.615544574432587e-15 -1), (1.615544574432587e-15 -1, -0.7071067811865459 -0.7071067811865491), (-0.7071067811865459 -0.7071067811865491, -1 -3.231089148865173e-15), (-1 -3.231089148865173e-15, -0.7071067811865505 0.7071067811865446), (-0.7071067811865505 0.7071067811865446, -4.624589118372729e-15 1), (-4.624589118372729e-15 1, 0.7071067811865436 0.7071067811865515), (0.7071067811865436 0.7071067811865515, 1 0))' # noqa
#
# the result multilinestring could be joined back into
# the original object. but because it has separate parts,
# each requires duplicating the start and end point, and
# each separate segment gets a different polygon buffer
# in Tangram - basically, it's a problem all round.
#
# two solutions to this: given that we're cutting, then
# the inside and outside should union back to the
# original shape - if either is empty then the whole
# object ought to be in the other.
#
# the second solution, for when there is actually some
# part cut, is that we can attempt to merge lines back
# together.
if outside.is_empty and not inside.is_empty:
inside = shape
elif inside.is_empty and not outside.is_empty:
outside = shape
elif original_geom_dim == _LINE_DIMENSION:
inside = _linemerge(inside)
outside = _linemerge(outside)
if cutting_attr is not None:
inside_props = props.copy()
inside_props[self.target_attribute] = cutting_attr
else:
inside_props = props
self._add(inside, inside_props, fid,
original_geom_dim)
return outside
def _intersect_cut(shape, cutting_shape):
"""
intersect by cutting, so that the cutting shape defines
a part of the shape which is inside and a part which is
outside as two separate shapes.
"""
inside = shape.intersection(cutting_shape)
outside = shape.difference(cutting_shape)
return inside, outside
# intersect by looking at the overlap size. we can define
# a cut-off fraction and if that fraction or more of the
# area of the shape is within the cutting shape, it's
# inside, else outside.
#
# this is done using a closure so that we can curry away
# the fraction parameter.
def _intersect_overlap(min_fraction):
# the inner function is what will actually get
# called, but closing over min_fraction means it
# will have access to that.
def _f(shape, cutting_shape):
overlap = shape.intersection(cutting_shape).area
area = shape.area
# need an empty shape of the same type as the
# original shape, which should be possible, as
# it seems shapely geometries all have a default
# constructor to empty.
empty = type(shape)()
if ((area > 0) and (overlap / area) >= min_fraction):
return shape, empty
else:
return empty, shape
return _f
# intersect by looking at the overlap length. if more than a minimum fraction
# of the shape's length is within the cutting area, then we will consider it
# totally "cut".
def _intersect_linear_overlap(min_fraction):
# the inner function is what will actually get
# called, but closing over min_fraction means it
# will have access to that.
def _f(shape, cutting_shape):
overlap = shape.intersection(cutting_shape).length
total = shape.length
empty = type(shape)()
if ((total > 0) and (overlap / total) >= min_fraction):
return shape, empty
else:
return empty, shape
return _f
# find a layer by iterating through all the layers. this
# would be easier if they layers were in a dict(), but
# that's a pretty invasive change.
#
# returns None if the layer can't be found.
def _find_layer(feature_layers, name):
for feature_layer in feature_layers:
layer_datum = feature_layer['layer_datum']
layer_name = layer_datum['name']
if layer_name == name:
return feature_layer
return None
# shared implementation of the intercut algorithm, used both when cutting
# shapes and using overlap to determine inside / outsideness.
#
# the filter_fn are used to filter which features from the base layer are cut
# with which features from the cutting layer. cutting layer features which do
# not match the filter are ignored, base layer features are left in the layer
# unchanged.
def _intercut_impl(intersect_func, feature_layers, base_layer, cutting_layer,
attribute, target_attribute, cutting_attrs, keep_geom_type,
cutting_filter_fn=None, base_filter_fn=None):
# the target attribute can default to the attribute if
# they are distinct. but often they aren't, and that's
# why target_attribute is a separate parameter.
if target_attribute is None:
target_attribute = attribute
# search through all the layers and extract the ones
# which have the names of the base and cutting layer.
# it would seem to be better to use a dict() for
# layers, and this will give odd results if names are
# allowed to be duplicated.
base = _find_layer(feature_layers, base_layer)
cutting = _find_layer(feature_layers, cutting_layer)
# base or cutting layer not available. this could happen
# because of a config problem, in which case you'd want
# it to be reported. but also can happen when the client
# selects a subset of layers which don't include either
# the base or the cutting layer. then it's not an error.
# the interesting case is when they select the base but
# not the cutting layer...
if base is None or cutting is None:
return None
base_features = base['features']
cutting_features = cutting['features']
# filter out any features that we don't want to cut with
if cutting_filter_fn is not None:
cutting_features = filter(cutting_filter_fn, cutting_features)
# short-cut return if there are no cutting features => there's nothing
# to do.
if not cutting_features:
return base
# make a cutter object to help out
cutter = _Cutter(cutting_features, cutting_attrs,
attribute, target_attribute,
keep_geom_type, intersect_func)
skipped_features = []
for base_feature in base_features:
if base_filter_fn is None or base_filter_fn(base_feature):
# we use shape to track the current remainder of the
# shape after subtracting bits which are inside cuts.
shape, props, fid = base_feature
cutter.cut(shape, props, fid)
else:
skipped_features.append(base_feature)
base['features'] = cutter.new_features + skipped_features
return base
class Where(object):
"""
A "where" clause for filtering features based on their properties.
This is commonly used in post-processing steps to configure which features
in the layer we want to operate on, allowing us to write simple Python
expressions in the YAML.
"""
def __init__(self, where):
self.fn = compile(where, 'queries.yaml', 'eval')
def __call__(self, feature):
shape, props, fid = feature
local = defaultdict(lambda: None)
local.update(props)
return eval(self.fn, {}, local)
# intercut takes features from a base layer and cuts each
# of them against a cutting layer, splitting any base
# feature which intersects into separate inside and outside
# parts.
#
# the parts of each base feature which are outside any
# cutting feature are left unchanged. the parts which are
# inside have their property with the key given by the
# 'target_attribute' parameter set to the same value as the
# property from the cutting feature with the key given by
# the 'attribute' parameter.
#
# the intended use of this is to project attributes from one
# layer to another so that they can be styled appropriately.
#
# - feature_layers: list of layers containing both the base
# and cutting layer.
# - base_layer: str name of the base layer.
# - cutting_layer: str name of the cutting layer.
# - attribute: optional str name of the property / attribute
# to take from the cutting layer.
# - target_attribute: optional str name of the property /
# attribute to assign on the base layer. defaults to the
# same as the 'attribute' parameter.
# - cutting_attrs: list of str, the priority of the values
# to be used in the cutting operation. this ensures that
# items at the beginning of the list get cut first and
# those values have priority (won't be overridden by any
# other shape cutting).
# - keep_geom_type: if truthy, then filter the output to be
# the same type as the input. defaults to True, because
# this seems like an eminently sensible behaviour.
# - base_where: if truthy, a Python expression which is
# evaluated in the context of a feature's properties and
# can return True if the feature is to be cut and False
# if it should be passed through unmodified.
# - cutting_where: if truthy, a Python expression which is
# evaluated in the context of a feature's properties and
# can return True if the feature is to be used for cutting
# and False if it should be ignored.
#
# returns a feature layer which is the base layer cut by the
# cutting layer.
def intercut(ctx):
feature_layers = ctx.feature_layers
base_layer = ctx.params.get('base_layer')
assert base_layer, \
'Parameter base_layer was missing from intercut config'
cutting_layer = ctx.params.get('cutting_layer')
assert cutting_layer, \
'Parameter cutting_layer was missing from intercut ' \
'config'
attribute = ctx.params.get('attribute')
# sanity check on the availability of the cutting
# attribute.
assert attribute is not None, \
'Parameter attribute to intercut was None, but ' + \
'should have been an attribute name. Perhaps check ' + \
'your configuration file and queries.'
target_attribute = ctx.params.get('target_attribute')
cutting_attrs = ctx.params.get('cutting_attrs')
keep_geom_type = ctx.params.get('keep_geom_type', True)
base_where = ctx.params.get('base_where')
cutting_where = ctx.params.get('cutting_where')
# compile the where-clauses, if any were configured
if base_where:
base_where = Where(base_where)
if cutting_where:
cutting_where = Where(cutting_where)
return _intercut_impl(
_intersect_cut, feature_layers, base_layer, cutting_layer,
attribute, target_attribute, cutting_attrs, keep_geom_type,
base_filter_fn=base_where, cutting_filter_fn=cutting_where)
# overlap measures the area overlap between each feature in
# the base layer and each in the cutting layer. if the
# fraction of overlap is greater than the min_fraction
# constant, then the feature in the base layer is assigned
# a property with its value derived from the overlapping
# feature from the cutting layer.
#
# the intended use of this is to project attributes from one
# layer to another so that they can be styled appropriately.
#
# it has the same parameters as intercut, see above.
#
# returns a feature layer which is the base layer with
# overlapping features having attributes projected from the
# cutting layer.
def overlap(ctx):
feature_layers = ctx.feature_layers
base_layer = ctx.params.get('base_layer')
assert base_layer, \
'Parameter base_layer was missing from overlap config'
cutting_layer = ctx.params.get('cutting_layer')
assert cutting_layer, \
'Parameter cutting_layer was missing from overlap ' \
'config'
attribute = ctx.params.get('attribute')
# sanity check on the availability of the cutting
# attribute.
assert attribute is not None, \
'Parameter attribute to overlap was None, but ' + \
'should have been an attribute name. Perhaps check ' + \
'your configuration file and queries.'
target_attribute = ctx.params.get('target_attribute')
cutting_attrs = ctx.params.get('cutting_attrs')
keep_geom_type = ctx.params.get('keep_geom_type', True)
min_fraction = ctx.params.get('min_fraction', 0.8)
base_where = ctx.params.get('base_where')
cutting_where = ctx.params.get('cutting_where')
# use a different function for linear overlaps (i.e: roads with polygons)
# than area overlaps. keeping this explicit (rather than relying on the
# geometry type) means we don't end up with unexpected lines in a polygonal
# layer.
linear = ctx.params.get('linear', False)
if linear:
overlap_fn = _intersect_linear_overlap(min_fraction)
else:
overlap_fn = _intersect_overlap(min_fraction)
# compile the where-clauses, if any were configured
if base_where:
base_where = Where(base_where)
if cutting_where:
cutting_where = Where(cutting_where)
return _intercut_impl(
overlap_fn, feature_layers, base_layer,
cutting_layer, attribute, target_attribute, cutting_attrs,
keep_geom_type, cutting_filter_fn=cutting_where,
base_filter_fn=base_where)
# intracut cuts a layer with a set of features from that same
# layer, which are then removed.
#
# for example, with water boundaries we get one set of linestrings
# from the admin polygons and another set from the original ways
# where the `maritime=yes` tag is set. we don't actually want
# separate linestrings, we just want the `maritime=yes` attribute
# on the first set of linestrings.
def intracut(ctx):
feature_layers = ctx.feature_layers
base_layer = ctx.params.get('base_layer')
assert base_layer, \
'Parameter base_layer was missing from intracut config'
attribute = ctx.params.get('attribute')
# sanity check on the availability of the cutting
# attribute.
assert attribute is not None, \
'Parameter attribute to intracut was None, but ' + \
'should have been an attribute name. Perhaps check ' + \
'your configuration file and queries.'
base = _find_layer(feature_layers, base_layer)
if base is None:
return None
# unlike intracut & overlap, which work on separate layers,
# intracut separates features in the same layer into
# different sets to work on.
base_features = list()
cutting_features = list()
for shape, props, fid in base['features']:
if attribute in props:
cutting_features.append((shape, props, fid))
else:
base_features.append((shape, props, fid))
cutter = _Cutter(cutting_features, None, attribute,
attribute, True, _intersect_cut)
for shape, props, fid in base_features:
cutter.cut(shape, props, fid)
base['features'] = cutter.new_features
return base
# place kinds, as used by OSM, mapped to their rough
# min_zoom so that we can provide a defaulted,
# non-curated min_zoom value.
_default_min_zoom_for_place_kind = {
'locality': 13,
'isolated_dwelling': 13,
'farm': 13,
'hamlet': 12,
'village': 11,
'suburb': 10,
'quarter': 10,
'borough': 10,
'town': 8,
'city': 8,
'province': 4,
'state': 4,
'sea': 3,
'country': 0,
'ocean': 0,
'continent': 0
}
# if the feature does not have a min_zoom attribute already,
# which would have come from a curated source, then calculate
# a default one based on the kind of place it is.
def calculate_default_place_min_zoom(shape, properties, fid, zoom):
min_zoom = properties.get('min_zoom')
if min_zoom is not None:
return shape, properties, fid
# base calculation off kind
kind = properties.get('kind')
if kind is None:
return shape, properties, fid
min_zoom = _default_min_zoom_for_place_kind.get(kind)
if min_zoom is None:
return shape, properties, fid
# adjust min_zoom for state / country capitals
if kind in ('city', 'town'):
if properties.get('region_capital'):
min_zoom -= 1
elif properties.get('country_capital'):
min_zoom -= 2
properties['min_zoom'] = min_zoom
return shape, properties, fid
def _make_new_properties(props, props_instructions):
"""
make new properties from existing properties and a
dict of instructions.
the algorithm is:
- where a key appears with value True, it will be
copied from the existing properties.
- where it's a dict, the values will be looked up
in that dict.
- otherwise the value will be used directly.
"""
new_props = dict()
for k, v in props_instructions.iteritems():
if v is True:
# this works even when props[k] = None
if k in props:
new_props[k] = props[k]
elif isinstance(v, dict):
# this will return None, which allows us to
# use the dict to set default values.
original_v = props.get(k)
if original_v in v:
new_props[k] = v[original_v]
elif isinstance(v, list) and len(v) == 1:
# this is a hack to implement escaping for when the output value
# should be a value, but that value (e.g: True, or a dict) is
# used for some other purpose above.
new_props[k] = v[0]
else:
new_props[k] = v
return new_props
def _snap_to_grid(shape, grid_size):
"""
Snap coordinates of a shape to a multiple of `grid_size`.
This can be useful when there's some error in point
positions, but we're using an algorithm which is very
sensitive to coordinate exactness. For example, when
calculating the boundary of several items, it makes a
big difference whether the shapes touch or there's a
very small gap between them.
This is implemented here because it doesn't exist in
GEOS or Shapely. It exists in PostGIS, but only because
it's implemented there as well. Seems like it would be a
useful thing to have in GEOS, though.
>>> _snap_to_grid(Point(0.5, 0.5), 1).wkt
'POINT (1 1)'
>>> _snap_to_grid(Point(0.1, 0.1), 1).wkt
'POINT (0 0)'
>>> _snap_to_grid(Point(-0.1, -0.1), 1).wkt
'POINT (-0 -0)'
>>> _snap_to_grid(LineString([(1.1,1.1),(1.9,0.9)]), 1).wkt
'LINESTRING (1 1, 2 1)'
_snap_to_grid(Polygon([(0.1,0.1),(3.1,0.1),(3.1,3.1),(0.1,3.1),(0.1,0.1)],[[(1.1,0.9),(1.1,1.9),(2.1,1.9),(2.1,0.9),(1.1,0.9)]]), 1).wkt
'POLYGON ((0 0, 3 0, 3 3, 0 3, 0 0), (1 1, 1 2, 2 2, 2 1, 1 1))'
>>> _snap_to_grid(MultiPoint([Point(0.1, 0.1), Point(0.9, 0.9)]), 1).wkt
'MULTIPOINT (0 0, 1 1)'
>>> _snap_to_grid(MultiLineString([LineString([(0.1, 0.1), (0.9, 0.9)]), LineString([(0.9, 0.1),(0.1,0.9)])]), 1).wkt
'MULTILINESTRING ((0 0, 1 1), (1 0, 0 1))'
""" # noqa
# snap a single coordinate value
def _snap(c):
return grid_size * round(c / grid_size, 0)
# snap all coordinate pairs in something iterable
def _snap_coords(c):
return [(_snap(x), _snap(y)) for x, y in c]
# recursively snap all coordinates in an iterable over
# geometries.
def _snap_multi(geoms):
return [_snap_to_grid(g, grid_size) for g in geoms]
shape_type = shape.geom_type
if shape.is_empty or shape_type == 'GeometryCollection':
return None
elif shape_type == 'Point':
return Point(_snap(shape.x), _snap(shape.y))
elif shape_type == 'LineString':
return LineString(_snap_coords(shape.coords))
elif shape_type == 'Polygon':
exterior = LinearRing(_snap_coords(shape.exterior.coords))
interiors = []
for interior in shape.interiors:
interiors.append(LinearRing(_snap_coords(interior.coords)))
return Polygon(exterior, interiors)
elif shape_type == 'MultiPoint':
return MultiPoint(_snap_multi(shape.geoms))
elif shape_type == 'MultiLineString':
return MultiLineString(_snap_multi(shape.geoms))
elif shape_type == 'MultiPolygon':
return MultiPolygon(_snap_multi(shape.geoms))
else:
raise ValueError('_snap_to_grid: unimplemented for shape type %s'
% repr(shape_type))
def exterior_boundaries(ctx):
"""
create new fetures from the boundaries of polygons
in the base layer, subtracting any sections of the
boundary which intersect other polygons. this is
added as a new layer if new_layer_name is not None
otherwise appended to the base layer.
the purpose of this is to provide us a shoreline /
river bank layer from the water layer without having
any of the shoreline / river bank draw over the top
of any of the base polygons.
properties on the lines returned are copied / adapted
from the existing layer using the new_props dict. see
_make_new_properties above for the rules.
buffer_size determines whether any buffering will be
done to the index polygons. a judiciously small
amount of buffering can help avoid "dashing" due to
tolerance in the intersection, but will also create
small overlaps between lines.
any features in feature_layers[layer] which aren't
polygons will be ignored.
note that the `bounds` kwarg should be filled out
automatically by tilequeue - it does not have to be
provided from the config.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
base_layer = ctx.params.get('base_layer')
assert base_layer, 'Missing base_layer parameter'
new_layer_name = ctx.params.get('new_layer_name')
prop_transform = ctx.params.get('prop_transform')
buffer_size = ctx.params.get('buffer_size')
start_zoom = ctx.params.get('start_zoom', 0)
snap_tolerance = ctx.params.get('snap_tolerance')
layer = None
# don't start processing until the start zoom
if zoom < start_zoom:
return layer
# search through all the layers and extract the one
# which has the name of the base layer we were given
# as a parameter.
layer = _find_layer(feature_layers, base_layer)
# if we failed to find the base layer then it's
# possible the user just didn't ask for it, so return
# an empty result.
if layer is None:
return None
if prop_transform is None:
prop_transform = {}
features = layer['features']
# this exists to enable a dirty hack to try and work
# around duplicate geometries in the database. this
# happens when a multipolygon relation can't
# supersede a member way because the way contains tags
# which aren't present on the relation. working around
# this by calling "union" on geometries proved to be
# too expensive (~3x current), so this hack looks at
# the way_area of each object, and uses that as a
# proxy for identity. it's not perfect, but the chance
# that there are two overlapping polygons of exactly
# the same size must be pretty small. however, the
# STRTree we're using as a spatial index doesn't
# directly support setting attributes on the indexed
# geometries, so this class exists to carry the area
# attribute through the index to the point where we
# want to use it.
class geom_with_area:
def __init__(self, geom, area):
self.geom = geom
self.area = area
self._geom = geom._geom
# STRtree started filtering out empty geoms at some version, so
# we need to proxy the is_empty property.
self.is_empty = geom.is_empty
# create an index so that we can efficiently find the
# polygons intersecting the 'current' one. Note that
# we're only interested in intersecting with other
# polygonal features, and that intersecting with lines
# can give some unexpected results.
indexable_features = list()
indexable_shapes = list()
for shape, props, fid in features:
if shape.type in ('Polygon', 'MultiPolygon'):
# the data comes back clipped from the queries now so we
# no longer need to clip here
snapped = shape
if snap_tolerance is not None:
snapped = _snap_to_grid(shape, snap_tolerance)
# geometry collections are returned as None
if snapped is None:
continue
# snapping coordinates and clipping shapes might make the shape
# invalid, so we need a way to clean them. one simple, but not
# foolproof, way is to buffer them by 0.
if not snapped.is_valid:
snapped = snapped.buffer(0)
# that still might not have done the trick, so drop any polygons
# which are still invalid so as not to cause errors later.
if not snapped.is_valid:
# TODO: log this as a warning!
continue
# skip any geometries that may have become empty
if snapped.is_empty:
continue
indexable_features.append((snapped, props, fid))
indexable_shapes.append(geom_with_area(snapped, props.get('area')))
index = STRtree(indexable_shapes)
new_features = list()
# loop through all the polygons, taking the boundary
# of each and subtracting any parts which are within
# other polygons. what remains (if anything) is the
# new feature.
for feature in indexable_features:
shape, props, fid = feature
boundary = shape.boundary
cutting_shapes = index.query(boundary)
for cutting_item in cutting_shapes:
cutting_shape = cutting_item.geom
cutting_area = cutting_item.area
# dirty hack: this object is probably a
# superseded way if the ID is positive and
# the area is the same as the cutting area.
# using the ID check here prevents the
# boundary from being duplicated.
is_superseded_way = \
cutting_area == props.get('area') and \
props.get('id') > 0
if cutting_shape is not shape and \
not is_superseded_way:
buf = cutting_shape
if buffer_size is not None:
buf = buf.buffer(buffer_size)
boundary = boundary.difference(buf)
# filter only linestring-like objects. we don't
# want any points which might have been created
# by the intersection.
boundary = _filter_geom_types(boundary, _LINE_DIMENSION)
if not boundary.is_empty:
new_props = _make_new_properties(props, prop_transform)
new_features.append((boundary, new_props, fid))
if new_layer_name is None:
# no new layer requested, instead add new
# features into the same layer.
layer['features'].extend(new_features)
return layer
else:
# make a copy of the old layer's information - it
# shouldn't matter about most of the settings, as
# post-processing is one of the last operations.
# but we need to override the name to ensure we get
# some output.
new_layer_datum = layer['layer_datum'].copy()
new_layer_datum['name'] = new_layer_name
new_layer = layer.copy()
new_layer['layer_datum'] = new_layer_datum
new_layer['features'] = new_features
new_layer['name'] = new_layer_name
return new_layer
def _inject_key(key, infix):
"""
OSM keys often have several parts, separated by ':'s.
When we merge properties from the left and right of a
boundary, we want to preserve information like the
left and right names, but prefer the form "name:left"
rather than "left:name", so we have to insert an
infix string to these ':'-delimited arrays.
>>> _inject_key('a:b:c', 'x')
'a:x:b:c'
>>> _inject_key('a', 'x')
'a:x'
"""
parts = key.split(':')
parts.insert(1, infix)
return ':'.join(parts)
def _merge_left_right_props(lprops, rprops):
"""
Given a set of properties to the left and right of a
boundary, we want to keep as many of these as possible,
but keeping them all might be a bit too much.
So we want to keep the key-value pairs which are the
same in both in the output, but merge the ones which
are different by infixing them with 'left' and 'right'.
>>> _merge_left_right_props({}, {})
{}
>>> _merge_left_right_props({'a':1}, {})
{'a:left': 1}
>>> _merge_left_right_props({}, {'b':2})
{'b:right': 2}
>>> _merge_left_right_props({'a':1, 'c':3}, {'b':2, 'c':3})
{'a:left': 1, 'c': 3, 'b:right': 2}
>>> _merge_left_right_props({'a':1},{'a':2})
{'a:left': 1, 'a:right': 2}
"""
keys = set(lprops.keys()) | set(rprops.keys())
new_props = dict()
# props in both are copied directly if they're the same
# in both the left and right. they get left/right
# inserted after the first ':' if they're different.
for k in keys:
lv = lprops.get(k)
rv = rprops.get(k)
if lv == rv:
new_props[k] = lv
else:
if lv is not None:
new_props[_inject_key(k, 'left')] = lv
if rv is not None:
new_props[_inject_key(k, 'right')] = rv
return new_props
def _make_joined_name(props):
"""
Updates the argument to contain a 'name' element
generated from joining the left and right names.
Just to make it easier for people, we generate a name
which is easy to display of the form "LEFT - RIGHT".
The individual properties are available if the user
wants to generate a more complex name.
>>> x = {}
>>> _make_joined_name(x)
>>> x
{}
>>> x = {'name:left':'Left'}
>>> _make_joined_name(x)
>>> x
{'name': 'Left', 'name:left': 'Left'}
>>> x = {'name:right':'Right'}
>>> _make_joined_name(x)
>>> x
{'name': 'Right', 'name:right': 'Right'}
>>> x = {'name:left':'Left', 'name:right':'Right'}
>>> _make_joined_name(x)
>>> x
{'name:right': 'Right', 'name': 'Left - Right', 'name:left': 'Left'}
>>> x = {'name:left':'Left', 'name:right':'Right', 'name': 'Already Exists'}
>>> _make_joined_name(x)
>>> x
{'name:right': 'Right', 'name': 'Already Exists', 'name:left': 'Left'}
""" # noqa
# don't overwrite an existing name
if 'name' in props:
return
lname = props.get('name:left')
rname = props.get('name:right')
if lname is not None:
if rname is not None:
props['name'] = "%s - %s" % (lname, rname)
else:
props['name'] = lname
elif rname is not None:
props['name'] = rname
def _linemerge(geom):
"""
Try to extract all the linear features from the geometry argument
and merge them all together into the smallest set of linestrings
possible.
This is almost identical to Shapely's linemerge, and uses it,
except that Shapely's throws exceptions when passed a single
linestring, or a geometry collection with lines and points in it.
So this can be thought of as a "safer" wrapper around Shapely's
function.
"""
geom_type = geom.type
result_geom = None
if geom_type == 'GeometryCollection':
# collect together everything line-like from the geometry
# collection and filter out anything that's empty
lines = []
for line in geom.geoms:
line = _linemerge(line)
if not line.is_empty:
lines.append(line)
result_geom = linemerge(lines) if lines else None
elif geom_type == 'LineString':
result_geom = geom
elif geom_type == 'MultiLineString':
result_geom = linemerge(geom)
else:
result_geom = None
if result_geom is not None:
# simplify with very small tolerance to remove duplicate points.
# almost duplicate or nearly colinear points can occur due to
# numerical round-off or precision in the intersection algorithm, and
# this should help get rid of those. see also:
# http://lists.gispython.org/pipermail/community/2014-January/003236.html
#
# the tolerance here is hard-coded to a fraction of the
# coordinate magnitude. there isn't a perfect way to figure
# out what this tolerance should be, so this may require some
# tweaking.
epsilon = max(map(abs, result_geom.bounds)) * float_info.epsilon * 1000
result_geom = result_geom.simplify(epsilon, True)
result_geom_type = result_geom.type
# the geometry may still have invalid or repeated points if it has zero
# length segments, so remove anything where the length is less than
# epsilon.
if result_geom_type == 'LineString':
if result_geom.length < epsilon:
result_geom = None
elif result_geom_type == 'MultiLineString':
parts = []
for line in result_geom.geoms:
if line.length >= epsilon:
parts.append(line)
result_geom = MultiLineString(parts)
return result_geom if result_geom else MultiLineString([])
def _orient(geom):
"""
Given a shape, returns the counter-clockwise oriented
version. Does not affect points or lines.
This version is required because Shapely's version is
only defined for single polygons, and we want
something that works generically.
In the example below, note the change in order of the
coordinates in `p2`, which is initially not oriented
CCW.
>>> p1 = Polygon([[0, 0], [1, 0], [0, 1], [0, 0]])
>>> p2 = Polygon([[0, 1], [1, 1], [1, 0], [0, 1]])
>>> orient(p1).wkt
'POLYGON ((0 0, 1 0, 0 1, 0 0))'
>>> orient(p2).wkt
'POLYGON ((0 1, 1 0, 1 1, 0 1))'
>>> _orient(MultiPolygon([p1, p2])).wkt
'MULTIPOLYGON (((0 0, 1 0, 0 1, 0 0)), ((0 1, 1 0, 1 1, 0 1)))'
"""
def oriented_multi(kind, geom):
oriented_geoms = [_orient(g) for g in geom.geoms]
return kind(oriented_geoms)
geom_type = geom.type
if geom_type == 'Polygon':
geom = orient(geom)
elif geom_type == 'MultiPolygon':
geom = oriented_multi(MultiPolygon, geom)
elif geom_type == 'GeometryCollection':
geom = oriented_multi(GeometryCollection, geom)
return geom
def _fix_disputed_left_right_kinds(props):
"""
After merging left/right props, we might find that any kind:XX for disputed
borders are mixed up as kind:left:XX or kind:right:XX and we want to merge
them back together again.
"""
keys = []
for k in props.keys():
if k.startswith('kind:left:') or k.startswith('kind:right:'):
keys.append(k)
for k in keys:
prefix = 'kind:left:' if k.startswith('kind:left:') else 'kind:right:'
new_key = 'kind:' + k[len(prefix):]
value = props.pop(k)
props[new_key] = value
def admin_boundaries(ctx):
"""
Given a layer with admin boundaries and inclusion polygons for
land-based boundaries, attempts to output a set of oriented
boundaries with properties from both the left and right admin
boundary, and also cut with the maritime information to provide
a `maritime_boundary: True` value where there's overlap between
the maritime lines and the admin boundaries.
Note that admin boundaries must alread be correctly oriented.
In other words, it must have a positive area and run counter-
clockwise around the polygon for which it is an outer (or
clockwise if it was an inner).
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
base_layer = ctx.params.get('base_layer')
assert base_layer, 'Parameter base_layer missing.'
start_zoom = ctx.params.get('start_zoom', 0)
layer = None
# don't start processing until the start zoom
if zoom < start_zoom:
return layer
layer = _find_layer(feature_layers, base_layer)
if layer is None:
return None
# layer will have polygonal features for the admin
# polygons and also linear features for the maritime
# boundaries. further, we want to group the admin
# polygons by their kind, as this will reduce the
# working set.
admin_features = defaultdict(list)
maritime_features = list()
new_features = list()
# Sorting here so that we have consistent ordering of left/right side
# on boundaries.
sorted_layer = sorted(layer['features'], key=lambda f: f[1]['id'])
for shape, props, fid in sorted_layer:
dims = _geom_dimensions(shape)
kind = props.get('kind')
maritime_boundary = props.get('maritime_boundary')
# the reason to use this rather than compare the
# string of types is to catch the "multi-" types
# as well.
if dims == _LINE_DIMENSION and kind is not None:
admin_features[kind].append((shape, props, fid))
elif dims == _POLYGON_DIMENSION and maritime_boundary:
maritime_features.append((shape, {'maritime_boundary': False}, 0))
# there are separate polygons for each admin level, and
# we only want to intersect like with like because it
# makes more sense to have Country-Country and
# State-State boundaries (and labels) rather than the
# (combinatoric) set of all different levels.
for kind, features in admin_features.iteritems():
num_features = len(features)
envelopes = [g[0].envelope for g in features]
for i, feature in enumerate(features):
boundary, props, fid = feature
prop_id = props['id']
envelope = envelopes[i]
# intersect with *preceding* features to remove
# those boundary parts. this ensures that there
# are no duplicate parts.
for j in range(0, i):
cut_shape, cut_props, cut_fid = features[j]
# don't intersect with self
if prop_id == cut_props['id']:
continue
cut_envelope = envelopes[j]
if envelope.intersects(cut_envelope):
try:
boundary = boundary.difference(cut_shape)
except shapely.errors.TopologicalError:
# NOTE: we have gotten errors Topological errors here
# that look like:
# TopologicalError: This operation could not be
# performed. Reason: unknown"
pass
if boundary.is_empty:
break
# intersect with every *later* feature. now each
# intersection represents a section of boundary
# that we want to keep.
for j in range(i+1, num_features):
cut_shape, cut_props, cut_fid = features[j]
# don't intersect with self
if prop_id == cut_props['id']:
continue
cut_envelope = envelopes[j]
if envelope.intersects(cut_envelope):
try:
inside, boundary = _intersect_cut(boundary, cut_shape)
except (StandardError, shapely.errors.ShapelyError):
# if the inside and remaining boundary can't be
# calculated, then we can't continue to intersect
# anything else with this shape. this means we might
# end up with erroneous one-sided boundaries.
# TODO: log warning!
break
inside = _linemerge(inside)
if not inside.is_empty:
new_props = _merge_left_right_props(props, cut_props)
new_props['id'] = props['id']
_make_joined_name(new_props)
_fix_disputed_left_right_kinds(new_props)
new_features.append((inside, new_props, fid))
if boundary.is_empty:
break
# anything left over at the end is still a boundary,
# but a one-sided boundary to international waters.
boundary = _linemerge(boundary)
if not boundary.is_empty:
new_props = props.copy()
_make_joined_name(new_props)
new_features.append((boundary, new_props, fid))
# use intracut for maritime, but it intersects in a positive
# way - it sets the tag on anything which intersects, whereas
# we want to set maritime where it _doesn't_ intersect. so
# we have to flip the attribute afterwards.
cutter = _Cutter(maritime_features, None,
'maritime_boundary', 'maritime_boundary',
_LINE_DIMENSION, _intersect_cut)
for shape, props, fid in new_features:
cutter.cut(shape, props, fid)
# flip the property, so define maritime_boundary=yes where
# it was previously unset and remove maritime_boundary=no.
for shape, props, fid in cutter.new_features:
maritime_boundary = props.pop('maritime_boundary', None)
if maritime_boundary is None:
props['maritime_boundary'] = True
layer['features'] = cutter.new_features
return layer
def _unicode_len(s):
if isinstance(s, str):
return len(s.decode('utf-8'))
elif isinstance(s, unicode):
return len(s)
return None
def _delete_labels_longer_than(max_label_chars, props):
"""
Delete entries in the props dict where the key starts with 'name' and the
unicode length of the value is greater than max_label_chars.
If one half of a left/right pair is too long, then the opposite in the pair
is also deleted.
"""
to_delete = set()
for k, v in props.iteritems():
if not k.startswith('name'):
continue
length_chars = _unicode_len(v)
if length_chars is None:
# huh? name isn't a string?
continue
if length_chars <= max_label_chars:
continue
to_delete.add(k)
if k.startswith('name:left:'):
opposite_k = k.replace(':left:', ':right:')
to_delete.add(opposite_k)
elif k.startswith('name:right:'):
opposite_k = k.replace(':right:', ':left:')
to_delete.add(opposite_k)
for k in to_delete:
if k in props:
del props[k]
def drop_names_on_short_boundaries(ctx):
"""
Drop all names on a boundaries which are too small to render the shortest
name.
"""
params = _Params(ctx, 'drop_names_on_short_boundaries')
layer_name = params.required('source_layer')
start_zoom = params.optional('start_zoom', typ=int, default=0)
end_zoom = params.optional('end_zoom', typ=int)
pixels_per_letter = params.optional('pixels_per_letter', typ=(int, float),
default=10.0)
layer = _find_layer(ctx.feature_layers, layer_name)
zoom = ctx.nominal_zoom
if zoom < start_zoom or \
(end_zoom is not None and zoom >= end_zoom):
return None
# tolerance for zoom gives us a value in meters for a pixel, so it's
# meters per pixel
meters_per_letter = pixels_per_letter * tolerance_for_zoom(zoom)
for shape, props, fid in layer['features']:
geom_type = shape.geom_type
if geom_type in ('LineString', 'MultiLineString'):
# simplify to one letter size. this gets close to what might
# practically be renderable, and means we're not counting any
# sub-letter scale fractal crinklyness towards the length of
# the line.
label_shape = shape.simplify(meters_per_letter)
if geom_type == 'LineString':
shape_length_meters = label_shape.length
else:
# get the longest section to see if that's labellable - if
# not, then none of the sections could have a label and we
# can drop the names.
shape_length_meters = max(part.length for part in label_shape)
# maximum number of characters we'll be able to print at this
# zoom.
max_label_chars = int(shape_length_meters / meters_per_letter)
_delete_labels_longer_than(max_label_chars, props)
return None
def handle_label_placement(ctx):
"""
Converts a geometry label column into a separate feature.
"""
layers = ctx.params.get('layers', None)
zoom = ctx.nominal_zoom
location_property = ctx.params.get('location_property', None)
label_property_name = ctx.params.get('label_property_name', None)
label_property_value = ctx.params.get('label_property_value', None)
label_where = ctx.params.get('label_where', None)
start_zoom = ctx.params.get('start_zoom', 0)
if zoom < start_zoom:
return None
assert layers, 'handle_label_placement: Missing layers'
assert location_property, \
'handle_label_placement: Missing location_property'
assert label_property_name, \
'handle_label_placement: Missing label_property_name'
assert label_property_value, \
'handle_label_placement: Missing label_property_value'
layers = set(layers)
if label_where:
label_where = compile(label_where, 'queries.yaml', 'eval')
for feature_layer in ctx.feature_layers:
if feature_layer['name'] not in layers:
continue
padded_bounds = feature_layer['padded_bounds']
point_padded_bounds = padded_bounds['point']
clip_bounds = Box(*point_padded_bounds)
new_features = []
for feature in feature_layer['features']:
shape, props, fid = feature
label_wkb = props.pop(location_property, None)
new_features.append(feature)
if not label_wkb:
continue
local_state = props.copy()
local_state['properties'] = props
if label_where and not eval(label_where, {}, local_state):
continue
label_shape = shapely.wkb.loads(label_wkb)
if not (label_shape.type in ('Point', 'MultiPoint') and
clip_bounds.intersects(label_shape)):
continue
point_props = props.copy()
point_props[label_property_name] = label_property_value
point_feature = label_shape, point_props, fid
new_features.append(point_feature)
feature_layer['features'] = new_features
def generate_address_points(ctx):
"""
Generates address points from building polygons where there is an
addr:housenumber tag on the building. Removes those tags from the
building.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, 'generate_address_points: missing source_layer'
start_zoom = ctx.params.get('start_zoom', 0)
if zoom < start_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
new_features = []
for feature in layer['features']:
shape, properties, fid = feature
# We only want to create address points for polygonal
# buildings with address tags.
if shape.geom_type not in ('Polygon', 'MultiPolygon'):
continue
addr_housenumber = properties.get('addr_housenumber')
# consider it an address if the name of the building
# is just a number.
name = properties.get('name')
if name is not None and digits_pattern.match(name):
if addr_housenumber is None:
addr_housenumber = properties.pop('name')
# and also suppress the name if it's the same as
# the address.
elif name == addr_housenumber:
properties.pop('name')
# if there's no address, then keep the feature as-is,
# no modifications.
if addr_housenumber is None:
continue
label_point = shape.representative_point()
# we're only interested in a very few properties for
# address points.
label_properties = dict(
addr_housenumber=addr_housenumber,
kind='address')
source = properties.get('source')
if source is not None:
label_properties['source'] = source
addr_street = properties.get('addr_street')
if addr_street is not None:
label_properties['addr_street'] = addr_street
oid = properties.get('id')
if oid is not None:
label_properties['id'] = oid
label_feature = label_point, label_properties, fid
new_features.append(label_feature)
layer['features'].extend(new_features)
return layer
def parse_layer_as_float(shape, properties, fid, zoom):
"""
If the 'layer' property is present on a feature, then
this attempts to parse it as a floating point number.
The old value is removed and, if it could be parsed
as a floating point number, the number replaces the
original property.
"""
layer = properties.pop('layer', None)
if layer:
layer_float = to_float(layer)
if layer_float is not None:
properties['layer'] = layer_float
return shape, properties, fid
def drop_features_where(ctx):
"""
Drop features entirely that match the particular "where"
condition. Any feature properties are available to use, as well as
the properties dict itself, called "properties" in the scope.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, 'drop_features_where: missing source layer'
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
where = ctx.params.get('where')
assert where, 'drop_features_where: missing where'
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
where = compile(where, 'queries.yaml', 'eval')
new_features = []
for feature in layer['features']:
shape, properties, fid = feature
local = properties.copy()
local['properties'] = properties
local['geom_type'] = shape.geom_type
if not eval(where, {}, local):
new_features.append(feature)
layer['features'] = new_features
return layer
def _project_properties(ctx, action):
"""
Project properties down to a subset of the existing properties based on a
predicate `where` which returns true when the function `action` should be
performed. The value returned from `action` replaces the properties of the
feature.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
where = ctx.params.get('where')
source_layer = ctx.params.get('source_layer')
assert source_layer, '_project_properties: missing source layer'
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
geom_types = ctx.params.get('geom_types')
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
if where is not None:
where = compile(where, 'queries.yaml', 'eval')
new_features = []
for feature in layer['features']:
shape, props, fid = feature
# skip some types of geometry
if geom_types and shape.geom_type not in geom_types:
new_features.append((shape, props, fid))
continue
# we're going to use a defaultdict for this, so that references to
# properties which don't exist just end up as None without causing an
# exception. we also add a 'zoom' one. would prefer '$zoom', but
# apparently that's not allowed in python syntax.
local = defaultdict(lambda: None)
local.update(props)
local['zoom'] = zoom
# allow decisions based on meters per pixel zoom too.
meters_per_pixel_area = calc_meters_per_pixel_area(zoom)
local['pixel_area'] = meters_per_pixel_area
if where is None or eval(where, {}, local):
props = action(props)
new_features.append((shape, props, fid))
layer['features'] = new_features
return layer
def drop_properties(ctx):
"""
Drop all configured properties for features in source_layer
"""
properties = ctx.params.get('properties')
all_name_variants = ctx.params.get('all_name_variants', False)
assert properties, 'drop_properties: missing properties'
def action(p):
if all_name_variants and 'name' in properties:
p = _remove_names(p)
return _remove_properties(p, *properties)
return _project_properties(ctx, action)
def drop_names(ctx):
"""
Drop all names on properties for features in this layer.
"""
def action(p):
return _remove_names(p)
return _project_properties(ctx, action)
def remove_zero_area(shape, properties, fid, zoom):
"""
All features get a numeric area tag, but for points this
is zero. The area probably isn't exactly zero, so it's
probably less confusing to just remove the tag to show
that the value is probably closer to "unspecified".
"""
# remove the property if it's present. we _only_ want
# to replace it if it matches the positive, float
# criteria.
area = properties.pop("area", None)
# try to parse a string if the area has been sent as a
# string. it should come through as a float, though,
# since postgres treats it as a real.
if isinstance(area, (str, unicode)):
area = to_float(area)
if area is not None:
# cast to integer to match what we do for polygons.
# also the fractional parts of a sq.m are just
# noise really.
area = int(area)
if area > 0:
properties['area'] = area
return shape, properties, fid
# circumference of the extent of the world in mercator "meters"
_MERCATOR_CIRCUMFERENCE = 40075016.68
# _Deduplicator handles the logic for deduplication. a feature
# is considered a duplicate if it has the same property tuple
# as another and is within a certain distance of the other.
#
# the property tuple is calculated by taking a tuple or list
# of keys and extracting the value of the matching property
# or None. if none_means_unique is true, then if any tuple
# entry is None the feature is considered unique and kept.
#
# note: distance here is measured in coordinate units; i.e:
# mercator meters!
class _Deduplicator:
def __init__(self, property_keys, min_distance,
none_means_unique):
self.property_keys = property_keys
self.min_distance = min_distance
self.none_means_unique = none_means_unique
self.seen_items = dict()
def keep_feature(self, feature):
"""
Returns true if the feature isn't a duplicate, and should
be kept in the output. Otherwise, returns false, as
another feature had the same tuple of values.
"""
shape, props, fid = feature
key = tuple([props.get(k) for k in self.property_keys])
if self.none_means_unique and any([v is None for v in key]):
return True
seen_geoms = self.seen_items.get(key)
if seen_geoms is None:
# first time we've seen this item, so keep it in
# the output.
self.seen_items[key] = [shape]
return True
else:
# if the distance is greater than the minimum set
# for this zoom, then we also keep it.
distance = min([shape.distance(s) for s in seen_geoms])
if distance > self.min_distance:
# this feature is far enough away to count as
# distinct, but keep this geom to suppress any
# other labels nearby.
seen_geoms.append(shape)
return True
else:
# feature is a duplicate
return False
def remove_duplicate_features(ctx):
"""
Removes duplicate features from a layer, or set of layers. The
definition of duplicate is anything which has the same values
for the tuple of values associated with the property_keys.
If `none_means_unique` is set, which it is by default, then a
value of None for *any* of the values in the tuple causes the
feature to be considered unique and completely by-passed. This
is mainly to handle things like features missing their name,
where we don't want to remove all but one unnamed feature.
For example, if property_keys was ['name', 'kind'], then only
the first feature of those with the same value for the name
and kind properties would be kept in the output.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
source_layers = ctx.params.get('source_layers')
start_zoom = ctx.params.get('start_zoom', 0)
property_keys = ctx.params.get('property_keys')
geometry_types = ctx.params.get('geometry_types')
min_distance = ctx.params.get('min_distance', 0.0)
none_means_unique = ctx.params.get('none_means_unique', True)
end_zoom = ctx.params.get('end_zoom')
# can use either a single source layer, or multiple source
# layers, but not both.
assert bool(source_layer) ^ bool(source_layers), \
('remove_duplicate_features: define either source layer or source '
'layers, but not both')
# note that the property keys or geometry types could be empty,
# but then this post-process filter would do nothing. so we
# assume that the user didn't intend this, or they wouldn't have
# included the filter in the first place.
assert property_keys, \
'remove_duplicate_features: missing or empty property keys'
assert geometry_types, \
'remove_duplicate_features: missing or empty geometry types'
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
# allow either a single or multiple layers to be used.
if source_layer:
source_layers = [source_layer]
# correct for zoom: min_distance is given in pixels, but we
# want to do the comparison in coordinate units to avoid
# repeated conversions.
min_distance = (min_distance * _MERCATOR_CIRCUMFERENCE /
float(1 << (zoom + 8)))
# keep a set of the tuple of the property keys. this will tell
# us if the feature is unique while allowing us to maintain the
# sort order by only dropping later, presumably less important,
# features. we keep the geometry of the seen items too, so that
# we can tell if any new feature is significantly far enough
# away that it should be shown again.
deduplicator = _Deduplicator(property_keys, min_distance,
none_means_unique)
for source_layer in source_layers:
layer_index = -1
# because this post-processor can potentially modify
# multiple layers, and that wasn't how the return value
# system was designed, instead it modifies layers
# *in-place*. this is abnormal, and as such requires a
# nice big comment like this!
for index, feature_layer in enumerate(feature_layers):
layer_datum = feature_layer['layer_datum']
layer_name = layer_datum['name']
if layer_name == source_layer:
layer_index = index
break
if layer_index < 0:
# TODO: warn about missing layer when we get the
# ability to log.
continue
layer = feature_layers[layer_index]
new_features = []
for feature in layer['features']:
shape, props, fid = feature
keep_feature = True
if geometry_types is not None and \
shape.geom_type in geometry_types:
keep_feature = deduplicator.keep_feature(feature)
if keep_feature:
new_features.append(feature)
# NOTE! modifying the layer *in-place*.
layer['features'] = new_features
feature_layers[index] = layer
# returning None here would normally indicate that the
# post-processor has done nothing. but because this
# modifies the layers *in-place* then all the return
# value is superfluous.
return None
def merge_duplicate_stations(ctx):
"""
Normalise station names by removing any parenthetical lines
lists at the end (e.g: "Foo St (A, C, E)"). Parse this and
use it to replace the `subway_routes` list if that is empty
or isn't present.
Use the root relation ID, calculated as part of the exploration of the
transit relations, plus the name, now appropriately trimmed, to merge
station POIs together, unioning their subway routes.
Finally, re-sort the features in case the merging has caused
the station POIs to be out-of-order.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, \
'normalize_and_merge_duplicate_stations: missing source layer'
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
if zoom < start_zoom:
return None
# we probably don't want to do this at higher zooms (e.g: 17 &
# 18), even if there are a bunch of stations very close
# together.
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
seen_stations = {}
new_features = []
for feature in layer['features']:
shape, props, fid = feature
kind = props.get('kind')
name = props.get('name')
if name is not None and kind == 'station':
# this should match station names where the name is
# followed by a ()-bracketed list of line names. this
# is common in NYC, and we want to normalise by
# stripping these off and using it to provide the
# list of lines if we haven't already got that info.
m = station_pattern.match(name)
subway_routes = props.get('subway_routes', [])
transit_route_relation_id = props.get(
'mz_transit_root_relation_id')
if m:
# if the lines aren't present or are empty
if not subway_routes:
lines = m.group(2).split(',')
subway_routes = [x.strip() for x in lines]
props['subway_routes'] = subway_routes
# update name so that it doesn't contain all the
# lines.
name = m.group(1).strip()
props['name'] = name
# if the root relation ID is available, then use that for
# identifying duplicates. otherwise, use the name.
key = transit_route_relation_id or name
seen_idx = seen_stations.get(key)
if seen_idx is None:
seen_stations[key] = len(new_features)
# ensure that transit routes is present and is of
# list type for when we append to it later if we
# find a duplicate.
props['subway_routes'] = subway_routes
new_features.append(feature)
else:
# get the properties and append this duplicate's
# transit routes to the list on the original
# feature.
seen_props = new_features[seen_idx][1]
# make sure routes are unique
unique_subway_routes = set(subway_routes) | \
set(seen_props['subway_routes'])
seen_props['subway_routes'] = list(unique_subway_routes)
else:
# not a station, or name is missing - we can't
# de-dup these.
new_features.append(feature)
# might need to re-sort, if we merged any stations:
# removing duplicates would have changed the number
# of routes for each station.
if seen_stations:
sort_pois(new_features, zoom)
layer['features'] = new_features
return layer
def normalize_station_properties(ctx):
"""
Normalise station properties by removing some which are only used
during importance calculation. Stations may also have route
information, which may appear as empty lists. These are
removed. Also, flags are put on the station to indicate what
kind(s) of station it might be.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, \
'normalize_and_merge_duplicate_stations: missing source layer'
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
if zoom < start_zoom:
return None
# we probably don't want to do this at higher zooms (e.g: 17 &
# 18), even if there are a bunch of stations very close
# together.
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
for shape, props, fid in layer['features']:
kind = props.get('kind')
# get rid of temporaries
root_relation_id = props.pop('mz_transit_root_relation_id', None)
props.pop('mz_transit_score', None)
if kind == 'station':
# remove anything that has an empty *_routes
# list, as this most likely indicates that we were
# not able to _detect_ what lines it's part of, as
# it seems unlikely that a station would be part of
# _zero_ routes.
for typ in ['train', 'subway', 'light_rail', 'tram']:
prop_name = '%s_routes' % typ
routes = props.pop(prop_name, [])
if routes:
props[prop_name] = routes
props['is_%s' % typ] = True
# if the station has a root relation ID then include
# that as a way for the client to link together related
# features.
if root_relation_id:
props['root_id'] = root_relation_id
return layer
def _match_props(props, items_matching):
"""
Checks if all the items in `items_matching` are also
present in `props`. If so, returns true. Otherwise
returns false.
Each value in `items_matching` can be a list, in which case the
value from `props` must be any one of those values.
"""
for k, v in items_matching.iteritems():
prop_val = props.get(k)
if isinstance(v, list):
if prop_val not in v:
return False
elif prop_val != v:
return False
return True
def keep_n_features(ctx):
"""
Keep only the first N features matching `items_matching`
in the layer. This is primarily useful for removing
features which are abundant in some places but scarce in
others. Rather than try to set some global threshold which
works well nowhere, instead sort appropriately and take a
number of features which is appropriate per-tile.
This is done by counting each feature which matches _all_
the key-value pairs in `items_matching` and, when the
count is larger than `max_items`, dropping those features.
Only features which are within the unpadded bounds of the
tile are considered for keeping or dropping. Features
entirely outside the bounds of the tile are always kept.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, 'keep_n_features: missing source layer'
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
items_matching = ctx.params.get('items_matching')
max_items = ctx.params.get('max_items')
unpadded_bounds = Box(*ctx.unpadded_bounds)
# leaving items_matching or max_items as None (or zero)
# would mean that this filter would do nothing, so assume
# that this is really a configuration error.
assert items_matching, 'keep_n_features: missing or empty item match dict'
assert max_items, 'keep_n_features: missing or zero max number of items'
if zoom < start_zoom:
return None
# we probably don't want to do this at higher zooms (e.g: 17 &
# 18), even if there are a bunch of features in the tile, as
# we use the high-zoom tiles for overzooming to 20+, and we'd
# eventually expect to see _everything_.
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
count = 0
new_features = []
for shape, props, fid in layer['features']:
keep_feature = True
if _match_props(props, items_matching) and \
shape.intersects(unpadded_bounds):
count += 1
if count > max_items:
keep_feature = False
if keep_feature:
new_features.append((shape, props, fid))
layer['features'] = new_features
return layer
def rank_features(ctx):
"""
Assign a rank to features in `rank_key`.
Enumerate the features matching `items_matching` and insert
the rank as a property with the key `rank_key`. This is
useful for the client, so that it can selectively display
only the top features, or de-emphasise the later features.
Note that only features within in the unpadded bounds are ranked.
Features entirely outside the bounds of the tile are not modified.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, 'rank_features: missing source layer'
start_zoom = ctx.params.get('start_zoom', 0)
items_matching = ctx.params.get('items_matching')
rank_key = ctx.params.get('rank_key')
unpadded_bounds_shp = Box(*ctx.unpadded_bounds)
# leaving items_matching or rank_key as None would mean
# that this filter would do nothing, so assume that this
# is really a configuration error.
assert items_matching, 'rank_features: missing or empty item match dict'
assert rank_key, 'rank_features: missing or empty rank key'
if zoom < start_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
count = 0
for shape, props, fid in layer['features']:
if (_match_props(props, items_matching) and
unpadded_bounds_shp.intersects(shape)):
count += 1
props[rank_key] = count
return layer
def normalize_aerialways(shape, props, fid, zoom):
aerialway = props.get('aerialway')
# normalise cableway, apparently a deprecated
# value.
if aerialway == 'cableway':
props['aerialway'] = 'zip_line'
# 'yes' is a pretty unhelpful value, so normalise
# to a slightly more meaningful 'unknown', which
# is also a commonly-used value.
if aerialway == 'yes':
props['aerialway'] = 'unknown'
return shape, props, fid
def numeric_min_filter(ctx):
"""
Keep only features which have properties equal or greater
than the configured minima. These are in a dict per zoom
like this:
{ 15: { 'area': 1000 }, 16: { 'area': 2000 } }
This would mean that at zooms 15 and 16, the filter was
active. At other zooms it would do nothing.
Multiple filters can be given for a single zoom. The
`mode` parameter can be set to 'any' to require that only
one of the filters needs to match, or any other value to
use the default 'all', which requires all filters to
match.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, 'rank_features: missing source layer'
filters = ctx.params.get('filters')
mode = ctx.params.get('mode')
# assume missing filter is a config error.
assert filters, 'numeric_min_filter: missing or empty filters dict'
# get the minimum filters for this zoom, and return if
# there are none to apply.
minima = filters.get(zoom)
if not minima:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
# choose whether all minima have to be met, or just
# one of them.
aggregate_func = all
if mode == 'any':
aggregate_func = any
new_features = []
for shape, props, fid in layer['features']:
keep = []
for prop, min_val in minima.iteritems():
val = props.get(prop)
keep.append(val >= min_val)
if aggregate_func(keep):
new_features.append((shape, props, fid))
layer['features'] = new_features
return layer
def copy_features(ctx):
"""
Copy features matching _both_ the `where` selection and the
`geometry_types` list to another layer. If the target layer
doesn't exist, it is created.
"""
feature_layers = ctx.feature_layers
source_layer = ctx.params.get('source_layer')
target_layer = ctx.params.get('target_layer')
where = ctx.params.get('where')
geometry_types = ctx.params.get('geometry_types')
assert source_layer, 'copy_features: source layer not configured'
assert target_layer, 'copy_features: target layer not configured'
assert where, \
('copy_features: you must specify how to match features in the where '
'parameter')
assert geometry_types, \
('copy_features: you must specify at least one type of geometry in '
'geometry_types')
src_layer = _find_layer(feature_layers, source_layer)
if src_layer is None:
return None
tgt_layer = _find_layer(feature_layers, target_layer)
if tgt_layer is None:
# create target layer if it doesn't already exist.
tgt_layer_datum = src_layer['layer_datum'].copy()
tgt_layer_datum['name'] = target_layer
tgt_layer = src_layer.copy()
tgt_layer['name'] = target_layer
tgt_layer['features'] = []
tgt_layer['layer_datum'] = tgt_layer_datum
new_features = []
for feature in src_layer['features']:
shape, props, fid = feature
if _match_props(props, where):
# need to deep copy, otherwise we could have some
# unintended side effects if either layer is
# mutated later on.
shape_copy = shape.__class__(shape)
new_features.append((shape_copy, props.copy(), fid))
tgt_layer['features'].extend(new_features)
return tgt_layer
def make_representative_point(shape, properties, fid, zoom):
"""
Replaces the geometry of each feature with its
representative point. This is a point which should be
within the interior of the geometry, which can be
important for labelling concave or doughnut-shaped
polygons.
"""
label_placement_wkb = properties.get('mz_label_placement', None)
if label_placement_wkb:
shape = shapely.wkb.loads(label_placement_wkb)
else:
shape = shape.representative_point()
return shape, properties, fid
def add_iata_code_to_airports(shape, properties, fid, zoom):
"""
If the feature is an airport, and it has a 3-character
IATA code in its tags, then move that code to its
properties.
"""
kind = properties.get('kind')
if kind not in ('aerodrome', 'airport'):
return shape, properties, fid
tags = properties.get('tags')
if not tags:
return shape, properties, fid
iata_code = tags.get('iata')
if not iata_code:
return shape, properties, fid
# IATA codes should be uppercase, and most are, but there
# might be some in lowercase, so just normalise to upper
# here.
iata_code = iata_code.upper()
if iata_short_code_pattern.match(iata_code):
properties['iata'] = iata_code
return shape, properties, fid
def add_uic_ref(shape, properties, fid, zoom):
"""
If the feature has a valid uic_ref tag (7 integers), then move it
to its properties.
"""
tags = properties.get('tags')
if not tags:
return shape, properties, fid
uic_ref = tags.get('uic_ref')
if not uic_ref:
return shape, properties, fid
uic_ref = uic_ref.strip()
if len(uic_ref) != 7:
return shape, properties, fid
try:
uic_ref_int = int(uic_ref)
except ValueError:
return shape, properties, fid
else:
properties['uic_ref'] = uic_ref_int
return shape, properties, fid
def _freeze(thing):
"""
Freezes something to a hashable item.
"""
if isinstance(thing, dict):
return frozenset([(_freeze(k), _freeze(v)) for k, v in thing.items()])
elif isinstance(thing, list):
return tuple([_freeze(i) for i in thing])
return thing
def _thaw(thing):
"""
Reverse of the freeze operation.
"""
if isinstance(thing, frozenset):
return dict([_thaw(i) for i in thing])
elif isinstance(thing, tuple):
return list([_thaw(i) for i in thing])
return thing
def quantize_val(val, step):
# special case: if val is very small, we don't want it rounding to zero, so
# round the smallest values up to the first step.
if val < step:
return int(step)
result = int(step * round(val / float(step)))
return result
def quantize_height_round_nearest_5_meters(height):
return quantize_val(height, 5)
def quantize_height_round_nearest_10_meters(height):
return quantize_val(height, 10)
def quantize_height_round_nearest_20_meters(height):
return quantize_val(height, 20)
def quantize_height_round_nearest_meter(height):
return round(height)
def _merge_lines(linestring_shapes, _unused_tolerance):
list_of_linestrings = []
for shape in linestring_shapes:
list_of_linestrings.extend(_flatten_geoms(shape))
# if the list of linestrings is empty, return None. this avoids generating
# an empty GeometryCollection, which causes problems further down the line,
# usually while formatting the tile.
if not list_of_linestrings:
return []
multi = MultiLineString(list_of_linestrings)
result = _linemerge(multi)
return [result]
def _drop_small_inners_multi(shape, area_tolerance):
"""
Drop inner rings (holes) of the given shape which are smaller than the area
tolerance. The shape must be either a Polygon or MultiPolygon. Returns a
shape which may be empty.
"""
from shapely.geometry import MultiPolygon
if shape.geom_type == 'Polygon':
shape = _drop_small_inners(shape, area_tolerance)
elif shape.geom_type == 'MultiPolygon':
multi = []
for poly in shape:
new_poly = _drop_small_inners(poly, area_tolerance)
if not new_poly.is_empty:
multi.append(new_poly)
shape = MultiPolygon(multi)
else:
shape = MultiPolygon([])
return shape
def _drop_small_outers_multi(shape, area_tolerance):
"""
Drop individual polygons which are smaller than the area tolerance. Input
can be a single Polygon or MultiPolygon, in which case each Polygon within
the MultiPolygon will be compared to the area tolerance individually.
Returns a shape, which may be empty.
"""
from shapely.geometry import MultiPolygon
if shape.geom_type == 'Polygon':
if shape.area < area_tolerance:
shape = MultiPolygon([])
elif shape.geom_type == 'MultiPolygon':
multi = []
for poly in shape:
if poly.area >= area_tolerance:
multi.append(poly)
shape = MultiPolygon(multi)
else:
shape = MultiPolygon([])
return shape
def _merge_polygons(polygon_shapes, tolerance):
"""
Merge a list of polygons together into a single shape. Returns list of
shapes, which might be empty.
"""
list_of_polys = []
for shape in polygon_shapes:
list_of_polys.extend(_flatten_geoms(shape))
# if the list of polygons is empty, return None. this avoids generating an
# empty GeometryCollection, which causes problems further down the line,
# usually while formatting the tile.
if not list_of_polys:
return []
# first, try to merge the polygons as they are.
try:
result = shapely.ops.unary_union(list_of_polys)
return [result]
except ValueError:
pass
# however, this can lead to numerical instability where polygons _almost_
# touch, so sometimes buffering them outwards a little bit can help.
try:
from shapely.geometry import JOIN_STYLE
# don't buffer by the full pixel, instead choose a smaller value that
# shouldn't be noticable.
buffer_size = tolerance / 16.0
list_of_buffered = [
p.buffer(buffer_size, join_style=JOIN_STYLE.mitre, mitre_limit=1.5)
for p in list_of_polys
]
result = shapely.ops.unary_union(list_of_buffered)
return [result]
except ValueError:
pass
# ultimately, if it's not possible to merge them then bail.
# TODO: when we get a logger in here, let's log a big FAIL message.
return []
def _merge_polygons_with_buffer(polygon_shapes, tolerance):
"""
Merges polygons together with a buffer operation to blend together
adjacent polygons. Originally designed for buildings.
It does this by first merging the polygons into a single MultiPolygon and
then dilating or buffering the polygons by a small amount (tolerance). The
shape is then simplified, small inners are dropped and it is shrunk back
by the same amount it was dilated by. Finally, small polygons are dropped.
Many cities around the world have dense buildings in blocks, but these
buildings can be quite detailed; having complex facades or interior
courtyards or lightwells. As we zoom out, we often would like to keep the
"visual texture" of the buildings, but reducing the level of detail
significantly. This method aims to get closer to that, merging neighbouring
buildings together into blocks.
"""
from shapely.geometry import JOIN_STYLE
area_tolerance = tolerance * tolerance
# small factor, relative to tolerance. this is used so that we don't buffer
# polygons out by exactly the same amount as we buffer them inwards. using
# the exact same value ends up causing topology problems when two points on
# opposing sides of the polygon meet eachother exactly.
epsilon = tolerance * 1.0e-6
result = _merge_polygons(polygon_shapes, tolerance)
if not result:
return result
assert len(result) == 1
result = result[0]
# buffer with a mitre join, as this keeps the corners sharp and (mostly)
# keeps angles the same. to avoid spikes, we limit the mitre to a little
# under 90 degrees.
result = result.buffer(
tolerance - epsilon, join_style=JOIN_STYLE.mitre, mitre_limit=1.5)
result = result.simplify(tolerance)
result = _drop_small_inners_multi(result, area_tolerance)
result = result.buffer(
-tolerance, join_style=JOIN_STYLE.mitre, mitre_limit=1.5)
result = _drop_small_outers_multi(result, area_tolerance)
# don't return invalid results!
if result.is_empty or not result.is_valid:
return []
return [result]
def _union_bounds(a, b):
"""
Union two (minx, miny, maxx, maxy) tuples of bounds, returning a tuple
which covers both inputs.
"""
if a is None:
return b
elif b is None:
return a
else:
aminx, aminy, amaxx, amaxy = a
bminx, bminy, bmaxx, bmaxy = b
return (min(aminx, bminx), min(aminy, bminy),
max(amaxx, bmaxx), max(amaxy, bmaxy))
def _intersects_bounds(a, b):
"""
Return true if two bounding boxes intersect.
"""
aminx, aminy, amaxx, amaxy = a
bminx, bminy, bmaxx, bmaxy = b
if aminx > bmaxx or amaxx < bminx:
return False
elif aminy > bmaxy or amaxy < bminy:
return False
return True
# RecursiveMerger is a set of functions to merge geometry recursively in a
# quad tree.
#
# It consists of three functions, any of which can be `id` for a no-op, and
# all of which take a single argument which will be a list of shapes, and
# should return a list of shapes as output.
#
# * leaf: called at the leaves of the quad tree with original geometry.
# * node: called at internal nodes of the quad tree with the results of
# either calls to leaf() or node().
# * root: called once at the root with the results of the top node (or leaf
# if it's a degenerate single-level tree).
# * tolerance: a length that is approximately a pixel, or the size by which
# things can be simplified or snapped to.
#
# These allow us to merge transformed versions of geometry, where leaf()
# transforms the geometry to some other form (e.g: buffered for buildings),
# node merges those recursively, and then root reverses the buffering.
#
RecursiveMerger = namedtuple('RecursiveMerger', 'leaf node root tolerance')
# A bucket used to sort shapes into the next level of the quad tree.
Bucket = namedtuple("Bucket", "bounds box shapes")
def _mkbucket(*bounds):
"""
Convenience method to make a bucket from a tuple of bounds (minx, miny,
maxx, maxy) and also make the Shapely shape for that.
"""
from shapely.geometry import box
return Bucket(bounds, box(*bounds), [])
def _merge_shapes_recursively(shapes, shapes_per_merge, merger, depth=0,
bounds=None):
"""
Group the shapes geographically, returning a list of shapes. The merger,
which must be a RecursiveMerger, controls how the shapes are merged.
This is to help merging/unioning, where it's better to try and merge shapes
which are adjacent or near each other, rather than just taking a slice of
a list of shapes which might be in any order.
The shapes_per_merge controls at what depth the tree starts merging.
Smaller values mean a deeper tree, which might increase performance if
merging large numbers of items at once is slow.
This method is recursive, and will bottom out after 5 levels deep, which
might mean that sometimes more than shapes_per_merge items are merged at
once.
"""
assert isinstance(merger, RecursiveMerger)
# don't keep recursing. if we haven't been able to get to a smaller number
# of shapes by 5 levels down, then perhaps there are particularly large
# shapes which are preventing things getting split up correctly.
if len(shapes) <= shapes_per_merge and depth == 0:
return merger.root(merger.leaf(shapes, merger.tolerance))
elif depth >= 5:
return merger.leaf(shapes, merger.tolerance)
# on the first call, figure out what the bounds of the shapes are. when
# recursing, use the bounds passed in from the parent.
if bounds is None:
for shape in shapes:
bounds = _union_bounds(bounds, shape.bounds)
minx, miny, maxx, maxy = bounds
midx = 0.5 * (minx + maxx)
midy = 0.5 * (miny + maxy)
# find the 4 quadrants of the bounding box and use those to bucket the
# shapes so that neighbouring shapes are more likely to stay together.
buckets = [
_mkbucket(minx, miny, midx, midy),
_mkbucket(minx, midy, midx, maxy),
_mkbucket(midx, miny, maxx, midy),
_mkbucket(midx, midy, maxx, maxy),
]
for shape in shapes:
for bucket in buckets:
if shape.intersects(bucket.box):
bucket.shapes.append(shape)
break
else:
raise AssertionError(
"Expected shape %r to intersect at least one quadrant, but "
"intersects none." % (shape.wkt))
# recurse if necessary to get below the number of shapes per merge that
# we want.
grouped_shapes = []
for bucket in buckets:
if len(bucket.shapes) > shapes_per_merge:
recursed = _merge_shapes_recursively(
bucket.shapes, shapes_per_merge, merger,
depth=depth+1, bounds=bucket.bounds)
grouped_shapes.extend(recursed)
# don't add empty lists!
elif bucket.shapes:
grouped_shapes.extend(merger.leaf(bucket.shapes, merger.tolerance))
fn = merger.root if depth == 0 else merger.node
return fn(grouped_shapes)
def _noop(x):
return x
def _merge_features_by_property(
features, geom_dim, tolerance,
update_props_pre_fn=None,
update_props_post_fn=None,
max_merged_features=None,
merge_shape_fn=None,
merge_props_fn=None):
assert geom_dim in (_POLYGON_DIMENSION, _LINE_DIMENSION)
if merge_shape_fn is not None:
_merge_shape_fn = merge_shape_fn
elif geom_dim == _LINE_DIMENSION:
_merge_shape_fn = _merge_lines
else:
_merge_shape_fn = _merge_polygons
features_by_property = {}
skipped_features = []
for feature in features:
shape, props, fid = feature
shape_dim = _geom_dimensions(shape)
if shape_dim != geom_dim:
skipped_features.append(feature)
continue
orig_props = props.copy()
p_id = props.pop('id', None)
if update_props_pre_fn:
props = update_props_pre_fn((shape, props, fid))
if props is None:
skipped_features.append((shape, orig_props, fid))
continue
frozen_props = _freeze(props)
if frozen_props in features_by_property:
record = features_by_property[frozen_props]
record[-1].append(shape)
record[-2].append(orig_props)
else:
features_by_property[frozen_props] = (
(fid, p_id, [orig_props], [shape]))
new_features = []
for frozen_props, (fid, p_id, orig_props, shapes) in \
features_by_property.iteritems():
if len(shapes) == 1:
# restore original properties if we only have a single shape
new_features.append((shapes[0], orig_props[0], fid))
continue
num_shapes = len(shapes)
shapes_per_merge = num_shapes
if max_merged_features and max_merged_features < shapes_per_merge:
shapes_per_merge = max_merged_features
# reset fid if we're going to split up features, as we don't want
# them all to have duplicate IDs.
fid = None
merger = RecursiveMerger(root=_noop, node=_noop, leaf=_merge_shape_fn,
tolerance=tolerance)
for merged_shape in _merge_shapes_recursively(
shapes, shapes_per_merge, merger):
# don't keep any features which have become degenerate or empty
# after having been merged.
if merged_shape is None or merged_shape.is_empty:
continue
if merge_props_fn is None:
# thaw the frozen properties to use in the new feature.
props = _thaw(frozen_props)
else:
props = merge_props_fn(orig_props)
if update_props_post_fn:
props = update_props_post_fn((merged_shape, props, fid))
new_features.append((merged_shape, props, fid))
new_features.extend(skipped_features)
return new_features
def quantize_height(ctx):
"""
Quantize the height property of features in the layer according to the
per-zoom configured quantize function.
"""
params = _Params(ctx, 'quantize_height')
zoom = ctx.nominal_zoom
source_layer = params.required('source_layer')
start_zoom = params.optional('start_zoom', default=0, typ=int)
end_zoom = params.optional('end_zoom', typ=int)
quantize_cfg = params.required('quantize', typ=dict)
layer = _find_layer(ctx.feature_layers, source_layer)
if layer is None:
return None
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
quantize_fn_dotted_name = quantize_cfg.get(zoom)
if not quantize_fn_dotted_name:
# no changes at this zoom
return None
quantize_height_fn = resolve(quantize_fn_dotted_name)
for shape, props, fid in layer['features']:
height = props.get('height', None)
if height is not None:
props['height'] = quantize_height_fn(height)
return None
def merge_building_features(ctx):
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
drop = ctx.params.get('drop')
exclusions = ctx.params.get('exclude')
max_merged_features = ctx.params.get('max_merged_features')
assert source_layer, 'merge_building_features: missing source layer'
layer = _find_layer(ctx.feature_layers, source_layer)
if layer is None:
return None
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
# this formula seems to give a good balance between larger values, which
# merge more but can merge everything into a blob if too large, and small
# values which retain detail.
tolerance = min(5, 0.4 * tolerance_for_zoom(zoom))
def _props_pre((shape, props, fid)):
if exclusions:
for prop in exclusions:
if prop in props:
return None
# also drop building properties that we won't want to consider
# for merging. area and volume will be re-calculated afterwards
props.pop('area', None)
props.pop('volume', None)
if drop:
for prop in drop:
props.pop(prop, None)
return props
def _props_post((merged_shape, props, fid)):
# add the area and volume back in
area = int(merged_shape.area)
props['area'] = area
height = props.get('height')
if height is not None:
props['volume'] = height * area
return props
layer['features'] = _merge_features_by_property(
layer['features'], _POLYGON_DIMENSION, tolerance, _props_pre,
_props_post, max_merged_features,
merge_shape_fn=_merge_polygons_with_buffer)
return layer
def merge_polygon_features(ctx):
"""
Merge polygons having the same properties, apart from 'id' and 'area', in
the source_layer between start_zoom and end_zoom inclusive.
Area is re-calculated post-merge and IDs are preserved for features which
are unique in the merge.
"""
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
merge_min_zooms = ctx.params.get('merge_min_zooms', False)
buffer_merge = ctx.params.get('buffer_merge', False)
buffer_merge_tolerance = ctx.params.get('buffer_merge_tolerance')
assert source_layer, 'merge_polygon_features: missing source layer'
layer = _find_layer(ctx.feature_layers, source_layer)
if layer is None:
return None
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
tfz = tolerance_for_zoom(zoom)
if buffer_merge_tolerance:
tolerance = eval(buffer_merge_tolerance, {}, {
'tolerance_for_zoom': tfz,
})
else:
tolerance = tfz
def _props_pre((shape, props, fid)):
# drop area while merging, as we'll recalculate after.
props.pop('area', None)
if merge_min_zooms:
props.pop('min_zoom', None)
return props
def _props_post((merged_shape, props, fid)):
# add the area back in
area = int(merged_shape.area)
props['area'] = area
return props
def _props_merge(all_props):
merged_props = None
for props in all_props:
if merged_props is None:
merged_props = props.copy()
else:
min_zoom = props.get('min_zoom')
merged_min_zoom = merged_props.get('min_zoom')
if min_zoom and (merged_min_zoom is None or
min_zoom < merged_min_zoom):
merged_props['min_zoom'] = min_zoom
return merged_props
merge_props_fn = _props_merge if merge_min_zooms else None
merge_shape_fn = _merge_polygons_with_buffer if buffer_merge else None
layer['features'] = _merge_features_by_property(
layer['features'], _POLYGON_DIMENSION, tolerance, _props_pre,
_props_post, merge_props_fn=merge_props_fn,
merge_shape_fn=merge_shape_fn)
return layer
def _angle_at(linestring, pt):
import math
if pt == linestring.coords[0]:
nx = linestring.coords[1]
elif pt == linestring.coords[-1]:
nx = pt
pt = linestring.coords[-2]
else:
assert False, "Expected point to be first or last"
if nx == pt:
return None
dx = nx[0] - pt[0]
dy = nx[1] - pt[1]
if dy < 0.0:
dx = -dx
dy = -dy
a = math.atan2(dy, dx) / math.pi * 180.0
# wrap around at exactly 180, because we don't care about the direction of
# the road, only what angle the line is at, and 180 is horizontal same as
# 0.
if a == 180.0:
a = 0.0
assert 0 <= a < 180
return a
def _junction_merge_candidates(ids, geoms, pt, angle_tolerance):
# find the angles at which the lines join the point
angles = []
for i in ids:
a = _angle_at(geoms[i], pt)
if a is not None:
angles.append((a, i))
# turn that into an angle->index associative list, so
# that we can tell which are the closest pair of angles.
angles.sort()
# list of pairs of ids, candidates to be merged.
candidates = []
# loop over the list, removing the closest pair, as long
# as they're within the tolerance angle of eachother.
while len(angles) > 1:
min_angle = None
for j in xrange(0, len(angles)):
angle1, idx1 = angles[j]
angle0, idx0 = angles[j-1]
# usually > 0 since angles are sorted, but might be negative
# on the first index (angles[-1]). note that, since we're
# taking the non-directional angle, the result should be
# between 0 and 180.
delta_angle = angle1 - angle0
if delta_angle < 0:
delta_angle += 180
if min_angle is None or delta_angle < min_angle[0]:
min_angle = (delta_angle, j)
if min_angle[0] >= angle_tolerance or min_angle is None:
break
candidates.append((angles[j][1], angles[j-1][1]))
del angles[j]
del angles[j-1]
return candidates
def _merge_junctions_in_multilinestring(mls, angle_tolerance):
"""
Merge LineStrings within a MultiLineString across junctions where more
than two lines meet and the lines appear to continue across the junction
at the same angle.
The angle_tolerance (in degrees) is used to judge whether two lines
look like they continue across a junction.
Returns a new shape.
"""
endpoints = defaultdict(list)
for i, ls in enumerate(mls.geoms):
endpoints[ls.coords[0]].append(i)
endpoints[ls.coords[-1]].append(i)
seen = set()
merged_geoms = []
for pt, ids in endpoints.iteritems():
# we can't merge unless we've got at least 2 lines!
if len(ids) < 2:
continue
candidates = _junction_merge_candidates(
ids, mls.geoms, pt, angle_tolerance)
for a, b in candidates:
if a not in seen and b not in seen and a != b:
merged = linemerge(MultiLineString(
[mls.geoms[a], mls.geoms[b]]))
if merged.geom_type == 'LineString':
merged_geoms.append(merged)
seen.add(a)
seen.add(b)
elif (merged.geom_type == 'MultiLineString' and
len(merged.geoms) == 1):
merged_geoms.append(merged.geoms[0])
seen.add(a)
seen.add(b)
# add back any left over linestrings which didn't get merged.
for i, ls in enumerate(mls.geoms):
if i not in seen:
merged_geoms.append(ls)
if len(merged_geoms) == 1:
return merged_geoms[0]
else:
return MultiLineString(merged_geoms)
def _loop_merge_junctions(geom, angle_tolerance):
"""
Keep applying junction merging to the MultiLineString until there are no
merge opportunities left.
A single merge step will only carry out one merge per LineString, which
means that the other endpoint might miss out on a possible merge. So we
loop over the merge until all opportunities are exhausted: either we end
up with a single LineString or we run a step and it fails to merge any
candidates.
For a total number of possible merges, N, we could potentially be left
with two thirds of these left over, depending on the order of the
candidates. This means we should need only O(log N) steps to merge them
all.
"""
if geom.geom_type != 'MultiLineString':
return geom
# keep track of the number of linestrings in the multilinestring. we'll
# use that to figure out if we've merged as much as we possibly can.
mls_size = len(geom.geoms)
while True:
geom = _merge_junctions_in_multilinestring(geom, angle_tolerance)
# merged everything down to a single linestring
if geom.geom_type == 'LineString':
break
# made no progress
elif len(geom.geoms) == mls_size:
break
assert len(geom.geoms) < mls_size, \
"Number of geometries should stay the same or reduce after merge."
# otherwise, keep looping
mls_size = len(geom.geoms)
return geom
def _simplify_line_collection(shape, tolerance):
"""
Calling simplify on a MultiLineString doesn't always simplify if it would
make the MultiLineString non-simple.
However, we're trying to sort linestrings into nonoverlapping sets, and we
don't care whether they overlap at this point. However, we do want to make
sure that any colinear points in the individual LineStrings are removed.
"""
if shape.geom_type == 'LineString':
shape = shape.simplify(tolerance)
elif shape.geom_type == 'MultiLineString':
new_geoms = []
for geom in shape.geoms:
new_geoms.append(geom.simplify(tolerance))
shape = MultiLineString(new_geoms)
return shape
def _merge_junctions(features, angle_tolerance, simplify_tolerance,
split_threshold):
"""
Merge LineStrings within MultiLineStrings within features across junction
boundaries where the lines appear to continue at the same angle.
If simplify_tolerance is provided, apply a simplification step. This can
help to remove colinear junction points left over from any merging.
Finally, group the lines into non-overlapping sets, each of which generates
a separate MultiLineString feature to ensure they're already simple and
further geometric operations won't re-introduce intersection points.
Large linestrings, with more than split_threshold members, use a slightly
different algorithm which is more efficient at very large sizes.
Returns a new list of features.
"""
new_features = []
for shape, props, fid in features:
if shape.geom_type == 'MultiLineString':
shape = _loop_merge_junctions(shape, angle_tolerance)
if simplify_tolerance > 0.0:
shape = _simplify_line_collection(shape, simplify_tolerance)
if shape.geom_type == 'MultiLineString':
disjoint_shapes = _linestring_nonoverlapping_partition(
shape, split_threshold)
for disjoint_shape in disjoint_shapes:
new_features.append((disjoint_shape, props, None))
else:
new_features.append((shape, props, fid))
return new_features
def _first_positive_integer_not_in(s):
"""
Given a set of positive integers, s, return the smallest positive integer
which is _not_ in s.
For example:
>>> _first_positive_integer_not_in(set())
1
>>> _first_positive_integer_not_in(set([1]))
2
>>> _first_positive_integer_not_in(set([1,3,4]))
2
>>> _first_positive_integer_not_in(set([1,2,3,4]))
5
"""
if len(s) == 0:
return 1
last = max(s)
for i in xrange(1, last):
if i not in s:
return i
return last + 1
# utility class so that we can store the array index of the geometry
# inside the shape index.
class _geom_with_index(object):
def __init__(self, geom, index):
self.geom = geom
self.index = index
self._geom = geom._geom
self.is_empty = geom.is_empty
class OrderedSTRTree(object):
"""
An STR-tree geometry index which remembers the array index of the
geometries it was built with, and only returns geometries with lower
indices when queried.
This is used as a substitute for a dynamic index, where we'd be able
to add new geometries as the algorithm progressed.
"""
def __init__(self, geoms):
self.shape_index = STRtree([
_geom_with_index(g, i) for i, g in enumerate(geoms)
])
def query(self, shape, idx):
"""
Return the index elements which have bounding boxes intersecting the
given shape _and_ have array indices less than idx.
"""
for geom in self.shape_index.query(shape):
if geom.index < idx:
yield geom
class SplitOrderedSTRTree(object):
"""
An ordered STR-tree index which splits the geometries it is managing.
This is a simple, first-order approximation to a dynamic index. If the
input geometries are sorted by increasing size, then the "small" first
section are much less likely to overlap, and we know we're not interested
in anything in the "big" section because the index isn't large enough.
This should cut down the number of expensive queries, as well as the
number of subsequent intersection tests to check if the shapes within the
bounding boxes intersect.
"""
def __init__(self, geoms):
split = int(0.75 * len(geoms))
self.small_index = STRtree([
_geom_with_index(g, i) for i, g in enumerate(geoms[0:split])
])
self.big_index = STRtree([
_geom_with_index(g, i + split) for i, g in enumerate(geoms[split:])
])
self.split = split
def query(self, shape, i):
for geom in self.small_index.query(shape):
if geom.index < i:
yield geom
# don't need to query the big index at all unless i >= split. this
# should cut down on the number of yielded items that need further
# intersection tests.
if i >= self.split:
for geom in self.big_index.query(shape):
if geom.index < i:
yield geom
def _linestring_nonoverlapping_partition(mls, split_threshold=15000):
"""
Given a MultiLineString input, returns a list of MultiLineStrings
which are individually simple, but cover all the points in the
input MultiLineString.
The OGC definition of a MultiLineString says it's _simple_ if it
consists of simple LineStrings and the LineStrings only meet each
other at their endpoints. This means that anything which makes
MultiLineStrings simple is going to insert intersections between
crossing lines, and decompose them into separate LineStrings.
In general we _do not want_ this behaviour, as it prevents
simplification and results in more points in the geometry. However,
there are many operations which will result in simple outputs, such
as intersections and unions. Therefore, we would prefer to take the
hit of having multiple features, if the features can be decomposed
in such a way that they are individually simple.
"""
# only interested in MultiLineStrings for this method!
assert mls.geom_type == 'MultiLineString'
# simple (and sub-optimal) greedy algorithm for making sure that
# linestrings don't intersect: put each into the first bucket which
# doesn't already contain a linestring which intersects it.
#
# this will be suboptimal. for example:
#
# 2 4
# | |
# 3 ---+-+---
# | |
# 1 -----+---
# |
#
# (lines 1 & 2 do _not_ intersect).
#
# the greedy algorithm will use 3 buckets, as it'll put lines 1 & 2 in
# the same bucket, forcing 3 & 4 into individual buckets for a total
# of 3 buckets. optimally, we can bucket 1 & 3 together and 2 & 4
# together to only use 2 buckets. however, making this optimal seems
# like it might be a Hard problem.
#
# note that we don't create physical buckets, but assign each shape a
# bucket ID which hasn't been assigned to any other intersecting shape.
# we can assign these in an arbitrary order, and use an index to reduce
# the number of intersection tests needed down to O(n log n). this can
# matter quite a lot at low zooms, where it's possible to get 150,000
# tiny road segments in a single shape!
# sort the geometries before we use them. this can help if we sort things
# which have fewer intersections towards the front of the array, so that
# they can be done more quickly.
def _bbox_area(geom):
minx, miny, maxx, maxy = geom.bounds
return (maxx - minx) * (maxy - miny)
# if there's a large number of geoms, switch to the split index and sort
# so that the spatially largest objects are towards the end of the list.
# this should make it more likely that earlier queries are fast.
if len(mls.geoms) > split_threshold:
geoms = sorted(mls.geoms, key=_bbox_area)
shape_index = SplitOrderedSTRTree(geoms)
else:
geoms = mls.geoms
shape_index = OrderedSTRTree(geoms)
# first, assign everything the "null" bucket with index zero. this means
# we haven't gotten around to it yet, and we can use it as a sentinel
# value to check for logic errors.
bucket_for_shape = [0] * len(geoms)
for idx, shape in enumerate(geoms):
overlapping_buckets = set()
# assign the lowest bucket ID that hasn't been assigned to any
# overlapping shape with a lower index. this is because:
# 1. any overlapping shape would cause the insertion of a point if it
# were allowed in this bucket, and
# 2. we're assigning in-order, so shapes at higher array indexes will
# still be assigned to the null bucket. we'll get to them later!
for indexed_shape in shape_index.query(shape, idx):
if indexed_shape.geom.intersects(shape):
bucket = bucket_for_shape[indexed_shape.index]
assert bucket > 0
overlapping_buckets.add(bucket)
bucket_for_shape[idx] = _first_positive_integer_not_in(
overlapping_buckets)
results = []
for bucket_id in set(bucket_for_shape):
# by this point, no shape should be assigned to the null bucket any
# more.
assert bucket_id > 0
# collect all the shapes which have been assigned to this bucket.
shapes = []
for idx, shape in enumerate(geoms):
if bucket_for_shape[idx] == bucket_id:
shapes.append(shape)
if len(shapes) == 1:
results.append(shapes[0])
else:
results.append(MultiLineString(shapes))
return results
def _drop_short_segments_from_multi(tolerance, mls):
return MultiLineString(
[g for g in mls.geoms if g.length >= tolerance])
def _drop_short_segments(tolerance, features):
new_features = []
for shape, props, fid in features:
if shape.geom_type == 'MultiLineString':
shape = _drop_short_segments_from_multi(tolerance, shape)
elif shape.geom_type == 'LineString':
if shape.length < tolerance:
shape = None
if shape and not shape.is_empty:
new_features.append((shape, props, fid))
return new_features
def merge_line_features(ctx):
"""
Merge linestrings having the same properties, in the source_layer
between start_zoom and end_zoom inclusive.
By default, will not merge features across points where more than
two lines meet. If you set merge_junctions, then it will try to
merge where the line looks contiguous.
"""
params = _Params(ctx, 'merge_line_features')
zoom = ctx.nominal_zoom
source_layer = params.required('source_layer')
start_zoom = params.optional('start_zoom', default=0, typ=int)
end_zoom = params.optional('end_zoom', typ=int)
merge_junctions = params.optional(
'merge_junctions', default=False, typ=bool)
junction_angle_tolerance = params.optional(
'merge_junction_angle', default=15.0, typ=float)
drop_short_segments = params.optional(
'drop_short_segments', default=False, typ=bool)
short_segment_factor = params.optional(
'drop_length_pixels', default=0.1, typ=float)
simplify_tolerance = params.optional(
'simplify_tolerance', default=0.0, typ=float)
split_threshold = params.optional(
'split_threshold', default=15000, typ=int)
assert source_layer, 'merge_line_features: missing source layer'
layer = _find_layer(ctx.feature_layers, source_layer)
if layer is None:
return None
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
layer['features'] = _merge_features_by_property(
layer['features'], _LINE_DIMENSION, simplify_tolerance)
if drop_short_segments:
tolerance = short_segment_factor * tolerance_for_zoom(zoom)
layer['features'] = _drop_short_segments(
tolerance, layer['features'])
if merge_junctions:
layer['features'] = _merge_junctions(
layer['features'], junction_angle_tolerance, simplify_tolerance,
split_threshold)
return layer
def normalize_tourism_kind(shape, properties, fid, zoom):
"""
There are many tourism-related tags, including 'zoo=*' and
'attraction=*' in addition to 'tourism=*'. This function promotes
things with zoo and attraction tags have those values as their
main kind.
See https://github.com/mapzen/vector-datasource/issues/440 for more details.
""" # noqa
zoo = properties.pop('zoo', None)
if zoo is not None:
properties['kind'] = zoo
properties['tourism'] = 'attraction'
return (shape, properties, fid)
attraction = properties.pop('attraction', None)
if attraction is not None:
properties['kind'] = attraction
properties['tourism'] = 'attraction'
return (shape, properties, fid)
return (shape, properties, fid)
# a whitelist of the most common fence types from OSM.
# see https://taginfo.openstreetmap.org/keys/fence_type#values
_WHITELIST_FENCE_TYPES = set([
'avalanche',
'barbed_wire',
'bars',
'brick', # some might say a fence made of brick is called a wall...
'chain',
'chain_link',
'concrete',
'drystone_wall',
'electric',
'grate',
'hedge',
'metal',
'metal_bars',
'net',
'pole',
'railing',
'railings',
'split_rail',
'steel',
'stone',
'wall',
'wire',
'wood',
])
def build_fence(ctx):
"""
Some landuse polygons have an extra barrier fence tag, in thouse cases we
want to create an additional feature for the fence.
See https://github.com/mapzen/vector-datasource/issues/857 for more
details.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
base_layer = ctx.params.get('base_layer')
new_layer_name = ctx.params.get('new_layer_name')
prop_transform = ctx.params.get('prop_transform')
assert base_layer, 'Missing base_layer parameter'
start_zoom = ctx.params.get('start_zoom', 16)
layer = None
# don't start processing until the start zoom
if zoom < start_zoom:
return layer
# search through all the layers and extract the one
# which has the name of the base layer we were given
# as a parameter.
layer = _find_layer(feature_layers, base_layer)
# if we failed to find the base layer then it's
# possible the user just didn't ask for it, so return
# an empty result.
if layer is None:
return None
if prop_transform is None:
prop_transform = {}
features = layer['features']
new_features = list()
# loop through all the polygons, if it's a fence, duplicate it.
for feature in features:
shape, props, fid = feature
barrier = props.pop('barrier', None)
if barrier == 'fence':
fence_type = props.pop('fence_type', None)
# filter only linestring-like objects. we don't
# want any points which might have been created
# by the intersection.
filtered_shape = _filter_geom_types(shape, _POLYGON_DIMENSION)
if not filtered_shape.is_empty:
new_props = _make_new_properties(props, prop_transform)
new_props['kind'] = 'fence'
if fence_type in _WHITELIST_FENCE_TYPES:
new_props['kind_detail'] = fence_type
new_features.append((filtered_shape, new_props, fid))
if new_layer_name is None:
# no new layer requested, instead add new
# features into the same layer.
layer['features'].extend(new_features)
return layer
else:
# make a copy of the old layer's information - it
# shouldn't matter about most of the settings, as
# post-processing is one of the last operations.
# but we need to override the name to ensure we get
# some output.
new_layer_datum = layer['layer_datum'].copy()
new_layer_datum['name'] = new_layer_name
new_layer = layer.copy()
new_layer['layer_datum'] = new_layer_datum
new_layer['features'] = new_features
new_layer['name'] = new_layer_name
return new_layer
def normalize_social_kind(shape, properties, fid, zoom):
"""
Social facilities have an `amenity=social_facility` tag, but more
information is generally available in the `social_facility=*` tag, so it
is more informative to put that as the `kind`. We keep the old tag as
well, for disambiguation.
Additionally, we normalise the `social_facility:for` tag, which is a
semi-colon delimited list, to an actual list under the `for` property.
This should make it easier to consume.
"""
kind = properties.get('kind')
if kind == 'social_facility':
tags = properties.get('tags', {})
if tags:
social_facility = tags.get('social_facility')
if social_facility:
properties['kind'] = social_facility
# leave the original tag on for disambiguation
properties['social_facility'] = social_facility
# normalise the 'for' list to an actual list
for_list = tags.get('social_facility:for')
if for_list:
properties['for'] = for_list.split(';')
return (shape, properties, fid)
def normalize_medical_kind(shape, properties, fid, zoom):
"""
Many medical practices, such as doctors and dentists, have a speciality,
which is indicated through the `healthcare:speciality` tag. This is a
semi-colon delimited list, so we expand it to an actual list.
"""
kind = properties.get('kind')
if kind in ['clinic', 'doctors', 'dentist']:
tags = properties.get('tags', {})
if tags:
speciality = tags.get('healthcare:speciality')
if speciality:
properties['speciality'] = speciality.split(';')
return (shape, properties, fid)
class _AnyMatcher(object):
def match(self, other):
return True
def __repr__(self):
return "*"
class _NoneMatcher(object):
def match(self, other):
return other is None
def __repr__(self):
return "-"
class _SomeMatcher(object):
def match(self, other):
return other is not None
def __repr__(self):
return "+"
class _TrueMatcher(object):
def match(self, other):
return other is True
def __repr__(self):
return "true"
class _ExactMatcher(object):
def __init__(self, value):
self.value = value
def match(self, other):
return other == self.value
def __repr__(self):
return repr(self.value)
class _NotEqualsMatcher(object):
def __init__(self, value):
self.value = value
def match(self, other):
return other != self.value
def __repr__(self):
return repr(self.value)
class _SetMatcher(object):
def __init__(self, values):
self.values = values
def match(self, other):
return other in self.values
def __repr__(self):
return repr(self.value)
class _GreaterThanEqualMatcher(object):
def __init__(self, value):
self.value = value
def match(self, other):
return other >= self.value
def __repr__(self):
return '>=%r' % self.value
class _GreaterThanMatcher(object):
def __init__(self, value):
self.value = value
def match(self, other):
return other > self.value
def __repr__(self):
return '>%r' % self.value
class _LessThanEqualMatcher(object):
def __init__(self, value):
self.value = value
def match(self, other):
return other <= self.value
def __repr__(self):
return '<=%r' % self.value
class _LessThanMatcher(object):
def __init__(self, value):
self.value = value
def match(self, other):
return other < self.value
def __repr__(self):
return '<%r' % self.value
_KEY_TYPE_LOOKUP = {
'int': int,
'float': float,
}
def _parse_kt(key_type):
kt = key_type.split("::")
type_key = kt[1] if len(kt) > 1 else None
fn = _KEY_TYPE_LOOKUP.get(type_key, str)
return (kt[0], fn)
class CSVMatcher(object):
def __init__(self, fh):
keys = None
types = []
rows = []
self.static_any = _AnyMatcher()
self.static_none = _NoneMatcher()
self.static_some = _SomeMatcher()
self.static_true = _TrueMatcher()
# CSV - allow whitespace after the comma
reader = csv.reader(fh, skipinitialspace=True)
for row in reader:
if keys is None:
target_key = row.pop(-1)
keys = []
for key_type in row:
key, typ = _parse_kt(key_type)
keys.append(key)
types.append(typ)
else:
target_val = row.pop(-1)
for i in range(0, len(row)):
row[i] = self._match_val(row[i], types[i])
rows.append((row, target_val))
self.keys = keys
self.rows = rows
self.target_key = target_key
def _match_val(self, v, typ):
if v == '*':
return self.static_any
if v == '-':
return self.static_none
if v == '+':
return self.static_some
if v == 'true':
return self.static_true
if isinstance(v, str) and ';' in v:
return _SetMatcher(set(v.split(';')))
if v.startswith('>='):
assert len(v) > 2, 'Invalid >= matcher'
return _GreaterThanEqualMatcher(typ(v[2:]))
if v.startswith('<='):
assert len(v) > 2, 'Invalid <= matcher'
return _LessThanEqualMatcher(typ(v[2:]))
if v.startswith('>'):
assert len(v) > 1, 'Invalid > matcher'
return _GreaterThanMatcher(typ(v[1:]))
if v.startswith('<'):
assert len(v) > 1, 'Invalid > matcher'
return _LessThanMatcher(typ(v[1:]))
if v.startswith('!'):
assert len(v) > 1, 'Invalid ! matcher'
return _NotEqualsMatcher(typ(v[1:]))
return _ExactMatcher(typ(v))
def __call__(self, shape, properties, zoom):
vals = []
for key in self.keys:
# NOTE zoom and geometrytype have special meaning
if key == 'zoom':
val = zoom
elif key.lower() == 'geometrytype':
val = shape.type
else:
val = properties.get(key)
vals.append(val)
for row, target_val in self.rows:
if all([a.match(b) for (a, b) in zip(row, vals)]):
return (self.target_key, target_val)
return None
class YAMLToDict(dict):
def __init__(self, fh):
import yaml
data = yaml.load(fh)
assert isinstance(data, dict)
for k, v in data.iteritems():
self[k] = v
def csv_match_properties(ctx):
"""
Add or update a property on all features which match properties which are
given as headings in a CSV file.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
target_value_type = ctx.params.get('target_value_type')
matcher = ctx.resources.get('matcher')
assert source_layer, 'csv_match_properties: missing source layer'
assert matcher, 'csv_match_properties: missing matcher resource'
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
def _type_cast(v):
if target_value_type == 'int':
return int(v)
return v
for shape, props, fid in layer['features']:
m = matcher(shape, props, zoom)
if m is not None:
k, v = m
props[k] = _type_cast(v)
return layer
def update_parenthetical_properties(ctx):
"""
If a feature's name ends with a set of values in parens, update
its kind and increase the min_zoom appropriately.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
parenthetical_values = ctx.params.get('values')
target_min_zoom = ctx.params.get('target_min_zoom')
drop_below_zoom = ctx.params.get('drop_below_zoom')
assert parenthetical_values is not None, \
'update_parenthetical_properties: missing values'
assert target_min_zoom is not None, \
'update_parenthetical_properties: missing target_min_zoom'
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
new_features = []
for shape, props, fid in layer['features']:
name = props.get('name', '')
if not name:
new_features.append((shape, props, fid))
continue
keep = True
for value in parenthetical_values:
if name.endswith('(%s)' % value):
props['kind'] = value
props['min_zoom'] = target_min_zoom
if drop_below_zoom and zoom < drop_below_zoom:
keep = False
if keep:
new_features.append((shape, props, fid))
layer['features'] = new_features
return layer
def height_to_meters(shape, props, fid, zoom):
"""
If the properties has a "height" entry, then convert that to meters.
"""
height = props.get('height')
if not height:
return shape, props, fid
props['height'] = _to_float_meters(height)
return shape, props, fid
def elevation_to_meters(shape, props, fid, zoom):
"""
If the properties has an "elevation" entry, then convert that to meters.
"""
elevation = props.get('elevation')
if not elevation:
return shape, props, fid
props['elevation'] = _to_float_meters(elevation)
return shape, props, fid
def normalize_cycleway(shape, props, fid, zoom):
"""
If the properties contain both a cycleway:left and cycleway:right
with the same values, those should be removed and replaced with a
single cycleway property. Additionally, if a cycleway_both tag is
present, normalize that to the cycleway tag.
"""
cycleway = props.get('cycleway')
cycleway_left = props.get('cycleway_left')
cycleway_right = props.get('cycleway_right')
cycleway_both = props.pop('cycleway_both', None)
if cycleway_both and not cycleway:
props['cycleway'] = cycleway = cycleway_both
if (cycleway_left and cycleway_right and
cycleway_left == cycleway_right and
(not cycleway or cycleway_left == cycleway)):
props['cycleway'] = cycleway_left
del props['cycleway_left']
del props['cycleway_right']
return shape, props, fid
def add_is_bicycle_related(shape, props, fid, zoom):
"""
If the props contain a bicycle_network tag, cycleway, or
highway=cycleway, it should have an is_bicycle_related
boolean. Depends on the normalize_cycleway transform to have been
run first.
"""
props.pop('is_bicycle_related', None)
if ('bicycle_network' in props or
'cycleway' in props or
'cycleway_left' in props or
'cycleway_right' in props or
props.get('bicycle') in ('yes', 'designated') or
props.get('ramp_bicycle') in ('yes', 'left', 'right') or
props.get('kind_detail') == 'cycleway'):
props['is_bicycle_related'] = True
return shape, props, fid
def drop_properties_with_prefix(ctx):
"""
Iterate through all features, dropping all properties that start
with prefix.
"""
prefix = ctx.params.get('prefix')
assert prefix, 'drop_properties_with_prefix: missing prefix param'
feature_layers = ctx.feature_layers
for feature_layer in feature_layers:
for shape, props, fid in feature_layer['features']:
for k in props.keys():
if k.startswith(prefix):
del props[k]
def _drop_small_inners(poly, area_tolerance):
ext = poly.exterior
inners = []
for inner in poly.interiors:
area = Polygon(inner).area
if area >= area_tolerance:
inners.append(inner)
return Polygon(ext, inners)
def drop_small_inners(ctx):
"""
Drop inners which are smaller than the given scale.
"""
zoom = ctx.nominal_zoom
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
pixel_area = ctx.params.get('pixel_area')
source_layers = ctx.params.get('source_layers')
assert source_layers, \
"You must provide source_layers (layer names) to drop_small_inners"
assert pixel_area, \
"You must provide a pixel_area parameter to drop_small_inners"
if zoom < start_zoom:
return None
if end_zoom and zoom >= end_zoom:
return None
meters_per_pixel_area = calc_meters_per_pixel_area(zoom)
area_tolerance = meters_per_pixel_area * pixel_area
for layer in ctx.feature_layers:
layer_datum = layer['layer_datum']
layer_name = layer_datum['name']
if layer_name not in source_layers:
continue
new_features = []
for feature in layer['features']:
shape, props, fid = feature
geom_type = shape.geom_type
if geom_type == 'Polygon':
new_shape = _drop_small_inners(shape, area_tolerance)
if not new_shape.is_empty:
new_features.append((new_shape, props, fid))
elif geom_type == 'MultiPolygon':
polys = []
for g in shape.geoms:
new_g = _drop_small_inners(g, area_tolerance)
if not new_g.is_empty:
polys.append(new_g)
if polys:
new_features.append((MultiPolygon(polys), props, fid))
else:
new_features.append(feature)
layer['features'] = new_features
def simplify_layer(ctx):
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
source_layer = ctx.params.get('source_layer')
assert source_layer, 'simplify_layer: missing source layer'
tolerance = ctx.params.get('tolerance', 1.0)
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
# adjust tolerance to be in coordinate units
tolerance = tolerance * tolerance_for_zoom(zoom)
new_features = []
for (shape, props, fid) in layer['features']:
simplified_shape = shape.simplify(tolerance,
preserve_topology=True)
shape = _make_valid_if_necessary(simplified_shape)
new_features.append((shape, props, fid))
layer['features'] = new_features
return layer
def simplify_and_clip(ctx):
"""simplify geometries according to zoom level and clip"""
zoom = ctx.nominal_zoom
simplify_before = ctx.params.get('simplify_before')
assert simplify_before, 'simplify_and_clip: missing simplify_before param'
meters_per_pixel_area = calc_meters_per_pixel_area(zoom)
tolerance = tolerance_for_zoom(zoom)
for feature_layer in ctx.feature_layers:
simplified_features = []
layer_datum = feature_layer['layer_datum']
is_clipped = layer_datum['is_clipped']
clip_factor = layer_datum.get('clip_factor', 1.0)
padded_bounds = feature_layer['padded_bounds']
area_threshold_pixels = layer_datum['area_threshold']
area_threshold_meters = meters_per_pixel_area * area_threshold_pixels
layer_tolerance = layer_datum.get('tolerance', 1.0) * tolerance
# The logic behind simplifying before intersecting rather than the
# other way around is extensively explained here:
# https://github.com/mapzen/TileStache/blob/d52e54975f6ec2d11f63db13934047e7cd5fe588/TileStache/Goodies/VecTiles/server.py#L509,L527
simplify_before_intersect = layer_datum['simplify_before_intersect']
# perform any simplification as necessary
simplify_start = layer_datum['simplify_start']
should_simplify = simplify_start <= zoom < simplify_before
for shape, props, feature_id in feature_layer['features']:
geom_type = normalize_geometry_type(shape.type)
original_geom_dim = _geom_dimensions(shape)
padded_bounds_by_type = padded_bounds[geom_type]
layer_padded_bounds = calculate_padded_bounds(
clip_factor, padded_bounds_by_type)
if should_simplify and simplify_before_intersect:
# To reduce the performance hit of simplifying potentially huge
# geometries to extract only a small portion of them when
# cutting out the actual tile, we cut out a slightly larger
# bounding box first. See here for an explanation:
# https://github.com/mapzen/TileStache/blob/d52e54975f6ec2d11f63db13934047e7cd5fe588/TileStache/Goodies/VecTiles/server.py#L509,L527
min_x, min_y, max_x, max_y = layer_padded_bounds.bounds
gutter_bbox_size = (max_x - min_x) * 0.1
gutter_bbox = Box(
min_x - gutter_bbox_size,
min_y - gutter_bbox_size,
max_x + gutter_bbox_size,
max_y + gutter_bbox_size)
clipped_shape = shape.intersection(gutter_bbox)
simplified_shape = clipped_shape.simplify(
layer_tolerance, preserve_topology=True)
shape = _make_valid_if_necessary(simplified_shape)
if is_clipped:
shape = shape.intersection(layer_padded_bounds)
if should_simplify and not simplify_before_intersect:
simplified_shape = shape.simplify(layer_tolerance,
preserve_topology=True)
shape = _make_valid_if_necessary(simplified_shape)
# this could alter multipolygon geometries
if zoom < simplify_before:
shape = _visible_shape(shape, area_threshold_meters)
# don't keep features which have been simplified to empty or
# None.
if shape is None or shape.is_empty:
continue
# if clipping and simplifying caused this to become a geometry
# collection of different geometry types (e.g: by just touching
# the clipping box), then trim it back to the original geometry
# type.
if shape.type == 'GeometryCollection':
shape = _filter_geom_types(shape, original_geom_dim)
# if that removed all the geometry, then don't keep the
# feature.
if shape is None or shape.is_empty:
continue
simplified_feature = shape, props, feature_id
simplified_features.append(simplified_feature)
feature_layer['features'] = simplified_features
_lookup_operator_rules = {
'United States National Park Service': (
'National Park Service',
'US National Park Service',
'U.S. National Park Service',
'US National Park service'),
'United States Forest Service': (
'US Forest Service',
'U.S. Forest Service',
'USDA Forest Service',
'United States Department of Agriculture',
'US National Forest Service',
'United State Forest Service',
'U.S. National Forest Service'),
'National Parks & Wildife Service NSW': (
'Department of National Parks NSW',
'Dept of NSW National Parks',
'Dept of National Parks NSW',
'Department of National Parks NSW',
'NSW National Parks',
'NSW National Parks & Wildlife Service',
'NSW National Parks and Wildlife Service',
'NSW Parks and Wildlife Service',
'NSW Parks and Wildlife Service (NPWS)',
'National Parks and Wildlife NSW',
'National Parks and Wildlife Service NSW')}
normalized_operator_lookup = {}
for normalized_operator, variants in _lookup_operator_rules.items():
for variant in variants:
normalized_operator_lookup[variant] = normalized_operator
def normalize_operator_values(shape, properties, fid, zoom):
"""
There are many operator-related tags, including 'National Park Service',
'U.S. National Park Service', 'US National Park Service' etc that refer
to the same operator tag. This function promotes a normalized value
for all alternatives in specific operator values.
See https://github.com/tilezen/vector-datasource/issues/927.
"""
operator = properties.get('operator', None)
if operator is not None:
normalized_operator = normalized_operator_lookup.get(operator, None)
if normalized_operator:
properties['operator'] = normalized_operator
return (shape, properties, fid)
return (shape, properties, fid)
def _guess_type_from_network(network):
"""
Return a best guess of the type of network (road, hiking, bus, bicycle)
from the network tag itself.
"""
if network in ['iwn', 'nwn', 'rwn', 'lwn']:
return 'hiking'
elif network in ['icn', 'ncn', 'rcn', 'lcn']:
return 'bicycle'
else:
# hack for now - how can we tell bus routes from road routes?
# it seems all bus routes are relations, where we have a route type
# given, so this should default to roads.
return 'road'
# a mapping of operator tag values to the networks that they are (probably)
# part of. this would be better specified directly on the data, but sometimes
# it's just not available.
#
# this is a list of the operators with >=100 uses on ways tagged as motorways,
# which should hopefully allow us to catch most of the important ones. they're
# mapped to the country they're in, which should be enough in most cases to
# render the appropriate shield.
_NETWORK_OPERATORS = {
'Highways England': 'GB',
'ASF': 'FR',
'Autopista Litoral Sul': 'BR',
'DNIT': 'BR',
'Εγνατία Οδός': 'GR',
'Αυτοκινητόδρομος Αιγαίου': 'GR',
'Transport Scotland': 'GB',
'The Danish Road Directorate': 'DK',
"Autostrade per l' Italia S.P.A.": 'IT',
'Νέα Οδός': 'GR',
'Autostrada dei Fiori S.P.A.': 'IT',
'S.A.L.T.': 'IT',
'Welsh Government': 'GB',
'Euroscut': 'PT',
'DIRIF': 'FR',
'Administración central': 'ES',
'Αττική Οδός': 'GR',
'Autocamionale della Cisa S.P.A.': 'IT',
'Κεντρική Οδός': 'GR',
'Bundesrepublik Deutschland': 'DE',
'Ecovias': 'BR',
'東日本高速道路': 'JP',
'NovaDutra': 'BR',
'APRR': 'FR',
'Via Solutions Südwest': 'DE',
'Autoroutes du Sud de la France': 'FR',
'Transport for Scotland': 'GB',
'Departamento de Infraestructuras Viarias y Movilidad': 'ES',
'ViaRondon': 'BR',
'DIRNO': 'FR',
'SATAP': 'IT',
'Ολυμπία Οδός': 'GR',
'Midland Expressway Ltd': 'GB',
'autobahnplus A8 GmbH': 'DE',
'Cart': 'BR',
'Μορέας': 'GR',
'Hyderabad Metropolitan Development Authority': 'PK',
'Viapar': 'BR',
'Autostrade Centropadane': 'IT',
'Triângulo do Sol': 'BR',
}
def _ref_importance(ref):
try:
# first, see if the reference is a number, or easily convertible
# into one.
ref = int(ref or 0)
except ValueError:
# if not, we can try to extract anything that looks like a sequence
# of digits from the ref.
m = _ANY_NUMBER.match(ref)
if m:
ref = int(m.group(1))
else:
# failing that, we assume that a completely non-numeric ref is
# a name, which would make it quite important.
ref = 0
# make sure no ref is negative
ref = abs(ref)
return ref
def _guess_network_gb(tags):
# for roads we put the original OSM highway tag value in kind_detail, so we
# can recover it here.
highway = tags.get('kind_detail')
ref = tags.get('ref', '')
networks = []
# although roads are part of only one network in the UK, some roads are
# tagged incorrectly as being part of two, so we have to handle this case.
for part in ref.split(';'):
if not part:
continue
# letter at the start of the ref indicates the road class. generally
# one of 'M', 'A', or 'B' - although other letters exist, they are
# rarely used.
letter, number = _splitref(part)
# UK is tagged a bit weirdly, using the highway tag value in addition
# to the ref to figure out which road class should be applied. the
# following is not applied strictly, but is a "best guess" at the
# appropriate signage colour.
#
# https://wiki.openstreetmap.org/wiki/United_Kingdom_Tagging_Guidelines
if letter == 'M' and highway == 'motorway':
networks.append(('GB:M-road', 'M' + number))
elif ref.endswith('(M)') and highway == 'motorway':
networks.append(('GB:M-road', 'A' + number))
elif letter == 'A' and highway == 'trunk':
networks.append(('GB:A-road-green', 'A' + number))
elif letter == 'A' and highway == 'primary':
networks.append(('GB:A-road-white', 'A' + number))
elif letter == 'B' and highway == 'secondary':
networks.append(('GB:B-road', 'B' + number))
return networks
def _guess_network_ar(tags):
ref = tags.get('ref')
if ref is None:
return None
elif ref.startswith('RN'):
return [('AR:national', ref)]
elif ref.startswith('RP'):
return [('AR:provincial', ref)]
return None
def _guess_network_with(tags, fn):
"""
Common function for backfilling (network, ref) pairs by running the
"normalize" function on the parts of the ref. For example, if the
ref was 'A1;B2;C3', then the normalize function would be run on
fn(None, 'A1'), fn(None, 'B2'), etc...
This allows us to back-fill the network where it can be deduced from
the ref in a particular country (e.g: if all motorways are A[0-9]).
"""
ref = tags.get('ref', '')
networks = []
for part in ref.split(';'):
part = part.strip()
if not part:
continue
network, ref = fn(None, part)
networks.append((network, part))
return networks
def _guess_network_au(tags):
return _guess_network_with(tags, _normalize_au_netref)
# list of all the state codes in Brazil, see
# https://en.wikipedia.org/wiki/ISO_3166-2:BR
_BR_STATES = set([
'DF', # Distrito Federal (federal district, not really a state)
'AC', # Acre
'AL', # Alagoas
'AP', # Amapá
'AM', # Amazonas
'BA', # Bahia
'CE', # Ceará
'ES', # Espírito Santo
'GO', # Goiás
'MA', # Maranhão
'MT', # Mato Grosso
'MS', # Mato Grosso do Sul
'MG', # Minas Gerais
'PA', # Pará
'PB', # Paraíba
'PR', # Paraná
'PE', # Pernambuco
'PI', # Piauí
'RJ', # Rio de Janeiro
'RN', # Rio Grande do Norte
'RS', # Rio Grande do Sul
'RO', # Rondônia
'RR', # Roraima
'SC', # Santa Catarina
'SP', # São Paulo
'SE', # Sergipe
'TO', # Tocantins
])
# additional road types
_BR_NETWORK_EXPANSION = {
# Minas Gerais state roads
'AMG': 'BR:MG',
'LMG': 'BR:MG:local',
'MGC': 'BR:MG',
# CMG seems to be coupled with BR- roads of the same number
'CMG': 'BR:MG',
# Rio Grande do Sul state roads
'ERS': 'BR:RS',
'VRS': 'BR:RS',
'RSC': 'BR:RS',
# access roads in São Paulo?
'SPA': 'BR:SP',
# connecting roads in Paraná?
'PRC': 'BR:PR',
# municipal roads in Paulínia
'PLN': 'BR:SP:PLN',
# municipal roads in São Carlos
# https://pt.wikipedia.org/wiki/Estradas_do_munic%C3%ADpio_de_S%C3%A3o_Carlos#Identifica%C3%A7%C3%A3o
'SCA': 'BR:SP:SCA',
}
def _guess_network_br(tags):
ref = tags.get('ref')
networks = []
# a missing or blank ref isn't going to give us much information
if not ref:
return networks
# track last prefix, so that we can handle cases where the ref is written
# as "BR-XXX/YYY" to mean "BR-XXX; BR-YYY".
last_prefix = None
for prefix, num in re.findall('([A-Za-z]+)?[- ]?([0-9]+)', ref):
# if there's a prefix, save it for potential later use. if there isn't
# then use the previous one - if any.
if prefix:
last_prefix = prefix
else:
prefix = last_prefix
# make sure the prefix is from a network that we know about.
if prefix == 'BR':
network = prefix
elif prefix in _BR_STATES:
network = 'BR:' + prefix
elif prefix in _BR_NETWORK_EXPANSION:
network = _BR_NETWORK_EXPANSION[prefix]
else:
continue
networks.append((network, '%s-%s' % (prefix, num)))
return networks
def _guess_network_ca(tags):
nat_name = tags.get('nat_name:en') or tags.get('nat_name')
ref = tags.get('ref')
network = tags.get('network')
networks = []
if network and ref:
networks.append((network, ref))
if nat_name and nat_name.lower() == 'trans-canada highway':
# note: no ref for TCH. some states appear to add route numbers from
# the state highway to the TCH shields, e.g:
# https://commons.wikimedia.org/wiki/File:TCH-16_(BC).svg
networks.append(('CA:transcanada', ref))
if not networks and ref:
# final fallback - all we know is that this is a road in Canada.
networks.append(('CA', ref))
return networks
def _guess_network_ch(tags):
ref = tags.get('ref', '')
networks = []
for part in ref.split(';'):
if not part:
continue
network, ref = _normalize_ch_netref(None, part)
if network or ref:
networks.append((network, ref))
return networks
def _guess_network_cn(tags):
return _guess_network_with(tags, _normalize_cn_netref)
def _guess_network_es(tags):
return _guess_network_with(tags, _normalize_es_netref)
def _guess_network_fr(tags):
return _guess_network_with(tags, _normalize_fr_netref)
def _guess_network_de(tags):
return _guess_network_with(tags, _normalize_de_netref)
def _guess_network_ga(tags):
return _guess_network_with(tags, _normalize_ga_netref)
def _guess_network_gr(tags):
ref = tags.get('ref', '')
networks = []
for part in ref.split(';'):
if not part:
continue
# ignore provincial refs, they should be on reg_ref. see:
# https://wiki.openstreetmap.org/wiki/WikiProject_Greece/Provincial_Road_Network
if part.startswith(u'ΕΠ'.encode('utf-8')):
continue
network, ref = _normalize_gr_netref(None, part)
networks.append((network, part))
return networks
def _guess_network_in(tags):
ref = tags.get('ref', '')
networks = []
for part in ref.split(';'):
if not part:
continue
network, ref = _normalize_in_netref(None, part)
# note: we return _ref_ here, as normalize_in_netref might have changed
# the ref part (e.g: in order to split MDR54 into (network=MDR, ref=54)
networks.append((network, ref))
return networks
def _guess_network_mx(tags):
return _guess_network_with(tags, _normalize_mx_netref)
def _guess_network_my(tags):
return _guess_network_with(tags, _normalize_my_netref)
def _guess_network_no(tags):
return _guess_network_with(tags, _normalize_no_netref)
def _guess_network_pe(tags):
return _guess_network_with(tags, _normalize_pe_netref)
def _guess_network_jp(tags):
ref = tags.get('ref', '')
name = tags.get('name:ja') or tags.get('name')
network_from_name = None
if name:
if isinstance(name, str):
name = unicode(name, 'utf-8')
if name.startswith(u'国道') and \
name.endswith(u'号'):
network_from_name = 'JP:national'
networks = []
for part in ref.split(';'):
if not part:
continue
network, ref = _normalize_jp_netref(None, part)
if network is None and network_from_name is not None:
network = network_from_name
if network and part:
networks.append((network, part))
return networks
def _guess_network_kr(tags):
ref = tags.get('ref', '')
network_from_tags = tags.get('network')
# the name often ends with a word which appears to mean expressway or
# national road.
name_ko = _make_unicode_or_none(tags.get('name:ko') or tags.get('name'))
if name_ko and network_from_tags is None:
if name_ko.endswith(u'국도'):
# national roads - gukdo
network_from_tags = 'KR:national'
elif name_ko.endswith(u'광역시도로'):
# metropolitan city roads - gwangyeoksido
network_from_tags = 'KR:metropolitan'
elif name_ko.endswith(u'특별시도'):
# special city (Seoul) roads - teukbyeolsido
network_from_tags = 'KR:metropolitan'
elif (name_ko.endswith(u'고속도로') or
name_ko.endswith(u'고속도로지선')):
# expressways - gosokdoro (and expressway branches)
network_from_tags = 'KR:expressway'
elif name_ko.endswith(u'지방도'):
# local highways - jibangdo
network_from_tags = 'KR:local'
networks = []
for part in ref.split(';'):
if not part:
continue
network, ref = _normalize_kr_netref(None, part)
if network is None and network_from_tags is not None:
network = network_from_tags
if network and part:
networks.append((network, part))
return networks
def _guess_network_pl(tags):
return _guess_network_with(tags, _normalize_pl_netref)
def _guess_network_pt(tags):
return _guess_network_with(tags, _normalize_pt_netref)
def _guess_network_ro(tags):
return _guess_network_with(tags, _normalize_ro_netref)
def _guess_network_ru(tags):
ref = tags.get('ref', '')
network = tags.get('network')
networks = []
for part in ref.split(';'):
if not part:
continue
# note: we pass in the network tag, as that can be important for
# disambiguating Russian refs.
network, ref = _normalize_ru_netref(network, part)
networks.append((network, part))
return networks
def _guess_network_sg(tags):
return _guess_network_with(tags, _normalize_sg_netref)
def _guess_network_tr(tags):
ref = tags.get('ref', '')
networks = []
for part in _COMMON_SEPARATORS.split(ref):
part = part.strip()
if not part:
continue
network, ref = _normalize_tr_netref(None, part)
if network or ref:
networks.append((network, ref))
return networks
def _guess_network_ua(tags):
return _guess_network_with(tags, _normalize_ua_netref)
_COMMON_SEPARATORS = re.compile('[;,/,]')
def _guess_network_vn(tags):
ref = tags.get('ref', '')
# some bare refs can be augmented from the network tag on the way, or
# guessed from the name, which often starts with the type of the road.
network_from_tags = tags.get('network')
if not network_from_tags:
name = tags.get('name') or tags.get('name:vi')
if name:
name = unicode(name, 'utf-8')
if name.startswith(u'Tỉnh lộ'):
network_from_tags = 'VN:provincial'
elif name.startswith(u'Quốc lộ'):
network_from_tags = 'VN:national'
networks = []
for part in _COMMON_SEPARATORS.split(ref):
if not part:
continue
network, ref = _normalize_vn_netref(network_from_tags, part)
if network or ref:
networks.append((network, ref))
return networks
def _guess_network_za(tags):
ref = tags.get('ref', '')
networks = []
for part in _COMMON_SEPARATORS.split(ref):
if not part:
continue
network, ref = _normalize_za_netref(tags.get('network'), part)
networks.append((network, part))
return networks
def _do_not_backfill(tags):
return None
def _sort_network_us(network, ref):
if network is None:
network_code = 9999
elif network == 'US:I':
network_code = 1
elif network == 'US:US':
network_code = 2
else:
network_code = len(network.split(':')) + 3
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
_AU_NETWORK_IMPORTANCE = {
'N-highway': 0,
'A-road': 1,
'M-road': 2,
'B-road': 3,
'C-road': 4,
'N-route': 5,
'S-route': 6,
'Metro-road': 7,
'T-drive': 8,
'R-route': 9,
}
def _sort_network_au(network, ref):
if network is None or \
not network.startswith('AU:'):
network_code = 9999
else:
network_code = _AU_NETWORK_IMPORTANCE.get(network[3:], 9999)
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_br(network, ref):
if network is None:
network_code = 9999
elif network == 'BR:Trans-Amazonian':
network_code = 0
else:
network_code = len(network.split(':')) + 1
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ca(network, ref):
if network is None:
network_code = 9999
elif network == 'CA:transcanada':
network_code = 0
elif network == 'CA:yellowhead':
network_code = 1
else:
network_code = len(network.split(':')) + 2
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ch(network, ref):
if network is None:
network_code = 9999
elif network == 'CH:national':
network_code = 0
elif network == 'CH:regional':
network_code = 1
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 2
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_cn(network, ref):
if network is None:
network_code = 9999
elif network == 'CN:expressway':
network_code = 0
elif network == 'CN:expressway:regional':
network_code = 1
elif network == 'CN:JX':
network_code = 2
elif network == 'AsianHighway':
network_code = 99
else:
network_code = len(network.split(':')) + 3
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_es(network, ref):
if network is None:
network_code = 9999
elif network == 'ES:A-road':
network_code = 0
elif network == 'ES:N-road':
network_code = 1
elif network == 'ES:autonoma':
network_code = 2
elif network == 'ES:province':
network_code = 3
elif network == 'ES:city':
network_code = 4
elif network == 'e-road':
network_code = 99
else:
network_code = 5 + len(network.split(':'))
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_fr(network, ref):
if network is None:
network_code = 9999
elif network == 'FR:A-road':
network_code = 0
elif network == 'FR:N-road':
network_code = 1
elif network == 'FR:D-road':
network_code = 2
elif network == 'FR':
network_code = 3
elif network == 'e-road':
network_code = 99
else:
network_code = 5 + len(network.split(':'))
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_de(network, ref):
if network is None:
network_code = 9999
elif network == 'DE:BAB':
network_code = 0
elif network == 'DE:BS':
network_code = 1
elif network == 'DE:LS':
network_code = 2
elif network == 'DE:KS':
network_code = 3
elif network == 'DE:STS':
network_code = 4
elif network == 'DE:Hamburg:Ring':
network_code = 5
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 6
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ga(network, ref):
if network is None:
network_code = 9999
elif network == 'GA:national':
network_code = 0
elif network == 'GA:L-road':
network_code = 1
else:
network_code = 2 + len(network.split(':'))
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_gr(network, ref):
if network is None:
network_code = 9999
elif network == 'GR:motorway':
network_code = 0
elif network == 'GR:national':
network_code = 1
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 3
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_in(network, ref):
if network is None:
network_code = 9999
elif network == 'IN:NH':
network_code = 0
elif network == 'IN:SH':
network_code = 1
elif network == 'IN:MDR':
network_code = 2
else:
network_code = len(network.split(':')) + 3
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ir(network, ref):
if network is None:
network_code = 9999
elif network == 'AsianHighway':
network_code = 99
else:
network_code = len(network.split(':'))
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_kz(network, ref):
if network is None:
network_code = 9999
elif network == 'KZ:national':
network_code = 0
elif network == 'KZ:regional':
network_code = 1
elif network == 'e-road':
network_code = 99
elif network == 'AsianHighway':
network_code = 99
else:
network_code = 2 + len(network.split(':'))
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_la(network, ref):
if network is None:
network_code = 9999
elif network == 'LA:national':
network_code = 0
elif network == 'AsianHighway':
network_code = 99
else:
network_code = 1 + len(network.split(':'))
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_mx(network, ref):
if network is None:
network_code = 9999
elif network == 'MX:MEX':
network_code = 0
else:
network_code = len(network.split(':')) + 1
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_my(network, ref):
if network is None:
network_code = 9999
elif network == 'MY:federal':
network_code = 0
elif network == 'MY:expressway':
network_code = 1
elif network == 'AsianHighway':
network_code = 99
else:
network_code = len(network.split(':')) + 2
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_no(network, ref):
if network is None:
network_code = 9999
elif network == 'NO:oslo:ring':
network_code = 0
elif network == 'e-road':
network_code = 1
elif network == 'NO:Riksvei':
network_code = 2
elif network == 'NO:Fylkesvei':
network_code = 3
else:
network_code = len(network.split(':')) + 4
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_gb(network, ref):
if network is None:
network_code = 9999
elif network == 'GB:M-road':
network_code = 0
elif network == 'GB:A-road-green':
network_code = 1
elif network == 'GB:A-road-white':
network_code = 2
elif network == 'GB:B-road':
network_code = 3
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 4
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_pl(network, ref):
if network is None:
network_code = 9999
elif network == 'PL:motorway':
network_code = 0
elif network == 'PL:expressway':
network_code = 1
elif network == 'PL:national':
network_code = 2
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 3
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_pt(network, ref):
if network is None:
network_code = 9999
elif network == 'PT:motorway':
network_code = 0
elif network == 'PT:primary':
network_code = 1
elif network == 'PT:secondary':
network_code = 2
elif network == 'PT:national':
network_code = 3
elif network == 'PT:rapid':
network_code = 4
elif network == 'PT:express':
network_code = 5
elif network == 'PT:regional':
network_code = 6
elif network == 'PT:municipal':
network_code = 7
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 8
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ro(network, ref):
if network is None:
network_code = 9999
elif network == 'RO:motorway':
network_code = 0
elif network == 'RO:national':
network_code = 1
elif network == 'RO:county':
network_code = 2
elif network == 'RO:local':
network_code = 3
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 4
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ru(network, ref):
ref = _make_unicode_or_none(ref)
if network is None:
network_code = 9999
elif network == 'RU:national' and ref:
if ref.startswith(u'М'):
network_code = 0
elif ref.startswith(u'Р'):
network_code = 1
elif ref.startswith(u'А'):
network_code = 2
else:
network_code = 9999
elif network == 'RU:regional':
network_code = 3
elif network == 'e-road':
network_code = 99
elif network == 'AsianHighway':
network_code = 99
else:
network_code = len(network.split(':')) + 4
if ref is None:
ref = 9999
else:
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_tr(network, ref):
ref = _make_unicode_or_none(ref)
if network is None:
network_code = 9999
elif network == 'TR:motorway':
network_code = 0
elif network == 'TR:highway':
# some highways are "main highways", so it makes sense to show them
# before regular other highways.
# see footer of https://en.wikipedia.org/wiki/State_road_D.010_(Turkey)
if ref in ('D010', 'D100', 'D200', 'D300', 'D400',
'D550', 'D650', 'D750', 'D850', 'D950'):
network_code = 1
else:
network_code = 2
elif network == 'TR:provincial':
network_code = 3
elif network == 'e-road':
network_code = 99
elif network == 'AsianHighway':
network_code = 99
else:
network_code = len(network.split(':')) + 4
if ref is None:
ref = 9999
else:
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_ua(network, ref):
ref = _make_unicode_or_none(ref)
if network is None:
network_code = 9999
elif network == 'UA:international':
network_code = 0
elif network == 'UA:national':
network_code = 1
elif network == 'UA:regional':
network_code = 2
elif network == 'UA:territorial':
network_code = 3
elif network == 'e-road':
network_code = 99
else:
network_code = len(network.split(':')) + 4
if ref is None:
ref = 9999
else:
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_vn(network, ref):
if network is None:
network_code = 9999
elif network == 'VN:expressway':
network_code = 0
elif network == 'VN:national':
network_code = 1
elif network == 'VN:provincial':
network_code = 2
elif network == 'VN:road':
network_code = 3
elif network == 'AsianHighway':
network_code = 99
else:
network_code = len(network.split(':')) + 4
if ref is None:
ref = 9999
else:
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
def _sort_network_za(network, ref):
if network is None:
network_code = 9999
elif network == 'ZA:national':
network_code = 0
elif network == 'ZA:provincial':
network_code = 1
elif network == 'ZA:regional':
network_code = 2
elif network == 'ZA:metropolitan':
network_code = 3
elif network == 'ZA:kruger':
network_code = 4
elif network == 'ZA:S-road':
network_code = 5
else:
network_code = len(network.split(':')) + 6
if ref is None:
ref = 9999
else:
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
_AU_NETWORK_EXPANSION = {
'A': 'AU:A-road',
'M': 'AU:M-road',
'B': 'AU:B-road',
'C': 'AU:C-road',
'N': 'AU:N-route',
'R': 'AU:R-route',
'S': 'AU:S-route',
'T': 'AU:T-drive',
'MR': 'AU:Metro-road',
}
def _splitref(ref):
"""
Split ref into a leading alphabetic part and a trailing (possibly numeric)
part.
"""
# empty strings don't have a prefix
if not ref:
return None, ref
for i in xrange(0, len(ref)):
if not ref[i].isalpha():
return ref[0:i], ref[i:].strip()
# got to the end, must be all "prefix", which probably indicates it's not
# a ref of the expected prefix-suffix form, and we should just return the
# ref without a prefix.
return None, ref
def _normalize_au_netref(network, ref):
"""
Take the network and ref of an Australian road and normalise them so that
the network is in the form 'AU:road-type' and the ref is numeric. This is
based on a bunch of logic about what kinds of Australian roads exist.
Returns new (network, ref) values.
"""
# grab the prefix, if any, from the ref. we can use this to "back-fill" the
# network.
prefix, ref = _splitref(ref)
if network and network.startswith('AU:') and \
network[3:] in _AU_NETWORK_IMPORTANCE:
# network is already in the form we want!
pass
elif network in _AU_NETWORK_EXPANSION:
network = _AU_NETWORK_EXPANSION[network]
elif prefix in _AU_NETWORK_EXPANSION:
# backfill network from ref, if possible. (note that ref must
# be non-None, since mz_networks entries have either network or
# ref, or both).
network = _AU_NETWORK_EXPANSION[prefix]
return network, ref
def _normalize_br_netref(network, ref):
# try to add detail to the network by looking at the ref value,
# which often has additional information.
for guess_net, guess_ref in _guess_network_br(dict(ref=ref)):
if guess_ref == ref and (
network is None or guess_net.startswith(network)):
network = guess_net
break
if network == 'BR':
if ref == 'BR-230':
return 'BR:Trans-Amazonian', ref
else:
return network, ref
elif network and network.startswith('BR:'):
# turn things like "BR:BA-roads" into just "BR:BA"
if network.endswith('-roads'):
network = network[:-6]
return network, ref
elif network in _BR_STATES:
# just missing the 'BR:' at the start?
return 'BR:' + network, ref
else:
return None, ref
def _normalize_ca_netref(network, ref):
if isinstance(network, (str, unicode)) and \
network.startswith('CA:NB') and \
ref.isdigit():
refnum = int(ref)
if refnum >= 200:
network = 'CA:NB3'
elif refnum >= 100:
network = 'CA:NB2'
return network, ref
def _normalize_cd_netref(network, ref):
if network == 'CD:rrig':
network = 'CD:RRIG'
return network, ref
def _normalize_ch_netref(network, ref):
prefix, ref = _splitref(ref)
if network == 'CH:Nationalstrasse':
# clean up the ref by removing any prefixes and extra stuff after
# the number.
ref = ref.split(' ')[0]
network = 'CH:national'
elif prefix == 'A':
network = 'CH:motorway'
elif network not in ('CH:motorway', 'CH:national', 'CH:regional'):
network = None
ref = None
return network, ref
def _normalize_cn_netref(network, ref):
if ref and ref.startswith('S'):
network = 'CN:expressway:regional'
elif ref and ref.startswith('G'):
network = 'CN:expressway'
elif ref and ref.startswith('X'):
network = 'CN:JX'
elif network == 'CN-expressways':
network = 'CN:expressway'
elif network == 'CN-expressways-regional':
network = 'CN:expressway:regional'
elif network == 'JX-roads':
network = 'CN:JX'
return network, ref
# mapping the network prefixes onto ISO 3166-2 codes
_ES_AUTONOMA = set([
'ARA', # Aragon
'A', # Aragon & Andalusia (and also Álava, Basque Country)
'CA', # Cantabria (also Cadiz?)
'CL', # Castile & Leon
'CM', # Castilla-La Mancha
'C', # Catalonia (also Cistierna & Eivissa?)
'EX', # Extremadura
'AG', # Galicia
'M', # Madrid
'R', # Madrid
'Ma', # Mallorca
'Me', # Menorca
'ML', # Melilla
'RC', # Menorca
'RM', # Murcia
'V', # Valencia (also A Coruna?)
'CV', # Valencia
'Cv', # Valencia
])
# mapping the network prefixes onto ISO 3166-2 codes
_ES_PROVINCES = set([
'AC', # A Coruna
'DP', # A Coruna
'AB', # Albacete
'F', # Alicante?
'AL', # Almeria
'AE', # Asturias
'AS', # Asturias
'AV', # Avila
'BA', # Badajoz
'B', # Barcelona
'BP', # Barcelona
'BV', # Barcelona
'BI', # Bizkaia
'BU', # Burgos
'CC', # Caceres
'CO', # Cordoba
'CR', # Cuidad Real
'GIP', # Girona
'GIV', # Girona
'GI', # Gipuzkoa & Girona
'GR', # Granada
'GU', # Guadalajara
'HU', # Huesca
'JA', # Jaen
'JV', # Jaen
'LR', # La Rioja
'LE', # Leon
'L', # Lerida
'LP', # Lerida
'LV', # Lerida
'LU', # Lugo
'MP', # Madrid
'MA', # Malaga
'NA', # Navarre
'OU', # Orense
'P', # Palencia
'PP', # Palencia
'EP', # Pontevedra
'PO', # Pontevedra
'DSA', # Salamanca
'SA', # Salamanca
'NI', # Segovia
'SG', # Segovia
'SE', # Sevilla
'SO', # Soria
'TP', # Tarragona
'TV', # Tarragona
'TE', # Teruel
'TO', # Toledo
'VA', # Valladolid
'ZA', # Zamora
'CP', # Zaragoza
'Z', # Zaragoza
'PM', # Baleares
'PMV', # Baleares
])
# mapping city codes to the name of the city
_ES_CITIES = set([
'AI', # Aviles
'IA', # Aviles
'CT', # Cartagena
'CS', # Castello
'CU', # Cudillero
'CHE', # Ejea de los Caballeros
'EL', # Elx/Elche
'FE', # Ferrol
'GJ', # Gijon
'H', # Huelva
'VM', # Huelva
'J', # Jaen
'LN', # Lena
'LL', # Lleida
'LO', # Logrono
'ME', # Merida
'E', # Mollerussa? / Eivissa
'MU', # Murcia
'O', # Oviedo
'PA', # Pamplona
'PR', # Parres
'PI', # Pilona
'CHMS', # Ponferrada?
'PT', # Puertollano
'SL', # Salas
'S', # Santander
'SC', # Santiago de Compostela
'SI', # Siero
'VG', # Vigo
'EI', # Eivissa
])
def _normalize_es_netref(network, ref):
prefix, num = _splitref(ref)
# some A-roads in Spain are actually province or autonoma roads. these are
# distinguished from the national A-roads by whether they have 1 or 2
# digits (national) or 3 or more digits (autonoma / province). sadly, it
# doesn't seem to be possible to tell whether it's an autonoma or province
# without looking at the geometry, which is left as a TODO for later rainy
# days.
num_digits = 0
if num:
num = num.lstrip('-')
for c in num:
if c.isdigit():
num_digits += 1
else:
break
if prefix in ('A', 'AP') and num_digits > 0 and num_digits < 3:
network = 'ES:A-road'
elif prefix == 'N':
network = 'ES:N-road'
elif prefix == 'E' and num:
# e-roads seem to be signed without leading zeros.
network = 'e-road'
ref = 'E-' + num.lstrip('0')
elif prefix in _ES_AUTONOMA:
network = 'ES:autonoma'
elif prefix in _ES_PROVINCES:
network = 'ES:province'
elif prefix in _ES_CITIES:
network = 'ES:city'
else:
network = None
ref = None
return network, ref
_FR_DEPARTMENTAL_D_ROAD = re.compile(
'^FR:[0-9]+:([A-Z]+)-road$', re.UNICODE | re.IGNORECASE)
def _normalize_fr_netref(network, ref):
prefix, ref = _splitref(ref)
if prefix:
# routes nationales (RN) are actually signed just "N"? also, RNIL
# are routes delegated to local departments, but still signed as
# routes nationales.
if prefix in ('RN', 'RNIL'):
prefix = 'N'
# strip spaces and leading zeros
if ref:
ref = prefix + ref.strip().lstrip('0')
# backfill network from refs if network wasn't provided from another
# source.
if network is None:
network = 'FR:%s-road' % (prefix,)
# networks are broken down by department, e.g: FR:01:D-road, but we
# only want to match on the D-road part, so throw away the department
# number.
if isinstance(network, (str, unicode)):
m = _FR_DEPARTMENTAL_D_ROAD.match(network)
if m:
# see comment above. TODO: figure out how to not say this twice.
prefix = m.group(1).upper()
if prefix in ('RN', 'RNIL'):
prefix = 'N'
network = 'FR:%s-road' % (prefix,)
return network, ref
def _normalize_de_netref(network, ref):
prefix, ref = _splitref(ref)
if prefix:
if prefix == 'Ring':
ref = 'Ring ' + ref
else:
ref = prefix + ref
if not network:
network = {
'A': 'DE:BAB',
'B': 'DE:BS',
'L': 'DE:LS',
'K': 'DE:KS',
'St': 'DE:STS',
'S': 'DE:STS',
'Ring': 'DE:Hamburg:Ring',
}.get(prefix)
if network == 'Landesstra\xc3\x9fen NRW':
network = 'DE:LS'
elif network == 'Kreisstra\xc3\x9fen Hildesheim':
network = 'DE:KS'
elif network == 'BAB':
network = 'DE:BAB'
return network, ref
def _normalize_ga_netref(network, ref):
prefix, num = _splitref(ref)
if prefix in ('N', 'RN'):
network = 'GA:national'
ref = 'N' + num
elif prefix == 'L':
network = 'GA:L-road'
ref = 'L' + num
else:
network = None
ref = None
return network, ref
def _normalize_gr_netref(network, ref):
ref = _make_unicode_or_none(ref)
prefix, ref = _splitref(ref)
# this might look bizzare, but it's because the Greek capital letters
# epsilon and omicron look very similar (in some fonts identical) to the
# Latin characters E and O. it's the same below for capital alpha and A.
# these are sometimes mixed up in the data, so we map them to the same
# networks.
if prefix in (u'ΕΟ', u'EO'):
network = 'GR:national'
elif (prefix in (u'Α', u'A') and
(network is None or network == 'GR:motorway')):
network = 'GR:motorway'
# keep A prefix for shield text
ref = u'Α' + ref
elif network == 'e-road':
ref = 'E' + ref
elif network and network.startswith('GR:provincial:'):
network = 'GR:provincial'
return network, ref
def _normalize_in_netref(network, ref):
prefix, ref = _splitref(ref)
if prefix == 'NH':
network = 'IN:NH'
elif prefix == 'SH':
network = 'IN:SH'
elif prefix == 'MDR':
network = 'IN:MDR'
elif network and network.startswith('IN:NH'):
network = 'IN:NH'
elif network and network.startswith('IN:SH'):
network = 'IN:SH'
elif network and network.startswith('IN:MDR'):
network = 'IN:MDR'
elif ref == 'MDR':
network = 'IN:MDR'
ref = None
elif ref == 'ORR':
network = 'IN:NH'
else:
network = None
return network, ref
def _normalize_ir_netref(network, ref):
net, num = _splitref(ref)
if network == 'AH' or net == 'AH':
network = 'AsianHighway'
# in Iran, the Wikipedia page for the AsianHighway template suggests
# that the AH route is shown as "A1Tr" (with the "Tr" in a little box)
# https://en.wikipedia.org/wiki/Template:AHN-AH
#
# however, i haven't been able to find an example on a real road sign,
# so perhaps it's not widely shown. anyway, we probably want "A1" as
# the shield text.
ref = 'A' + num
elif network == 'IR:freeways':
network = 'IR:freeway'
return network, ref
def _normalize_la_netref(network, ref):
# apparently common mistake: Laos is LA, not LO
if network == 'LO:network':
network = 'LA:national'
return network, ref
# mapping of mexican road prefixes into their network values.
_MX_ROAD_NETWORK_PREFIXES = {
'AGS': 'MX:AGU', # Aguascalientes
'BC': 'MX:BCN', # Baja California
'BCS': 'MX:BCS', # Baja California Sur
'CAM': 'MX:CAM', # Campeche
'CHIS': 'MX:CHP', # Chiapas
'CHIH': 'MX:CHH', # Chihuahua
'COAH': 'MX:COA', # Coahuila
'COL': 'MX:COL', # Colima
'DGO': 'MX:DUR', # Durango
'GTO': 'MX:GUA', # Guanajuato
'GRO': 'MX:GRO', # Guerrero
'HGO': 'MX:HID', # Hidalgo
'JAL': 'MX:JAL', # Jalisco
# NOTE: couldn't find an example for Edomex.
'MICH': 'MX:MIC', # Michoacán
'MOR': 'MX:MOR', # Morelos
'NAY': 'MX:NAY', # Nayarit
'NL': 'MX:NLE', # Nuevo León
'OAX': 'MX:OAX', # Oaxaca
'PUE': 'MX:PUE', # Puebla
'QRO': 'MX:QUE', # Querétaro
'ROO': 'MX:ROO', # Quintana Roo
'SIN': 'MX:SIN', # Sinaloa
'SLP': 'MX:SLP', # San Luis Potosí
'SON': 'MX:SON', # Sonora
'TAB': 'MX:TAB', # Tabasco
'TAM': 'MX:TAM', # Tamaulipas
# NOTE: couldn't find an example for Tlaxcala.
'VER': 'MX:VER', # Veracruz
'YUC': 'MX:YUC', # Yucatán
'ZAC': 'MX:ZAC', # Zacatecas
# National roads
'MEX': 'MX:MEX',
}
def _normalize_mx_netref(network, ref):
# interior ring road in Mexico City
if ref == 'INT':
network = 'MX:CMX:INT'
ref = None
elif ref == 'EXT':
network = 'MX:CMX:EXT'
ref = None
prefix, part = _splitref(ref)
if prefix:
net = _MX_ROAD_NETWORK_PREFIXES.get(prefix.upper())
if net:
network = net
ref = part
# sometimes Quintana Roo is also written as "Q. Roo", which trips up
# the _splitref() function, so this just adjusts for that.
if ref and ref.upper().startswith('Q. ROO'):
network = 'MX:ROO'
ref = ref[len('Q. ROO'):].strip()
return network, ref
# roads in Malaysia can have a state prefix similar to the letters used on
# vehicle license plates. see Wikipedia for a list:
#
# https://en.wikipedia.org/wiki/Malaysian_State_Roads_system
#
# these are mapped to the abbreviations given in the table on:
#
# https://en.wikipedia.org/wiki/States_and_federal_territories_of_Malaysia
#
_MY_ROAD_STATE_CODES = {
'A': 'PRK', # Perak
'B': 'SGR', # Selangor
'C': 'PHG', # Pahang
'D': 'KTN', # Kelantan
'J': 'JHR', # Johor
'K': 'KDH', # Kedah
'M': 'MLK', # Malacca
'N': 'NSN', # Negiri Sembilan
'P': 'PNG', # Penang
'R': 'PLS', # Perlis
'SA': 'SBH', # Sabah
'T': 'TRG', # Terengganu
'Q': 'SWK', # Sarawak
}
def _normalize_my_netref(network, ref):
prefix, number = _splitref(ref)
if prefix == 'E':
network = 'MY:expressway'
elif prefix in ('FT', ''):
network = 'MY:federal'
# federal highway 1 has many parts (1-1, 1-2, etc...) but it's not
# clear that they're actually signed that way. so throw the part
# after the dash away.
ref = number.split('-')[0]
elif prefix == 'AH':
network = 'AsianHighway'
elif prefix == 'MBSA':
network = 'MY:SGR:municipal'
# shorten ref so that it is more likely to fit in a 5-char shield.
ref = 'BSA' + number
elif prefix in _MY_ROAD_STATE_CODES:
network = 'MY:' + _MY_ROAD_STATE_CODES[prefix]
else:
network = None
return network, ref
def _normalize_jp_netref(network, ref):
if network and network.startswith('JP:prefectural:'):
network = 'JP:prefectural'
elif network is None:
prefix, _ = _splitref(ref)
if prefix in ('C', 'E'):
network = 'JP:expressway'
return network, ref
def _normalize_kr_netref(network, ref):
net, part = _splitref(ref)
if net == 'AH':
network = 'AsianHighway'
ref = part
elif network == 'AH':
network = 'AsianHighway'
return network, ref
def _normalize_kz_netref(network, ref):
net, num = _splitref(ref)
if net == 'AH' or network == 'AH':
network = 'AsianHighway'
ref = 'AH' + num
elif net == 'E' or network == 'e-road':
network = 'e-road'
ref = 'E' + num
return network, ref
def _normalize_no_netref(network, ref):
prefix, number = _splitref(ref)
if prefix == 'Rv':
network = 'NO:riksvei'
ref = number
elif prefix == 'Fv':
network = 'NO:fylkesvei'
ref = number
elif prefix == 'E' and number:
network = 'e-road'
ref = 'E ' + number.lstrip('0')
elif prefix == 'Ring':
network = 'NO:oslo:ring'
ref = 'Ring ' + number
elif network and network.lower().startswith('no:riksvei'):
network = 'NO:riksvei'
elif network and network.lower().startswith('no:fylkesvei'):
network = 'NO:fylkesvei'
else:
network = None
return network, ref
_PE_STATES = set([
'AM', # Amazonas
'AN', # Ancash
'AP', # Apurímac
'AR', # Arequipa
'AY', # Ayacucho
'CA', # Cajamarca
'CU', # Cusco
'HU', # Huánuco
'HV', # Huancavelica
'IC', # Ica
'JU', # Junín
'LA', # Lambayeque
'LI', # La Libertad
'LM', # Lima (including Callao)
'LO', # Loreto
'MD', # Madre de Dios
'MO', # Moquegua
'PA', # Pasco
'PI', # Piura
'PU', # Puno
'SM', # San Martín
'TA', # Tacna
'TU', # Tumbes
'UC', # Ucayali
])
def _normalize_pe_netref(network, ref):
prefix, number = _splitref(ref)
# Peruvian refs seem to be usually written "XX-YY" with a dash, so we have
# to remove that as it's not part of the shield text.
if number:
number = number.lstrip('-')
if prefix == 'PE':
network = 'PE:PE'
ref = number
elif prefix in _PE_STATES:
network = 'PE:' + prefix
ref = number
else:
network = None
return network, ref
def _normalize_ph_netref(network, ref):
if network == 'PH:nhn':
network = 'PH:NHN'
return network, ref
def _normalize_pl_netref(network, ref):
if network == 'PL:motorways':
network = 'PL:motorway'
elif network == 'PL:expressways':
network = 'PL:expressway'
if ref and ref.startswith('A'):
network = 'PL:motorway'
elif ref and ref.startswith('S'):
network = 'PL:expressway'
return network, ref
# expansion from ref prefixes to (network, shield text prefix).
#
# https://en.wikipedia.org/wiki/Roads_in_Portugal
#
# note that it seems signs generally don't have EN, ER or EM on them. instead,
# they have N, R and, presumably, M - although i wasn't able to find one of
# those. perhaps they're not important enough to sign with a number.
_PT_NETWORK_EXPANSION = {
'A': ('PT:motorway', 'A'),
'IP': ('PT:primary', 'IP'),
'IC': ('PT:secondary', 'IC'),
'VR': ('PT:rapid', 'VR'),
'VE': ('PT:express', 'VE'),
'EN': ('PT:national', 'N'),
'ER': ('PT:regional', 'R'),
'EM': ('PT:municipal', 'M'),
'E': ('e-road', 'E'),
}
def _normalize_pt_netref(network, ref):
prefix, num = _splitref(ref)
result = _PT_NETWORK_EXPANSION.get(prefix)
if result and num:
network, letter = result
ref = letter + num.lstrip('0')
else:
network = None
return network, ref
# note that there's another road class, DX, which is documented, but doesn't
# currently exist.
# see https://en.wikipedia.org/wiki/Roads_in_Romania
#
_RO_NETWORK_PREFIXES = {
'A': 'RO:motorway',
'DN': 'RO:national',
'DJ': 'RO:county',
'DC': 'RO:local',
'E': 'e-road',
}
def _normalize_ro_netref(network, ref):
prefix, num = _splitref(ref)
network = _RO_NETWORK_PREFIXES.get(prefix)
if network is not None:
ref = prefix + num
else:
ref = None
return network, ref
def _normalize_ru_netref(network, ref):
ref = _make_unicode_or_none(ref)
prefix, num = _splitref(ref)
# get rid of any stuff trailing the '-'. seems to be a section number or
# mile marker?
if num:
num = num.lstrip('-').split('-')[0]
if prefix in (u'М', 'M'): # cyrillic M & latin M!
ref = u'М' + num
elif prefix in (u'Р', 'P'):
if network is None:
network = 'RU:regional'
ref = u'Р' + num
elif prefix in (u'А', 'A'):
if network is None:
network = 'RU:regional'
ref = u'А' + num
elif prefix == 'E':
network = 'e-road'
ref = u'E' + num
elif prefix == 'AH':
network = 'AsianHighway'
ref = u'AH' + num
else:
ref = None
if isinstance(ref, unicode):
ref = ref.encode('utf-8')
return network, ref
_TR_PROVINCIAL = re.compile('^[0-9]{2}-[0-9]{2}$')
# NOTE: there's aslo an "NSC", which is under construction
_SG_EXPRESSWAYS = set([
'AYE', # Ayer Rajah Expressway
'BKE', # Bukit Timah Expressway
'CTE', # Central Expressway
'ECP', # East Coast Parkway
'KJE', # Kranji Expressway
'KPE', # Kallang-Paya Lebar Expressway
'MCE', # Marina Coastal Expressway
'PIE', # Pan Island Expressway
'SLE', # Seletar Expressway
'TPE', # Tampines Expressway
])
def _normalize_sg_netref(network, ref):
if ref in _SG_EXPRESSWAYS:
network = 'SG:expressway'
else:
network = None
ref = None
return network, ref
def _normalize_tr_netref(network, ref):
prefix, num = _splitref(ref)
if num:
num = num.lstrip('-')
if prefix == 'O' and num:
# see https://en.wikipedia.org/wiki/Otoyol
network = 'TR:motorway'
ref = 'O' + num.lstrip('0')
elif prefix == 'D' and num:
# see https://en.wikipedia.org/wiki/Turkish_State_Highway_System
network = 'TR:highway'
# drop section suffixes
ref = 'D' + num.split('-')[0]
elif ref and _TR_PROVINCIAL.match(ref):
network = 'TR:provincial'
elif prefix == 'E' and num:
network = 'e-road'
ref = 'E' + num
else:
network = None
ref = None
return network, ref
def _normalize_ua_netref(network, ref):
ref = _make_unicode_or_none(ref)
prefix, num = _splitref(ref)
if num:
num = num.lstrip('-')
if not num:
network = None
ref = None
elif prefix in (u'М', 'M'): # cyrillic M & latin M!
if network is None:
network = 'UA:international'
ref = u'М' + num
elif prefix in (u'Н', 'H'):
if network is None:
network = 'UA:national'
ref = u'Н' + num
elif prefix in (u'Р', 'P'):
if network is None:
network = 'UA:regional'
ref = u'Р' + num
elif prefix in (u'Т', 'T'):
network = 'UA:territorial'
ref = u'Т' + num.replace('-', '')
elif prefix == 'E':
network = 'e-road'
ref = u'E' + num
else:
ref = None
network = None
if isinstance(ref, unicode):
ref = ref.encode('utf-8')
return network, ref
def _normalize_vn_netref(network, ref):
ref = _make_unicode_or_none(ref)
prefix, num = _splitref(ref)
if num:
num = num.lstrip(u'.')
if not num:
network = None
ref = None
elif prefix == u'CT' or network == 'VN:expressway':
network = 'VN:expressway'
ref = u'CT' + num
elif prefix == u'QL' or network == 'VN:national':
network = 'VN:national'
ref = u'QL' + num
elif prefix in (u'ĐT', u'DT'):
network = 'VN:provincial'
ref = u'ĐT' + num
elif prefix == u'TL' or network in ('VN:provincial', 'VN:TL'):
network = 'VN:provincial'
ref = u'TL' + num
elif ref:
network = 'VN:road'
else:
network = None
ref = None
if isinstance(ref, unicode):
ref = ref.encode('utf-8')
return network, ref
def _normalize_za_netref(network, ref):
prefix, num = _splitref(ref)
ndigits = len(num) if num else 0
# N, R & M numbered routes all have special shields which have the letter
# above the number, which would make it part of the shield artwork rather
# than the shield text.
if prefix == 'N':
network = 'ZA:national'
ref = num
elif prefix == 'R' and ndigits == 2:
# 2-digit R numbers are provincial routes, 3-digit are regional routes.
# https://en.wikipedia.org/wiki/Numbered_routes_in_South_Africa
network = 'ZA:provincial'
ref = num
elif prefix == 'R' and ndigits == 3:
network == 'ZA:regional'
ref = num
elif prefix == 'M':
# there are various different metropolitan networks, but according to
# the Wikipedia page, they all have the same shield. so lumping them
# all together under "metropolitan".
network = 'ZA:metropolitan'
ref = num
elif prefix == 'H':
# i wasn't able to find documentation for these, but there are
# H-numbered roads with good signage, which appear to be only in the
# Kruger National Park, so i've named them that way.
network = 'ZA:kruger'
elif prefix == 'S':
# i wasn't able to find any documentation for these, but there are
# plain white-on-green signs for some of these visible.
network = 'ZA:S-road'
else:
ref = None
network = None
return network, ref
def _shield_text_ar(network, ref):
# Argentinian national routes start with "RN" (ruta nacional), which
# should be stripped, but other letters shouldn't be!
if network == 'AR:national' and ref and ref.startswith('RN'):
return ref[2:]
# Argentinian provincial routes start with "RP" (ruta provincial)
if network == 'AR:provincial' and ref and ref.startswith('RP'):
return ref[2:]
return ref
_AU_NETWORK_SHIELD_TEXT = {
'AU:M-road': 'M',
'AU:A-road': 'A',
'AU:B-road': 'B',
'AU:C-road': 'C',
}
def _shield_text_au(network, ref):
# shields on M, A, B & C roads should have the letter, but not other types
# of roads.
prefix = _AU_NETWORK_SHIELD_TEXT.get(network)
if prefix:
ref = prefix + ref
return ref
def _shield_text_gb(network, ref):
# just remove any space between the letter and number(s)
prefix, number = _splitref(ref)
if prefix and number:
return prefix + number
else:
return ref
def _shield_text_ro(network, ref):
# the DN, DJ & DC networks don't have a prefix on the displayed shields,
# see:
# https://upload.wikimedia.org/wikipedia/commons/b/b0/Autostrada_Sibiu_01.jpg
# https://upload.wikimedia.org/wikipedia/commons/7/7a/A1_Arad-Timisoara_-_01.JPG
if network in ('RO:national', 'RO:county', 'RO:local'):
return ref[2:]
return ref
# do not strip anything from the ref apart from whitespace.
def _use_ref_as_is(network, ref):
return ref.strip()
# CountryNetworkLogic centralises the logic around country-specific road
# network processing. this allows us to do different things, such as
# back-filling missing network tag values or sorting networks differently
# based on which country they are in. (e.g: in the UK, an "M" road is more
# important than an "A" road, even though they'd sort the other way
# alphabetically).
#
# the different logic sections are:
#
# * backfill: this is called as fn(tags) to unpack the ref tag (and any other
# meaningful tags) into a list of (network, ref) tuples to use
# instead. For example, it's common to give ref=A1;B2;C3 to
# indicate multiple networks & shields.
#
# * fix: this is called as fn(network, ref) and should fix whatever problems it
# can and return the replacement (network, ref). remember! either
# network or ref can be None!
#
# * sort: this is called as fn(network, ref) and should return a numeric value
# where lower numeric values mean _more_ important networks.
#
# * shield_text: this is called as fn(network, ref) and should return the
# shield text to output. this might mean stripping leading alpha
# numeric characters - or not, depending on the country.
#
CountryNetworkLogic = namedtuple(
'CountryNetworkLogic', 'backfill fix sort shield_text')
CountryNetworkLogic.__new__.__defaults__ = (None,) * len(
CountryNetworkLogic._fields)
_COUNTRY_SPECIFIC_ROAD_NETWORK_LOGIC = {
'AR': CountryNetworkLogic(
backfill=_guess_network_ar,
shield_text=_shield_text_ar,
),
'AU': CountryNetworkLogic(
backfill=_guess_network_au,
fix=_normalize_au_netref,
sort=_sort_network_au,
shield_text=_shield_text_au,
),
'BR': CountryNetworkLogic(
backfill=_guess_network_br,
fix=_normalize_br_netref,
sort=_sort_network_br,
),
'CA': CountryNetworkLogic(
backfill=_guess_network_ca,
fix=_normalize_ca_netref,
sort=_sort_network_ca,
),
'CH': CountryNetworkLogic(
backfill=_guess_network_ch,
fix=_normalize_ch_netref,
sort=_sort_network_ch,
),
'CD': CountryNetworkLogic(
fix=_normalize_cd_netref,
),
'CN': CountryNetworkLogic(
backfill=_guess_network_cn,
fix=_normalize_cn_netref,
sort=_sort_network_cn,
shield_text=_use_ref_as_is,
),
'DE': CountryNetworkLogic(
backfill=_guess_network_de,
fix=_normalize_de_netref,
sort=_sort_network_de,
shield_text=_use_ref_as_is,
),
'ES': CountryNetworkLogic(
backfill=_guess_network_es,
fix=_normalize_es_netref,
sort=_sort_network_es,
shield_text=_use_ref_as_is,
),
'FR': CountryNetworkLogic(
backfill=_guess_network_fr,
fix=_normalize_fr_netref,
sort=_sort_network_fr,
shield_text=_use_ref_as_is,
),
'GA': CountryNetworkLogic(
backfill=_guess_network_ga,
fix=_normalize_ga_netref,
sort=_sort_network_ga,
shield_text=_use_ref_as_is,
),
'GB': CountryNetworkLogic(
backfill=_guess_network_gb,
sort=_sort_network_gb,
shield_text=_shield_text_gb,
),
'GR': CountryNetworkLogic(
backfill=_guess_network_gr,
fix=_normalize_gr_netref,
sort=_sort_network_gr,
),
'IN': CountryNetworkLogic(
backfill=_guess_network_in,
fix=_normalize_in_netref,
sort=_sort_network_in,
shield_text=_use_ref_as_is,
),
'IR': CountryNetworkLogic(
fix=_normalize_ir_netref,
sort=_sort_network_ir,
shield_text=_use_ref_as_is,
),
'JP': CountryNetworkLogic(
backfill=_guess_network_jp,
fix=_normalize_jp_netref,
shield_text=_use_ref_as_is,
),
'KR': CountryNetworkLogic(
backfill=_guess_network_kr,
fix=_normalize_kr_netref,
),
'KZ': CountryNetworkLogic(
fix=_normalize_kz_netref,
sort=_sort_network_kz,
shield_text=_use_ref_as_is,
),
'LA': CountryNetworkLogic(
fix=_normalize_la_netref,
sort=_sort_network_la,
),
'MX': CountryNetworkLogic(
backfill=_guess_network_mx,
fix=_normalize_mx_netref,
sort=_sort_network_mx,
),
'MY': CountryNetworkLogic(
backfill=_guess_network_my,
fix=_normalize_my_netref,
sort=_sort_network_my,
shield_text=_use_ref_as_is,
),
'NO': CountryNetworkLogic(
backfill=_guess_network_no,
fix=_normalize_no_netref,
sort=_sort_network_no,
shield_text=_use_ref_as_is,
),
'PE': CountryNetworkLogic(
backfill=_guess_network_pe,
fix=_normalize_pe_netref,
shield_text=_use_ref_as_is,
),
'PH': CountryNetworkLogic(
fix=_normalize_ph_netref,
),
'PL': CountryNetworkLogic(
backfill=_guess_network_pl,
fix=_normalize_pl_netref,
sort=_sort_network_pl,
),
'PT': CountryNetworkLogic(
backfill=_guess_network_pt,
fix=_normalize_pt_netref,
sort=_sort_network_pt,
shield_text=_use_ref_as_is,
),
'RO': CountryNetworkLogic(
backfill=_guess_network_ro,
fix=_normalize_ro_netref,
sort=_sort_network_ro,
shield_text=_shield_text_ro,
),
'RU': CountryNetworkLogic(
backfill=_guess_network_ru,
fix=_normalize_ru_netref,
sort=_sort_network_ru,
shield_text=_use_ref_as_is,
),
'SG': CountryNetworkLogic(
backfill=_guess_network_sg,
fix=_normalize_sg_netref,
shield_text=_use_ref_as_is,
),
'TR': CountryNetworkLogic(
backfill=_guess_network_tr,
fix=_normalize_tr_netref,
sort=_sort_network_tr,
shield_text=_use_ref_as_is,
),
'UA': CountryNetworkLogic(
backfill=_guess_network_ua,
fix=_normalize_ua_netref,
sort=_sort_network_ua,
shield_text=_use_ref_as_is,
),
'US': CountryNetworkLogic(
backfill=_do_not_backfill,
sort=_sort_network_us,
),
'VN': CountryNetworkLogic(
backfill=_guess_network_vn,
fix=_normalize_vn_netref,
sort=_sort_network_vn,
shield_text=_use_ref_as_is,
),
'ZA': CountryNetworkLogic(
backfill=_guess_network_za,
fix=_normalize_za_netref,
sort=_sort_network_za,
shield_text=_use_ref_as_is,
),
}
# regular expression to look for a country code at the beginning of the network
# tag.
_COUNTRY_CODE = re.compile('^([a-z][a-z])[:-](.*)', re.UNICODE | re.IGNORECASE)
def _fixup_network_country_code(network):
if network is None:
return None
m = _COUNTRY_CODE.match(network)
if m:
suffix = m.group(2)
# fix up common suffixes which are plural with ones which are singular.
if suffix.lower() == 'roads':
suffix = 'road'
network = m.group(1).upper() + ':' + suffix
return network
def merge_networks_from_tags(shape, props, fid, zoom):
"""
Take the network and ref tags from the feature and, if they both exist, add
them to the mz_networks list. This is to make handling of networks and refs
more consistent across elements.
"""
network = props.get('network')
ref = props.get('ref')
mz_networks = props.get('mz_networks', [])
country_code = props.get('country_code')
# apply some generic fixes to networks:
# * if they begin with two letters and a colon, then make sure the two
# letters are upper case, as they're probably a country code.
# * if they begin with two letters and a dash, then make the letters upper
# case and replace the dash with a colon.
# * expand ;-delimited lists in refs
for i in xrange(0, len(mz_networks), 3):
t, n, r = mz_networks[i:i+3]
if t == 'road' and n is not None:
n = _fixup_network_country_code(n)
mz_networks[i+1] = n
if r is not None and ';' in r:
refs = r.split(';')
mz_networks[i+2] = refs.pop()
for new_ref in refs:
mz_networks.extend((t, n, new_ref))
# for road networks, if there's no explicit network, but the country code
# and ref are both available, then try to use them to back-fill the
# network.
if props.get('kind') in ('highway', 'major_road') and \
country_code and ref:
# apply country-specific logic to try and backfill the network from
# structure we know about how refs work in the country.
logic = _COUNTRY_SPECIFIC_ROAD_NETWORK_LOGIC.get(country_code)
# if the road is a member of exactly one road relation, which provides
# a network and no ref, and the element itself provides no network,
# then use the network from the relation instead.
if network is None:
solo_networks_from_relations = []
for i in xrange(0, len(mz_networks), 3):
t, n, r = mz_networks[i:i+3]
if t == 'road' and n and (r is None or r == ref):
solo_networks_from_relations.append((n, i))
# if we found one _and only one_ road network, then we use the
# network value and delete the [type, network, ref] 3-tuple from
# mz_networks (which is a flattened list of them). because there's
# only one, we can delete it by using its index.
if len(solo_networks_from_relations) == 1:
network, i = solo_networks_from_relations[0]
# add network back into properties in case we need to pass it
# to the backfill.
props['network'] = network
del mz_networks[i:i+3]
if logic and logic.backfill:
networks_and_refs = logic.backfill(props) or []
# if we found a ref, but the network was not provided, then "use
# up" the network tag by assigning it to the first network. this
# deals with cases where people write network="X", ref="1;Y2" to
# mean "X1" and "Y2".
if networks_and_refs:
net, r = networks_and_refs[0]
if net is None and network is not None:
networks_and_refs[0] = (network, r)
# if we extracted information from the network and ref, then
# we don't want to process it again.
network = None
ref = None
for net, r in networks_and_refs:
mz_networks.extend(['road', net, r])
elif network is None:
# last ditch backfill, if we know nothing else about this element,
# at least we know what country it is in. but don't add if there's
# an entry in mz_networks with the same ref!
if ref:
found = False
for i in xrange(0, len(mz_networks), 3):
t, _, r = mz_networks[i:i+3]
if t == 'road' and r == ref:
found = True
break
if not found:
network = country_code
# if there's no network, but the operator indicates a network, then we can
# back-fill an approximate network tag from the operator. this can mean
# that extra refs are available for road networks.
elif network is None:
operator = props.get('operator')
backfill_network = _NETWORK_OPERATORS.get(operator)
if backfill_network:
network = backfill_network
if network and ref:
props.pop('network', None)
props.pop('ref')
mz_networks.extend([_guess_type_from_network(network), network, ref])
if mz_networks:
props['mz_networks'] = mz_networks
return (shape, props, fid)
# a pattern to find any number in a string, as a fallback for looking up road
# reference numbers.
_ANY_NUMBER = re.compile('[^0-9]*([0-9]+)')
def _default_sort_network(network, ref):
"""
Returns an integer representing the numeric importance of the network,
where lower numbers are more important.
This is to handle roads which are part of many networks, and ensuring
that the most important one is displayed. For example, in the USA many
roads can be part of both interstate (US:I) and "US" (US:US) highways,
and possibly state ones as well (e.g: US:NY:xxx). In addition, there
are international conventions around the use of "CC:national" and
"CC:regional:*" where "CC" is an ISO 2-letter country code.
Here we treat national-level roads as more important than regional or
lower, and assume that the deeper the network is in the hierarchy, the
less important the road. Roads with lower "ref" numbers are considered
more important than higher "ref" numbers, if they are part of the same
network.
"""
if network is None:
network_code = 9999
elif ':national' in network:
network_code = 1
elif ':regional' in network:
network_code = 2
elif network == 'e-road' in network:
network_code = 9000
else:
network_code = len(network.split(':')) + 3
ref = _ref_importance(ref)
return network_code * 10000 + min(ref, 9999)
_WALKING_NETWORK_CODES = {
'iwn': 1,
'nwn': 2,
'rwn': 3,
'lwn': 4,
}
_BICYCLE_NETWORK_CODES = {
'icn': 1,
'ncn': 2,
'rcn': 3,
'lcn': 4,
}
def _generic_network_importance(network, ref, codes):
# get a code based on the "largeness" of the network
code = codes.get(network, len(codes))
# get a numeric ref, if one is available. treat things with no ref as if
# they had a very high ref, and so reduced importance.
try:
ref = max(int(ref or 9999), 0)
except ValueError:
# if ref isn't an integer, then it's likely a name, which might be
# more important than a number
ref = 0
return code * 10000 + min(ref, 9999)
def _walking_network_importance(network, ref):
return _generic_network_importance(network, ref, _WALKING_NETWORK_CODES)
def _bicycle_network_importance(network, ref):
return _generic_network_importance(network, ref, _BICYCLE_NETWORK_CODES)
def _bus_network_importance(network, ref):
return _generic_network_importance(network, ref, {})
_NUMBER_AT_FRONT = re.compile(r'^(\d+\w*)', re.UNICODE)
_SINGLE_LETTER_AT_FRONT = re.compile(r'^([^\W\d]) *(\d+)', re.UNICODE)
_LETTER_THEN_NUMBERS = re.compile(r'^[^\d\s_]+[ -]?([^\s]+)',
re.UNICODE | re.IGNORECASE)
_UA_TERRITORIAL_RE = re.compile(r'^(\w)-(\d+)-(\d+)$',
re.UNICODE | re.IGNORECASE)
def _make_unicode_or_none(ref):
if isinstance(ref, unicode):
# no need to do anything, it's already okay
return ref
elif isinstance(ref, str):
# it's UTF-8 encoded bytes, so make it a unicode
return unicode(ref, 'utf-8')
# dunno what this is?!!
return None
def _road_shield_text(network, ref):
"""
Try to extract the string that should be displayed within the road shield,
based on the raw ref and the network value.
"""
# FI-PI-LI is just a special case?
if ref == 'FI-PI-LI':
return ref
# These "belt" roads have names in the ref which should be in the shield,
# there's no number.
if network and network == 'US:PA:Belt':
return ref
# Ukranian roads sometimes have internal dashes which should be removed.
if network and network.startswith('ua:'):
m = _UA_TERRITORIAL_RE.match(ref)
if m:
return m.group(1) + m.group(2) + m.group(3)
# Greek roads sometimes have alphabetic prefixes which we should _keep_,
# unlike for other roads.
if network and (network.startswith('GR:') or network.startswith('gr:')):
return ref
# If there's a number at the front (optionally with letters following),
# then that's the ref.
m = _NUMBER_AT_FRONT.match(ref)
if m:
return m.group(1)
# If there's a letter at the front, optionally space, and then a number,
# the ref is the concatenation (without space) of the letter and number.
m = _SINGLE_LETTER_AT_FRONT.match(ref)
if m:
return m.group(1) + m.group(2)
# Otherwise, try to match a bunch of letters followed by a number.
m = _LETTER_THEN_NUMBERS.match(ref)
if m:
return m.group(1)
# Failing that, give up and just return the ref as-is.
return ref
def _default_shield_text(network, ref):
"""
Without any special properties of the ref to make the shield text from,
just use the 'ref' property.
"""
return ref
# _Network represents a type of route network.
# prefix is what we should insert into
# the property we put on the feature (e.g: prefix + 'network' for
# 'bicycle_network' and so forth). shield_text_fn is a function called with the
# network and ref to get the text which should be shown on the shield.
_Network = namedtuple(
'_Network', 'prefix shield_text_fn network_importance_fn')
_ROAD_NETWORK = _Network(
'',
_road_shield_text,
None)
_FOOT_NETWORK = _Network(
'walking_',
_default_shield_text,
_walking_network_importance)
_BIKE_NETWORK = _Network(
'bicycle_',
_default_shield_text,
_bicycle_network_importance)
_BUS_NETWORK = _Network(
'bus_',
_default_shield_text,
_bus_network_importance)
_NETWORKS = {
'road': _ROAD_NETWORK,
'foot': _FOOT_NETWORK,
'hiking': _FOOT_NETWORK,
'bicycle': _BIKE_NETWORK,
'bus': _BUS_NETWORK,
'trolleybus': _BUS_NETWORK,
}
def extract_network_information(shape, properties, fid, zoom):
"""
Take the triples of (route_type, network, ref) from `mz_networks` and
extract them into two arrays of network and shield_text information.
"""
mz_networks = properties.pop('mz_networks', None)
country_code = properties.get('country_code')
country_logic = _COUNTRY_SPECIFIC_ROAD_NETWORK_LOGIC.get(country_code)
if mz_networks is not None:
# take the list and make triples out of it
itr = iter(mz_networks)
groups = defaultdict(list)
for (type, network, ref) in zip(itr, itr, itr):
n = _NETWORKS.get(type)
if n:
groups[n].append([network, ref])
for network, vals in groups.items():
all_networks = 'all_' + network.prefix + 'networks'
all_shield_texts = 'all_' + network.prefix + 'shield_texts'
shield_text_fn = network.shield_text_fn
if network is _ROAD_NETWORK and country_logic and \
country_logic.shield_text:
shield_text_fn = country_logic.shield_text
shield_texts = list()
network_names = list()
for network_name, ref in vals:
network_names.append(network_name)
ref = _make_unicode_or_none(ref)
if ref is not None:
ref = shield_text_fn(network_name, ref)
# we try to keep properties as utf-8 encoded str, but the
# shield text function may have turned them into unicode.
# this is a catch-all just to make absolutely sure.
if isinstance(ref, unicode):
ref = ref.encode('utf-8')
shield_texts.append(ref)
properties[all_networks] = network_names
properties[all_shield_texts] = shield_texts
return (shape, properties, fid)
def _choose_most_important_network(properties, prefix, importance_fn):
"""
Use the `_network_importance` function to select any road networks from
`all_networks` and `all_shield_texts`, taking the most important one.
"""
all_networks = 'all_' + prefix + 'networks'
all_shield_texts = 'all_' + prefix + 'shield_texts'
networks = properties.pop(all_networks, None)
shield_texts = properties.pop(all_shield_texts, None)
country_code = properties.get('country_code')
if networks and shield_texts:
def network_key(t):
return importance_fn(*t)
tuples = sorted(set(zip(networks, shield_texts)), key=network_key)
# i think most route designers would try pretty hard to make sure that
# a segment of road isn't on two routes of different networks but with
# the same shield text. most likely when this happens it's because we
# have duplicate information in the element and relations it's a part
# of. so get rid of anything with network=None where there's an entry
# with the same ref (and network != none).
seen_ref = set()
new_tuples = []
for network, ref in tuples:
if network:
if ref:
seen_ref.add(ref)
new_tuples.append((network, ref))
elif ref is not None and ref not in seen_ref:
# network is None, fall back to the country code
new_tuples.append((country_code, ref))
tuples = new_tuples
if tuples:
# expose first network as network/shield_text
network, ref = tuples[0]
properties[prefix + 'network'] = network
properties[prefix + 'shield_text'] = ref
# replace properties with sorted versions of themselves
properties[all_networks] = [n[0] for n in tuples]
properties[all_shield_texts] = [n[1] for n in tuples]
return properties
def choose_most_important_network(shape, properties, fid, zoom):
for net in _NETWORKS.values():
prefix = net.prefix
if net is _ROAD_NETWORK:
country_code = properties.get('country_code')
logic = _COUNTRY_SPECIFIC_ROAD_NETWORK_LOGIC.get(country_code)
importance_fn = None
if logic:
importance_fn = logic.sort
if not importance_fn:
importance_fn = _default_sort_network
else:
importance_fn = net.network_importance_fn
properties = _choose_most_important_network(
properties, prefix, importance_fn)
return (shape, properties, fid)
def buildings_unify(ctx):
"""
Unify buildings with their parts. Building parts will receive a
root_id property which will be the id of building parent they are
associated with.
"""
zoom = ctx.nominal_zoom
start_zoom = ctx.params.get('start_zoom', 0)
if zoom < start_zoom:
return None
source_layer = ctx.params.get('source_layer')
assert source_layer is not None, 'unify_buildings: missing source_layer'
feature_layers = ctx.feature_layers
layer = _find_layer(feature_layers, source_layer)
if layer is None:
return None
class geom_with_building_id(object):
def __init__(self, geom, building_id):
self.geom = geom
self.building_id = building_id
self._geom = geom._geom
self.is_empty = geom.is_empty
indexable_buildings = []
parts = []
for feature in layer['features']:
shape, props, feature_id = feature
kind = props.get('kind')
if kind == 'building':
building_id = props.get('id')
if building_id:
indexed_building = geom_with_building_id(shape, building_id)
indexable_buildings.append(indexed_building)
elif kind == 'building_part':
parts.append(feature)
if not (indexable_buildings and parts):
return
buildings_index = STRtree(indexable_buildings)
for part in parts:
best_overlap = 0
root_building_id = None
part_shape, part_props, part_feature_id = part
indexed_buildings = buildings_index.query(part_shape)
for indexed_building in indexed_buildings:
building_shape = indexed_building.geom
intersection = part_shape.intersection(building_shape)
overlap = intersection.area
if overlap > best_overlap:
best_overlap = overlap
root_building_id = indexed_building.building_id
if root_building_id is not None:
part_props['root_id'] = root_building_id
def truncate_min_zoom_to_2dp(shape, properties, fid, zoom):
"""
Truncate the "min_zoom" property to two decimal places.
"""
min_zoom = properties.get('min_zoom')
if min_zoom:
properties['min_zoom'] = round(min_zoom, 2)
return shape, properties, fid
def truncate_min_zoom_to_1dp(shape, properties, fid, zoom):
"""
Truncate the "min_zoom" property to one decimal place.
"""
min_zoom = properties.get('min_zoom')
if min_zoom:
properties['min_zoom'] = round(min_zoom, 1)
return shape, properties, fid
class Palette(object):
"""
A collection of named colours which allows relatively fast lookup of the
closest named colour to any particular input colour.
Inspired by https://github.com/cooperhewitt/py-cooperhewitt-swatchbook
"""
def __init__(self, colours):
self.colours = colours
self.namelookup = dict()
for name, colour in colours.items():
assert len(colour) == 3, \
"Colours must lists of be of length 3 (%r: %r)" % \
(name, colour)
for val in colour:
assert isinstance(val, int), \
"Colour values must be integers (%r: %r)" % (name, colour)
assert val >= 0 and val <= 255, \
"Colour values must be between 0 and 255 (%r: %r)" % \
(name, colour)
self.namelookup[tuple(colour)] = name
self.tree = kdtree.create(colours.values())
def __call__(self, colour):
"""
Returns the name of the closest colour in the palette to the input
colour.
"""
node, dist = self.tree.search_nn(colour)
return self.namelookup[tuple(node.data)]
def get(self, name):
return self.colours.get(name)
def palettize_colours(ctx):
"""
Derive a colour from each feature by looking at one or more input
attributes and match that to a palette of name to colour mappings given
in the `colours` parameter. The name of the colour will be output in the
feature's properties using a key from the `attribute` paramter.
"""
from vectordatasource.colour import parse_colour
layer_name = ctx.params.get('layer')
assert layer_name, \
'Parameter layer was missing from palettize config'
attr_name = ctx.params.get('attribute')
assert attr_name, \
'Parameter attribute was missing from palettize config'
colours = ctx.params.get('colours')
assert colours, \
'Dict mapping colour names to RGB triples was missing from config'
input_attrs = ctx.params.get('input_attributes', ['colour'])
layer = _find_layer(ctx.feature_layers, layer_name)
palette = Palette(colours)
for (shape, props, fid) in layer['features']:
colour = None
for attr in input_attrs:
colour = props.get(attr)
if colour:
break
if colour:
rgb = parse_colour(colour)
if rgb:
props[attr_name] = palette(rgb)
return layer
def backfill_from_other_layer(ctx):
"""
Matches features from one layer with the other on the basis of the feature
ID and, if the configured layer property doesn't exist on the feature, but
the other layer property does exist on the matched feature, then copy it
across.
The initial use for this is to backfill POI kinds into building kind_detail
when the building doesn't already have a different kind_detail supplied.
"""
layer_name = ctx.params.get('layer')
assert layer_name, \
'Parameter layer was missing from ' \
'backfill_from_other_layer config'
other_layer_name = ctx.params.get('other_layer')
assert other_layer_name, \
'Parameter other_layer_name was missing from ' \
'backfill_from_other_layer config'
layer_key = ctx.params.get('layer_key')
assert layer_key, \
'Parameter layer_key was missing from ' \
'backfill_from_other_layer config'
other_key = ctx.params.get('other_key')
assert other_key, \
'Parameter other_key was missing from ' \
'backfill_from_other_layer config'
layer = _find_layer(ctx.feature_layers, layer_name)
other_layer = _find_layer(ctx.feature_layers, other_layer_name)
# build an index of feature ID to property value in the other layer
other_values = {}
for (shape, props, fid) in other_layer['features']:
# prefer to use the `id` property rather than the fid.
fid = props.get('id', fid)
kind = props.get(other_key)
# make sure fid is truthy, as it can be set to None on features
# created by merging.
if kind and fid:
other_values[fid] = kind
# apply those to features which don't already have a value
for (shape, props, fid) in layer['features']:
if layer_key not in props:
fid = props.get('id', fid)
value = other_values.get(fid)
if value:
props[layer_key] = value
return layer
def drop_layer(ctx):
"""
Drops the named layer from the list of layers.
"""
layer_to_delete = ctx.params.get('layer')
for idx, feature_layer in enumerate(ctx.feature_layers):
layer_datum = feature_layer['layer_datum']
layer_name = layer_datum['name']
if layer_name == layer_to_delete:
del ctx.feature_layers[idx]
break
return None
def _fixup_country_specific_networks(shape, props, fid, zoom):
"""
Apply country-specific fixup functions to mz_networks.
"""
mz_networks = props.get('mz_networks')
country_code = props.get('country_code')
logic = _COUNTRY_SPECIFIC_ROAD_NETWORK_LOGIC.get(country_code)
if logic and logic.fix and mz_networks:
new_networks = []
# mz_networks is a list of repeated [type, network, ref, ...], it isn't
# nested!
itr = iter(mz_networks)
for (type, network, ref) in zip(itr, itr, itr):
if type == 'road':
network, ref = logic.fix(network, ref)
new_networks.extend([type, network, ref])
props['mz_networks'] = new_networks
return (shape, props, fid)
def road_networks(ctx):
"""
Fix up road networks. This means looking at the networks from the
relation(s), if any, merging that with information from the tags on the
original object and any structure we expect from looking at the country
code.
"""
params = _Params(ctx, 'road_networks')
layer_name = params.required('layer')
layer = _find_layer(ctx.feature_layers, layer_name)
zoom = ctx.nominal_zoom
funcs = [
merge_networks_from_tags,
_fixup_country_specific_networks,
extract_network_information,
choose_most_important_network,
]
new_features = []
for (shape, props, fid) in layer['features']:
for fn in funcs:
shape, props, fid = fn(shape, props, fid, zoom)
new_features.append((shape, props, fid))
layer['features'] = new_features
return None
# helper class to wrap logic around extracting required and optional parameters
# from the context object passed to post-processors, making its use more
# concise and readable in the post-processor method itself.
#
class _Params(object):
def __init__(self, ctx, post_processor_name):
self.ctx = ctx
self.post_processor_name = post_processor_name
def required(self, name, typ=str, default=None):
"""
Returns a named parameter of the given type and default from the
context, raising an assertion failed exception if the parameter wasn't
present, or wasn't an instance of the type.
"""
value = self.optional(name, typ=typ, default=default)
assert value is not None, \
'Required parameter %r was missing from %r config' \
% (name, self.post_processor_name)
return value
def optional(self, name, typ=str, default=None):
"""
Returns a named parameter of the given type, or the default if that
parameter wasn't given in the context. Raises an exception if the
value was present and is not of the expected type.
"""
value = self.ctx.params.get(name, default)
if value is not None:
assert isinstance(value, typ), \
'Expected parameter %r to be of type %s, but value %r is of ' \
'type %r in %r config' \
% (name, typ.__name__, value, type(value).__name__,
self.post_processor_name)
return value
def point_in_country_logic(ctx):
"""
Intersect points from source layer with target layer, then look up which
country they're in and assign property based on a look-up table.
"""
params = _Params(ctx, 'point_in_country_logic')
layer_name = params.required('layer')
country_layer_name = params.required('country_layer')
country_code_attr = params.required('country_code_attr')
# single attribute version
output_attr = params.optional('output_attr')
# multiple attribute version
output_attrs = params.optional('output_attrs', typ=list)
# must provide one or the other
assert output_attr or output_attrs, 'Must provide one or other of ' \
'output_attr or output_attrs for point_in_country_logic'
logic_table = params.optional('logic_table', typ=dict)
if logic_table is None:
logic_table = ctx.resources.get('logic_table')
assert logic_table is not None, 'Must provide logic_table via a param ' \
'or resource for point_in_country_logic'
where = params.optional('where')
layer = _find_layer(ctx.feature_layers, layer_name)
country_layer = _find_layer(ctx.feature_layers, country_layer_name)
if where is not None:
where = compile(where, 'queries.yaml', 'eval')
# this is a wrapper around a geometry, so that we can store extra
# information in the STRTree.
class country_with_value(object):
def __init__(self, geom, value):
self.geom = geom
self.value = value
self._geom = geom._geom
self.is_empty = geom.is_empty
# construct an STRtree index of the country->value mapping. in many cases,
# the country will cover the whole tile, but in some other cases it will
# not, and it's worth having the speedup of indexing for those.
countries = []
for (shape, props, fid) in country_layer['features']:
country_code = props.get(country_code_attr)
value = logic_table.get(country_code)
if value is not None:
countries.append(country_with_value(shape, value))
countries_index = STRtree(countries)
for (shape, props, fid) in layer['features']:
# skip features where the 'where' clause doesn't match
if where:
local = props.copy()
if not eval(where, {}, local):
continue
candidates = countries_index.query(shape)
for candidate in candidates:
# given that the shape is (expected to be) a point, all
# intersections are the same (there's no measure of the "amount of
# overlap"), so we might as well just stop on the first one.
if shape.intersects(candidate.geom):
if output_attrs:
for output_attr in output_attrs:
props[output_attr] = candidate.value[output_attr]
else:
props[output_attr] = candidate.value
break
return None
def max_zoom_filter(ctx):
"""
For features with a max_zoom, remove them if it's < nominal zoom.
"""
params = _Params(ctx, 'max_zoom_filter')
layers = params.required('layers', typ=list)
nominal_zoom = ctx.nominal_zoom
for layer_name in layers:
layer = _find_layer(ctx.feature_layers, layer_name)
features = layer['features']
new_features = []
for feature in features:
_, props, _ = feature
max_zoom = props.get('max_zoom')
if max_zoom is None or max_zoom >= nominal_zoom:
new_features.append(feature)
layer['features'] = new_features
return None
def min_zoom_filter(ctx):
"""
For features with a min_zoom, remove them if it's > nominal zoom + 1.
"""
params = _Params(ctx, 'min_zoom_filter')
layers = params.required('layers', typ=list)
nominal_zoom = ctx.nominal_zoom
for layer_name in layers:
layer = _find_layer(ctx.feature_layers, layer_name)
features = layer['features']
new_features = []
for feature in features:
_, props, _ = feature
min_zoom = props.get('min_zoom')
if min_zoom is not None and min_zoom < nominal_zoom + 1:
new_features.append(feature)
layer['features'] = new_features
return None
def tags_set_ne_min_max_zoom(ctx):
"""
Override the min zoom and max zoom properties with __ne_* variants from
Natural Earth, if there are any.
"""
params = _Params(ctx, 'tags_set_ne_min_max_zoom')
layer_name = params.required('layer')
layer = _find_layer(ctx.feature_layers, layer_name)
for _, props, _ in layer['features']:
min_zoom = props.pop('__ne_min_zoom', None)
if min_zoom is not None:
# don't overstuff features into tiles when they are in the
# long tail of won't display, but make their min_zoom
# consistent with when they actually show in tiles
if min_zoom % 1 > 0.5:
min_zoom = ceil(min_zoom)
props['min_zoom'] = min_zoom
elif props.get('kind') == 'country':
# countries and regions which don't have a min zoom joined from NE
# are probably either vandalism or unrecognised countries. either
# way, we probably don't want to see them at zoom, which is lower
# than most of the curated NE min zooms. see issue #1826 for more
# information.
props['min_zoom'] = max(6, props['min_zoom'])
elif props.get('kind') == 'region':
props['min_zoom'] = max(8, props['min_zoom'])
max_zoom = props.pop('__ne_max_zoom', None)
if max_zoom is not None:
props['max_zoom'] = max_zoom
return None
def whitelist(ctx):
"""
Applies a whitelist to a particular property on all features in the layer,
optionally also remapping some values.
"""
params = _Params(ctx, 'whitelist')
layer_name = params.required('layer')
start_zoom = params.optional('start_zoom', default=0, typ=int)
end_zoom = params.optional('end_zoom', typ=int)
property_name = params.required('property')
whitelist = params.required('whitelist', typ=list)
remap = params.optional('remap', default={}, typ=dict)
where = params.optional('where')
# check that we're in the zoom range where this post-processor is supposed
# to operate.
if ctx.nominal_zoom < start_zoom:
return None
if end_zoom is not None and ctx.nominal_zoom >= end_zoom:
return None
if where is not None:
where = compile(where, 'queries.yaml', 'eval')
layer = _find_layer(ctx.feature_layers, layer_name)
features = layer['features']
for feature in features:
_, props, _ = feature
# skip this feature if there's a where clause and it evaluates falsey.
if where is not None:
local = props.copy()
local['zoom'] = ctx.nominal_zoom
if not eval(where, {}, local):
continue
value = props.get(property_name)
if value is not None:
if value in whitelist:
# leave value as-is
continue
elif value in remap:
# replace with replacement value
props[property_name] = remap[value]
else:
# drop the property
props.pop(property_name)
return None
def remap(ctx):
"""
Maps some values for a particular property to others. Similar to whitelist,
but won't remove the property if there's no match.
"""
params = _Params(ctx, 'remap')
layer_name = params.required('layer')
start_zoom = params.optional('start_zoom', default=0, typ=int)
end_zoom = params.optional('end_zoom', typ=int)
property_name = params.required('property')
remap = params.optional('remap', default={}, typ=dict)
where = params.optional('where')
# check that we're in the zoom range where this post-processor is supposed
# to operate.
if ctx.nominal_zoom < start_zoom:
return None
if end_zoom is not None and ctx.nominal_zoom >= end_zoom:
return None
if where is not None:
where = compile(where, 'queries.yaml', 'eval')
layer = _find_layer(ctx.feature_layers, layer_name)
features = layer['features']
for feature in features:
shape, props, _ = feature
# skip this feature if there's a where clause and it evaluates falsey.
if where is not None:
local = props.copy()
local['zoom'] = ctx.nominal_zoom
local['geom_type'] = shape.geom_type
if not eval(where, {}, local):
continue
value = props.get(property_name)
if value in remap:
# replace with replacement value
props[property_name] = remap[value]
return None
def backfill(ctx):
"""
Backfills default values for some features. In other words, if the feature
lacks some or all of the defaults, then set those defaults.
"""
params = _Params(ctx, 'whitelist')
layer_name = params.required('layer')
start_zoom = params.optional('start_zoom', default=0, typ=int)
end_zoom = params.optional('end_zoom', typ=int)
defaults = params.required('defaults', typ=dict)
where = params.optional('where')
# check that we're in the zoom range where this post-processor is supposed
# to operate.
if ctx.nominal_zoom < start_zoom:
return None
if end_zoom is not None and ctx.nominal_zoom >= end_zoom:
return None
if where is not None:
where = compile(where, 'queries.yaml', 'eval')
layer = _find_layer(ctx.feature_layers, layer_name)
features = layer['features']
for feature in features:
_, props, _ = feature
# skip this feature if there's a where clause and it evaluates truthy.
if where is not None:
local = props.copy()
local['zoom'] = ctx.nominal_zoom
if not eval(where, {}, local):
continue
for k, v in defaults.iteritems():
if k not in props:
props[k] = v
return None
def clamp_min_zoom(ctx):
"""
Clamps the min zoom for features depending on context.
"""
params = _Params(ctx, 'clamp_min_zoom')
layer_name = params.required('layer')
start_zoom = params.optional('start_zoom', default=0, typ=int)
end_zoom = params.optional('end_zoom', typ=int)
clamp = params.required('clamp', typ=dict)
property_name = params.required('property')
# check that we're in the zoom range where this post-processor is supposed
# to operate.
if ctx.nominal_zoom < start_zoom:
return None
if end_zoom is not None and ctx.nominal_zoom >= end_zoom:
return None
layer = _find_layer(ctx.feature_layers, layer_name)
features = layer['features']
for feature in features:
_, props, _ = feature
value = props.get(property_name)
min_zoom = props.get('min_zoom')
if value is not None and min_zoom is not None:
min_val = clamp.get(value)
if min_val is not None and min_val > min_zoom:
props['min_zoom'] = min_val
return None
def add_vehicle_restrictions(shape, props, fid, zoom):
"""
Parse the maximum height, weight, length, etc... restrictions on vehicles
and create the `hgv_restriction` and `hgv_restriction_shield_text`.
"""
from math import floor
def _one_dp(val, unit):
deci = int(floor(10 * val))
if deci % 10 == 0:
return "%d%s" % (deci / 10, unit)
return "%.1f%s" % (0.1 * deci, unit)
def _metres(val):
# parse metres or feet and inches, return cm
metres = _to_float_meters(val)
if metres:
return True, _one_dp(metres, 'm')
return False, None
def _tonnes(val):
tonnes = to_float(val)
if tonnes:
return True, _one_dp(tonnes, 't')
return False, None
def _false(val):
return val == 'no', None
Restriction = namedtuple('Restriction', 'kind parse')
restrictions = {
'maxwidth': Restriction('width', _metres),
'maxlength': Restriction('length', _metres),
'maxheight': Restriction('height', _metres),
'maxweight': Restriction('weight', _tonnes),
'maxaxleload': Restriction('wpa', _tonnes),
'hazmat': Restriction('hazmat', _false),
}
hgv_restriction = None
hgv_restriction_shield_text = None
for osm_key, restriction in restrictions.items():
osm_val = props.pop(osm_key, None)
if osm_val is None:
continue
restricted, shield_text = restriction.parse(osm_val)
if not restricted:
continue
if hgv_restriction is None:
hgv_restriction = restriction.kind
hgv_restriction_shield_text = shield_text
else:
hgv_restriction = 'multiple'
hgv_restriction_shield_text = None
if hgv_restriction:
props['hgv_restriction'] = hgv_restriction
if hgv_restriction_shield_text:
props['hgv_restriction_shield_text'] = hgv_restriction_shield_text
return shape, props, fid
def load_collision_ranker(fh):
import yaml
from vectordatasource.collision import CollisionRanker
data = yaml.load(fh)
assert isinstance(data, list)
return CollisionRanker(data)
def add_collision_rank(ctx):
"""
Add or update a collision_rank property on features in the given layers.
The collision rank is looked up from a YAML file consisting of a list of
filters (same syntax as in kind/min_zoom YAML) and "_reserved" blocks.
Collision rank indices are automatically assigned based on where in the
list a matching filter is found.
"""
feature_layers = ctx.feature_layers
zoom = ctx.nominal_zoom
start_zoom = ctx.params.get('start_zoom', 0)
end_zoom = ctx.params.get('end_zoom')
ranker = ctx.resources.get('ranker')
where = ctx.params.get('where')
assert ranker, 'add_collision_rank: missing ranker resource'
if zoom < start_zoom:
return None
if end_zoom is not None and zoom >= end_zoom:
return None
if where:
where = compile(where, 'queries.yaml', 'eval')
for layer in feature_layers:
layer_name = layer['layer_datum']['name']
for shape, props, fid in layer['features']:
# use the "where" clause to limit the selection of features which
# we add collision_rank to.
add_collision_rank = True
if where:
local = defaultdict(lambda: None)
local.update(props)
local['layer_name'] = layer_name
local['_has_name'] = _has_name(props)
add_collision_rank = eval(where, {}, local)
if add_collision_rank:
props_with_layer = props.copy()
props_with_layer['$layer'] = layer_name
rank = ranker((shape, props_with_layer, fid))
if rank is not None:
props['collision_rank'] = rank
return None
# mappings from the fclass_XXX values in the Natural Earth disputed areas data
# to the matching Tilezen kind.
_REMAP_VIEWPOINT_KIND = {
'Disputed (please verify)': 'disputed',
'Indefinite (please verify)': 'indefinite',
'Indeterminant frontier': 'indeterminate',
'International boundary (verify)': 'country',
'Lease limit': 'lease_limit',
'Line of control (please verify)': 'line_of_control',
'Overlay limit': 'overlay_limit',
'Unrecognized': 'unrecognized_country',
'Map unit boundary': 'map_unit',
'Breakaway': 'disputed_breakaway',
'Claim boundary': 'disputed_claim',
'Elusive frontier': 'disputed_elusive',
'Reference line': 'disputed_reference_line',
'Admin-1 region boundary': 'macroregion',
'Admin-1 boundary': 'region',
'Admin-1 statistical boundary': 'region',
'Admin-1 statistical meta bounds': 'region',
'1st Order Admin Lines': 'region',
'Unrecognized Admin-1 region boundary': 'unrecognized_macroregion',
'Unrecognized Admin-1 boundary': 'unrecognized_region',
'Unrecognized Admin-1 statistical boundary': 'unrecognized_region',
'Unrecognized Admin-1 statistical meta bounds': 'unrecognized_region',
}
def remap_viewpoint_kinds(shape, props, fid, zoom):
"""
Remap Natural Earth kinds in kind:* country viewpoints into the standard
Tilezen nomenclature.
"""
for key in props.keys():
if key.startswith('kind:'):
props[key] = _REMAP_VIEWPOINT_KIND.get(props[key])
return (shape, props, fid)
def _list_of_countries(value):
"""
Parses a comma or semicolon delimited list of ISO 3166-1 alpha-2 codes,
discarding those which don't match our expected format. We also allow a
special pseudo-country code "iso".
Returns a list of lower-case, stripped country codes (plus "iso").
"""
from re import match
from re import split
countries = []
candidates = split('[,;]', value)
for candidate in candidates:
# should have an ISO 3166-1 alpha-2 code, so should be 2 ASCII
# latin characters.
candidate = candidate.strip().lower()
if candidate == 'iso' or match('[a-z][a-z]', candidate):
countries.append(candidate)
return countries
def unpack_viewpoint_claims(shape, props, fid, zoom):
"""
Unpack OSM "claimed_by" list into viewpoint kinds.
For example; "claimed_by=AA;BB;CC" should become "kind:aa=country,
kind:bb=country, kind:cc=country" (or region, etc... as appropriate for
the main kind, which should be "unrecognized_TYPE".
Additionally, "recognized_by=XX;YY;ZZ" indicates that these viewpoints,
although they don't claim the territory, recognize the claim and should
see it in their viewpoint as a country/region/county.
"""
prefix = 'unrecognized_'
kind = props.get('kind')
claimed_by = props.get('claimed_by')
recognized_by = props.get('recognized_by')
if kind and kind.startswith(prefix) and claimed_by:
claimed_kind = kind[len(prefix):]
for country in _list_of_countries(claimed_by):
props['kind:' + country] = claimed_kind
if recognized_by:
for viewpoint in _list_of_countries(recognized_by):
props['kind:' + viewpoint] = claimed_kind
return (shape, props, fid)
class _DisputeMasks(object):
"""
Creates a "mask" of polygons by buffering disputed border lines and
provides an interface through cut() to intersect other border lines and
apply kind:xx=unrecognized_* to them.
This allows us to handle disputed borders - we effectively clip them out
of the disputant's viewpoint by setting a property that will hide them.
"""
def __init__(self, buffer_distance):
self.buffer_distance = buffer_distance
self.masks = []
def add(self, shape, props):
from shapely.geometry import CAP_STYLE
from shapely.geometry import JOIN_STYLE
disputed_by = props.get('disputed_by', '')
disputants = _list_of_countries(disputed_by)
if disputants:
# we use a flat cap to avoid straying too much into nearby lines
# and a mitred join to avoid creating extra geometry points to
# represent the curve, as this slows down intersection checks.
buffered_shape = shape.buffer(
self.buffer_distance, CAP_STYLE.flat, JOIN_STYLE.mitre)
self.masks.append((buffered_shape, disputants))
def empty(self):
return not self.masks
def cut(self, shape, props, fid):
"""
Cut the (shape, props, fid) feature against the masks to apply the
dispute to the boundary by setting 'kind:xx' to unrecognized.
"""
updated_features = []
# figure out what we want the boundary kind to be, if it's intersected
# with the dispute mask.
kind = props['kind']
if kind.startswith('unrecognized_'):
unrecognized = kind
else:
unrecognized = 'unrecognized_' + kind
for mask_shape, disputants in self.masks:
# we don't want to override a kind:xx if it has already been set
# (e.g: by a claim), so we filter out disputant viewpoints where
# a kind override has already been set.
#
# this is necessary for dealing with the case where a border is
# both claimed and disputed in the same viewpoint.
non_claim_disputants = []
for disputant in disputants:
key = 'kind:' + disputant
if key not in props:
non_claim_disputants.append(disputant)
if shape.intersects(mask_shape):
cut_shape = shape.intersection(mask_shape)
cut_shape = _filter_geom_types(cut_shape, _LINE_DIMENSION)
shape = shape.difference(mask_shape)
shape = _filter_geom_types(shape, _LINE_DIMENSION)
if not cut_shape.is_empty:
new_props = props.copy()
for disputant in non_claim_disputants:
new_props['kind:' + disputant] = unrecognized
updated_features.append((cut_shape, new_props, None))
if not shape.is_empty:
updated_features.append((shape, props, fid))
return updated_features
# tuple of boundary kind values on which we should set alternate viewpoints
# from disputed_by ways.
_BOUNDARY_KINDS = ('country', 'region', 'county', 'locality',
'aboriginal_lands')
def apply_disputed_boundary_viewpoints(ctx):
"""
Use the dispute features to apply viewpoints to the admin boundaries.
We take the 'mz_internal_dispute_mask' features and build a mask from them.
The mask is used to move the information from 'disputed_by' lists on the
mask features to 'kind:xx' overrides on the boundary features. The mask
features are discarded afterwards.
"""
params = _Params(ctx, 'apply_disputed_boundary_viewpoints')
layer_name = params.required('base_layer')
start_zoom = params.optional('start_zoom', typ=int, default=0)
end_zoom = params.optional('end_zoom', typ=int)
layer = _find_layer(ctx.feature_layers, layer_name)
zoom = ctx.nominal_zoom
if zoom < start_zoom or \
(end_zoom is not None and zoom >= end_zoom):
return None
# we tried intersecting lines against lines, but this often led to a sort
# of "dashed pattern" in the output where numerical imprecision meant two
# lines don't quite intersect.
#
# we solve this by buffering out the shape by a small amount so that we're
# more likely to get a clean cut against the boundary line.
#
# tolerance for zoom is the length of 1px at 256px per tile, so we can take
# a fraction of that to get sub-pixel alignment.
buffer_distance = 0.1 * tolerance_for_zoom(zoom)
# first, separate out the dispute mask geometries
masks = _DisputeMasks(buffer_distance)
# features that we're going to return
new_features = []
# boundaries, which we pull out separately to apply the disputes to
boundaries = []
for shape, props, fid in layer['features']:
kind = props.get('kind')
if kind == 'mz_internal_dispute_mask':
masks.add(shape, props)
elif kind in _BOUNDARY_KINDS:
boundaries.append((shape, props, fid))
# we want to apply disputes to already generally-unrecognised borders
# too, as this allows for multi-level fallback from one viewpoint
# possibly through several others before reaching the default.
elif (kind.startswith('unrecognized_') and
kind[len('unrecognized_'):] in _BOUNDARY_KINDS):
boundaries.append((shape, props, fid))
else:
# pass through this feature - we just ignore it.
new_features.append((shape, props, fid))
# quick escape if there are no masks (which should be the common case)
if masks.empty():
# keep the boundaries and other features we already passed through,
# but drop the masks - we don't want them in the output.
new_features.extend(boundaries)
else:
for shape, props, fid in boundaries:
# cut boundary features against disputes and set the alternate
# viewpoint on any which intersect.
features = masks.cut(shape, props, fid)
new_features.extend(features)
layer['features'] = new_features
return layer
def update_min_zoom(ctx):
"""
Update the min zoom for features matching the Python fragment "where"
clause. If none is provided, update all features.
The new min_zoom is calculated by evaluating a Python fragment passed
in through the "min_zoom" parameter. This is evaluated in the context
of the features' parameters, plus a zoom variable.
If the min zoom is lower than the current min zoom, the current one is
kept. If the min zoom is increased, then it's checked against the
current zoom and the feature dropped if it's not in range.
"""
params = _Params(ctx, 'update_min_zoom')
layer_name = params.required('source_layer')
start_zoom = params.optional('start_zoom', typ=int, default=0)
end_zoom = params.optional('end_zoom', typ=int)
min_zoom = params.required('min_zoom')
where = params.optional('where')
layer = _find_layer(ctx.feature_layers, layer_name)
zoom = ctx.nominal_zoom
if zoom < start_zoom or \
(end_zoom is not None and zoom >= end_zoom):
return None
min_zoom = compile(min_zoom, 'queries.yaml', 'eval')
if where:
where = compile(where, 'queries.yaml', 'eval')
new_features = []
for shape, props, fid in layer['features']:
local = defaultdict(lambda: None)
local.update(props)
local['zoom'] = zoom
if where and eval(where, {}, local):
new_min_zoom = eval(min_zoom, {}, local)
if new_min_zoom > props.get('min_zoom'):
props['min_zoom'] = new_min_zoom
if new_min_zoom >= zoom + 1 and zoom < 16:
# DON'T add feature - it's masked by min zoom.
continue
new_features.append((shape, props, fid))
layer['features'] = new_features
return layer
def major_airport_detector(shape, props, fid, zoom):
if props.get('kind') == 'aerodrome':
passengers = props.get('passenger_count', 0)
kind_detail = props.get('kind_detail')
# if we didn't detect that the airport is international (probably
# missing tagging to indicate that), but it carries over a million
# passengers a year, then it's probably an airport in the same class
# as an international one.
#
# for example, TPE (Taipei) airport hasn't got any international
# tagging, but carries over 45 million passengers a year. however,
# CGH (Sao Paulo Congonhas) carries 21 million, but is actually a
# domestic airport -- however it's so large we'd probably want to
# display it at the same scale as an international airport.
if kind_detail != 'international' and passengers > 1000000:
props['kind_detail'] = 'international'
# likewise, if we didn't detect a kind detail, but the number of
# passengers suggests it's more than just a flying club airfield,
# then set a regional kind_detail.
elif kind_detail is None and passengers > 10000:
props['kind_detail'] = 'regional'
return shape, props, fid
_NE_COUNTRY_CAPITALS = [
'Admin-0 region capital',
'Admin-0 capital alt',
'Admin-0 capital',
]
_NE_REGION_CAPITALS = [
'Admin-1 capital',
'Admin-1 region capital',
]
def capital_alternate_viewpoint(shape, props, fid, zoom):
"""
Removes the fclass_* properties and replaces them with viewpoint overrides
for country_capital and region_capital.
"""
fclass_prefix = 'fclass_'
default_country_capital = props.get('country_capital', False)
default_region_capital = props.get('region_capital', False)
for k in props.keys():
if k.startswith(fclass_prefix):
viewpoint = k[len(fclass_prefix):]
fclass = props.pop(k)
country_capital = fclass in _NE_COUNTRY_CAPITALS
region_capital = fclass in _NE_REGION_CAPITALS
if country_capital:
props['country_capital:' + viewpoint] = True
elif region_capital:
props['region_capital:' + viewpoint] = True
if default_country_capital and not country_capital:
props['country_capital:' + viewpoint] = False
elif default_region_capital and not region_capital:
props['region_capital:' + viewpoint] = False
return shape, props, fid
| mit | 5,696,704,580,245,091 | 30.690366 | 540 | 0.609424 | false | 3.759654 | false | false | false |
openstack/blazar | blazar/enforcement/filters/base_filter.py | 1 | 1225 | # Copyright (c) 2020 University of Chicago.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class BaseFilter(metaclass=abc.ABCMeta):
enforcement_opts = []
def __init__(self, conf=None):
self.conf = conf
for opt in self.enforcement_opts:
self.conf.register_opt(opt, 'enforcement')
def __getattr__(self, name):
func = getattr(self.conf.enforcement, name)
return func
@abc.abstractmethod
def check_create(self, context, lease_values):
pass
@abc.abstractmethod
def check_update(self, context, current_lease_values, new_lease_values):
pass
@abc.abstractmethod
def on_end(self, context, lease_values):
pass
| apache-2.0 | -5,319,428,991,090,619,000 | 27.488372 | 76 | 0.690612 | false | 3.977273 | false | false | false |
albapa/glosim | libmatch/structures.py | 1 | 12323 | #!/usr/bin/env python
# Computes the matrix of similarities between structures in a xyz file
# by first getting SOAP descriptors for all environments, finding the best
# match between environments using the Hungarian algorithm, and finally
# summing up the environment distances.
# Supports periodic systems, matching between structures with different
# atom number and kinds, and sports the infrastructure for introducing an
# alchemical similarity kernel to match different atomic species
# import sys, os, pickle
import sys, os
import cPickle as pickle
import gc
from lap.lap import best_pairs, best_cost, lcm_best_cost
from lap.perm import xperm, mcperm, rematch
import numpy as np
from environments import environ, alchemy, envk
import quippy
__all__ = [ "structk", "structure" ]
class structure:
def __init__(self, salchem=None):
self.env={}
self.species={}
self.zspecies = []
self.atz = []
self.nenv=0
self.alchem=salchem
if self.alchem is None: self.alchem=alchemy()
self.globenv = None
def getnz(self, sp):
if sp in self.species:
return self.species[sp]
else: return 0
def getatomenv(self, i):
if i>=len(self.atz):
raise IndexError("Trying to access atom past structure size")
k=0
lsp = {}
for z in self.atz:
if z in lsp: lsp[z]+=1
else: lsp[z] = 0
if i==k:
return self.env[z][lsp[z]]
k+=1
def getenv(self, sp, i):
if sp in self.env and i<len(self.env[sp]):
return self.env[sp][i]
else:
return environ(self.nmax,self.lmax,self.alchem,sp) # missing atoms environments just returned as isolated species!
def ismissing(self, sp, i):
if sp in self.species and i<self.species[sp]:
return False
else: return True
def parse(self, fat, coff=5.0, cotw=0.5, nmax=4, lmax=3, gs=0.5, cw=1.0, nocenter=[], noatom=[], kit=None, soapdump=None):
""" Takes a frame in the QUIPPY format and computes a list of its environments. """
# removes atoms that are to be ignored
at = fat.copy()
nol = []
for s in range(1,at.z.size+1):
if at.z[s] in noatom: nol.append(s)
if len(nol)>0: at.remove_atoms(nol)
self.nmax = nmax
self.lmax = lmax
self.atz = at.z.copy()
self.species = {}
for z in at.z:
if z in self.species: self.species[z]+=1
else: self.species[z] = 1
self.zspecies = self.species.keys();
self.zspecies.sort();
lspecies = 'n_species='+str(len(self.zspecies))+' species_Z={ '
for z in self.zspecies: lspecies = lspecies + str(z) + ' '
lspecies = lspecies + '}'
at.set_cutoff(coff);
at.calc_connect();
self.nenv = 0
if not soapdump is None:
soapdump.write("####### SOAP VECTOR FRAME ######\n")
for sp in self.species:
if sp in nocenter:
self.species[sp]=0
continue # Option to skip some environments
# first computes the descriptors of species that are present
if not soapdump is None: sys.stderr.write("SOAP STRING: "+"soap central_reference_all_species=F central_weight="+str(cw)+" covariance_sigma0=0.0 atom_sigma="+str(gs)+" cutoff="+str(coff)+" cutoff_transition_width="+str(cotw)+" n_max="+str(nmax)+" l_max="+str(lmax)+' '+lspecies+' Z='+str(sp)+"\n")
desc = quippy.descriptors.Descriptor("soap central_reference_all_species=F central_weight="+str(cw)+" covariance_sigma0=0.0 atom_sigma="+str(gs)+" cutoff="+str(coff)+" cutoff_transition_width="+str(cotw)+" n_max="+str(nmax)+" l_max="+str(lmax)+' '+lspecies+' Z='+str(sp) )
try:
psp = desc.calc(at)["descriptor"].T
except TypeError:
print("Interface change in QUIP/GAP. Update your code first.")
if not soapdump is None:
soapdump.write("Specie %d - %d atoms\n"% (sp,len(psp)))
for p in psp:
np.savetxt(soapdump,[p])
# now repartitions soaps in environment descriptors
lenv = []
for p in psp:
nenv = environ(nmax, lmax, self.alchem)
nenv.convert(sp, self.zspecies, p)
lenv.append(nenv)
self.env[sp] = lenv
self.nenv += self.species[sp]
# adds kit data
if kit is None: kit = {}
for sp in kit:
if not sp in self.species:
self.species[sp]=0
self.env[sp] = []
for k in range(self.species[sp], kit[sp]):
self.env[sp].append(environ(self.nmax,self.lmax,self.alchem,sp))
self.nenv+=1
self.species[sp] = kit[sp]
self.zspecies = self.species.keys()
self.zspecies.sort()
# also compute the global (flattened) fingerprint
self.globenv = environ(nmax, lmax, self.alchem)
for k, se in self.env.items():
for e in se:
self.globenv.add(e)
# divides by the number of atoms in the structure
for sij in self.globenv.soaps: self.globenv.soaps[sij]*=1.0/self.nenv
# self.globenv.normalize() #if needed, normalization will be done later on.....
def gcd(a,b):
if (b>a): a,b = b, a
while (b): a, b = b, a%b
return a
def lcm(a,b):
return a*b/gcd(b,a)
#def gstructk(strucA, strucB, alchem=alchemy(), periodic=False):
#
# return envk(strucA.globenv, strucB.globenv, alchem)
def structk(strucA, strucB, alchem=alchemy(), periodic=False, mode="match", fout=None, peps=0.0, gamma=1.0, zeta=1.0, xspecies=False):
# computes the SOAP similarity KERNEL between two structures by combining atom-centered kernels
# possible kernel modes include:
# average : scalar product between averaged kernels
# match: best-match hungarian kernel
# permanent: average over all permutations
# average kernel. quick & easy!
if mode=="fastavg":
genvA=strucA.globenv
genvB=strucB.globenv
return envk(genvA, genvB, alchem)**zeta, 0
elif mode=="fastspecies":
# for now, only implement standard Kronecker alchemy
senvB = environ(strucB.nmax, strucB.lmax, strucB.alchem)
kk = 0
for za in strucA.zspecies:
if not za in strucB.zspecies: continue
senvA = environ(strucA.nmax, strucA.lmax, strucA.alchem)
for ia in xrange(strucA.getnz(za)):
senvA.add(strucA.getenv(za, ia))
senvB = environ(strucB.nmax, strucB.lmax, strucB.alchem)
for ib in xrange(strucB.getnz(za)):
senvB.add(strucB.getenv(za, ib))
kk += envk(senvA, senvB, alchem)**zeta
kk/=strucA.nenv*strucB.nenv
return kk,0
# for zb, nzb in nspeciesB:
# for ib in xrange(nzb):
# return envk(genvA, genvB, alchem), 0
nenv = 0
if periodic: # replicate structures to match structures of different periodicity
# we do not check for compatibility at this stage, just assume that the
# matching will be done somehow (otherwise it would be exceedingly hard to manage in case of non-standard alchemy)
nspeciesA = []
nspeciesB = []
for z in strucA.zspecies:
nspeciesA.append( (z, strucA.getnz(z)) )
for z in strucB.zspecies:
nspeciesB.append( (z, strucB.getnz(z)) )
nenv=nenvA = strucA.nenv
nenvB = strucB.nenv
else:
# top up missing atoms with isolated environments
# first checks which atoms are present
zspecies = sorted(list(set(strucB.zspecies+strucA.zspecies)))
nspecies = []
for z in zspecies:
nz = max(strucA.getnz(z),strucB.getnz(z))
nspecies.append((z,nz))
nenv += nz
nenvA = nenvB = nenv
nspeciesA = nspeciesB = nspecies
np.set_printoptions(linewidth=500,precision=4)
kk = np.zeros((nenvA,nenvB),float)
ika = 0
ikb = 0
for za, nza in nspeciesA:
for ia in xrange(nza):
envA = strucA.getenv(za, ia)
ikb = 0
for zb, nzb in nspeciesB:
for ib in xrange(nzb):
envB = strucB.getenv(zb, ib)
if alchem.mu > 0 and (strucA.ismissing(za, ia) ^ strucB.ismissing(zb, ib)):
# includes a penalty dependent on "mu", in a way that is consistent with the definition of kernel distance
kk[ika,ikb] = exp(-alchem.mu)
else:
if za == zb or not xspecies: #uncomment to zero out kernels between different species
kk[ika,ikb] = envk(envA, envB, alchem)**zeta
else: kk[ika,ikb] = 0
ikb+=1
ika+=1
aidx = {}
ika=0
for za, nza in nspeciesA:
aidx[za] = range(ika,ika+nza)
ika+=nza
ikb=0
bidx = {}
for zb, nzb in nspeciesB:
bidx[zb] = range(ikb,ikb+nzb)
ikb+=nzb
if fout != None:
# prints out similarity information for the environment pairs
fout.write("# atomic species in the molecules (possibly topped up with dummy isolated atoms): \n")
for za, nza in nspeciesA:
for ia in xrange(nza): fout.write(" %d " % (za) )
fout.write("\n");
for zb, nzb in nspeciesB:
for ib in xrange(nzb): fout.write(" %d " % (zb) )
fout.write("\n");
fout.write("# environment kernel matrix: \n")
for r in kk:
for e in r:
fout.write("%20.14e " % (e) )
fout.write("\n")
#fout.write("# environment kernel eigenvalues: \n")
#ev = np.linalg.eigvals(kk)
#for e in ev:
# fout.write("(%8.4e,%8.4e) " % (e.real,e.imag) )
#fout.write("\n");
# Now we have the matrix of scalar products.
# We can first find the optimal scalar product kernel
# we must find the maximum "cost"
if mode == "match":
if periodic and nenvA != nenvB:
nenv = lcm(nenvA, nenvB)
hun = lcm_best_cost(1-kk)
else:
hun=best_cost(1.0-kk)
cost = 1-hun/nenv
elif mode == "permanent":
# there is no place to hide: cross-species environments are not necessarily zero
if peps>0: cost = mcperm(kk, peps)
else: cost = xperm(kk)
cost = cost/np.math.factorial(nenv)/nenv
elif mode == "rematch":
cost=rematch(kk, gamma, 1e-6) # hard-coded residual error for regularized gamma
# print cost, kk.sum()/(nenv*nenv), envk(strucA.globenv, strucB.globenv, alchem)
elif mode == "average":
cost = kk.sum()/(nenvA*nenvB)
# print 'elem: {}'.format(kk.sum())
# print 'elem norm: {}'.format(cost)
# print 'avg norm: {}'.format((nenvA*nenvB))
else: raise ValueError("Unknown global fingerprint mode ", mode)
return cost,kk
class structurelist(list):
def __init__(self, basedir="tmpstructures"):
self.basedir=basedir
# create the folder if it is not there
if not os.path.exists(basedir):os.makedirs(basedir)
self.count=0
def exists(self, index):
# return true if the file associated with index exists, false otherwise
f=self.basedir+'/sl_'+str(index)+'.dat'
return os.path.isfile(f)
# @profile
def append(self, element):
#pickle the element for later use
ind=self.count
f=self.basedir+'/sl_'+str(ind)+'.dat'
file = open(f,"wb")
gc.disable()
pickle.dump(element, file,protocol=pickle.HIGHEST_PROTOCOL) # HIGHEST_PROTOCOL is 2 in py 2.7
file.close()
gc.enable()
self.count+=1
# @profile
def __getitem__(self, index):
f = self.basedir+'/sl_'+str(index)+'.dat'
try:
file = open(f,"rb")
except IOError:
raise IOError("Cannot load descriptors for index %d" % (index) )
gc.disable()
l = pickle.load(file)
file.close()
gc.enable()
return l
| mit | 6,443,952,005,650,613,000 | 35.13783 | 310 | 0.574373 | false | 3.350462 | false | false | false |
why2pac/dp-tornado | dp_tornado/helper/io/image/driver/wand.py | 1 | 1274 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from dp_tornado.engine.helper import Helper as dpHelper
try:
from wand.image import Image
except ImportError:
Image = None
class WandHelper(dpHelper):
@property
def Image(self):
return Image
def load(self, src):
return Image(filename=src)
def size(self, src):
return src.width, src.height
def crop(self, img, left, top, right, bottom):
img.crop(left, top, right, bottom)
return img
def resize(self, img, width, height, kwargs=None):
if kwargs is None:
img.resize(width, height)
else:
raise Exception('Not implemented method.')
return img
def border(self, img, border, border_color):
raise Exception('Not implemented method.')
def radius(self, img, radius, border, border_color):
raise Exception('Not implemented method.')
def colorize(self, img, colorize):
raise Exception('Not implemented method.')
def save(self, img, ext, dest, kwargs):
if ext.lower() == 'jpg':
ext = 'jpeg'
img.format = ext
img.save(filename=dest)
return True
def iter_seqs(self, img, kwargs):
yield 0, img
| mit | -7,070,461,348,547,270,000 | 22.163636 | 56 | 0.605965 | false | 4.057325 | false | false | false |
jinzekid/codehub | python/py3_6venv/spider_jd_phone/spider_jd_phone/spiders/jd_phone.py | 1 | 3028 | # -*- coding: utf-8 -*-
import scrapy
import re
import urllib.request
from scrapy.http import Request
from spider_jd_phone.items import SpiderJdPhoneItem
class JdPhoneSpider(scrapy.Spider):
name = 'jd_phone'
allowed_domains = ['jd.com']
str_keyword = '手机京东自营'
encode_keyword = urllib.request.quote(str_keyword)
url = 'https://search.jd.com/Search?keyword=' + encode_keyword + '&enc=utf-8&qrst' \
'=1&rt' \
'=1&stop=1&spm=2.1.0&vt=2&page=1&s=1&click=0'
# start_urls = [url]
# def start_requests(self):
# print(">>>进行第一次爬取<<<")
# print("爬取网址:%s" % self.url)
# yield Request(self.encode_url,
# headers={
# 'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
# })
# 设置要爬取用户的uid,为后续构造爬取网址做准备
# uid = "19940007"
# start_urls = ["http://19940007.blog.hexun.com/p1/default.html"]
def start_requests(self):
print(">>>进行第一次爬取<<<")
# 首次爬取模拟成浏览器进行
# yield Request(
# "http://" + str(self.uid) + ".blog.hexun.com/p1/default.html",
# headers={
# 'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) "
# "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
# })
url = "https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA%E4%BA%AC%E4%B8%9C%E8%87%AA%E8%90%A5&enc=utf-8&qrst=1&rt%27=1&stop=1&spm=2.1.0&vt=2&page=1&s=1&click=0"
print(url)
yield Request("https://search.jd.com/Search?keyword=" +
self.str_keyword + "&enc=utf-8&qrst=1&rt'=1&stop=1&spm=2.1.0&vt=2&page=1&s=1&click=0",
headers={
'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) "
"AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36"
})
def parse(self, response):
print(">>>parsing...<<<")
item = SpiderJdPhoneItem()
# print(str(response.body))
file_object = open('test.html', 'wb')
file_object.write(response.body)
file_object.close()
item['price'] = response.xpath("//div["
"@class='p-price']//i/text()").extract()
item['name'] = response.xpath("//div[@class='p-name "
"p-name-type-2']//em").extract()
print("获取item:{}".format(item))
print("长度:%s" % len(item['price']))
print("长度:%s" % len(item['name']))
print("=====================")
yield item
| gpl-3.0 | -3,422,459,829,960,525,300 | 39.760563 | 173 | 0.49689 | false | 2.911469 | false | false | false |
CDSherrill/psi4 | samples/psi4numpy/rhf/input.py | 30 | 3412 | #! A simple Psi 4 input script to compute a SCF reference using Psi4's libJK
import time
import numpy as np
import psi4
psi4.set_output_file("output.dat", False)
# Benzene
mol = psi4.geometry("""
0 1
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")
psi4.set_options({"basis": "aug-cc-pVDZ",
"scf_type": "df",
"e_convergence": 1e-8
})
# Set tolerances
maxiter = 12
E_conv = 1.0E-6
D_conv = 1.0E-5
# Integral generation from Psi4's MintsHelper
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option("BASIS"))
mints = psi4.core.MintsHelper(wfn.basisset())
S = mints.ao_overlap()
# Get nbf and ndocc for closed shell molecules
nbf = wfn.nso()
ndocc = wfn.nalpha()
if wfn.nalpha() != wfn.nbeta():
raise PsiException("Only valid for RHF wavefunctions!")
psi4.core.print_out('\nNumber of occupied orbitals: %d\n' % ndocc)
psi4.core.print_out('Number of basis functions: %d\n\n' % nbf)
# Build H_core
V = mints.ao_potential()
T = mints.ao_kinetic()
H = T.clone()
H.add(V)
# Orthogonalizer A = S^(-1/2)
A = mints.ao_overlap()
A.power(-0.5, 1.e-16)
# Diagonalize routine
def build_orbitals(diag):
Fp = psi4.core.triplet(A, diag, A, True, False, True)
Cp = psi4.core.Matrix(nbf, nbf)
eigvecs = psi4.core.Vector(nbf)
Fp.diagonalize(Cp, eigvecs, psi4.core.DiagonalizeOrder.Ascending)
C = psi4.core.doublet(A, Cp, False, False)
Cocc = psi4.core.Matrix(nbf, ndocc)
Cocc.np[:] = C.np[:, :ndocc]
D = psi4.core.doublet(Cocc, Cocc, False, True)
return C, Cocc, D
# Build core orbitals
C, Cocc, D = build_orbitals(H)
# Setup data for DIIS
t = time.time()
E = 0.0
Enuc = mol.nuclear_repulsion_energy()
Eold = 0.0
# Initialize the JK object
jk = psi4.core.JK.build(wfn.basisset())
jk.set_memory(int(1.25e8)) # 1GB
jk.initialize()
jk.print_header()
diis_obj = psi4.p4util.solvers.DIIS(max_vec=3, removal_policy="largest")
psi4.core.print_out('\nTotal time taken for setup: %.3f seconds\n' % (time.time() - t))
psi4.core.print_out('\nStart SCF iterations:\n\n')
t = time.time()
for SCF_ITER in range(1, maxiter + 1):
# Compute JK
jk.C_left_add(Cocc)
jk.compute()
jk.C_clear()
# Build Fock matrix
F = H.clone()
F.axpy(2.0, jk.J()[0])
F.axpy(-1.0, jk.K()[0])
# DIIS error build and update
diis_e = psi4.core.triplet(F, D, S, False, False, False)
diis_e.subtract(psi4.core.triplet(S, D, F, False, False, False))
diis_e = psi4.core.triplet(A, diis_e, A, False, False, False)
diis_obj.add(F, diis_e)
# SCF energy and update
FH = F.clone()
FH.add(H)
SCF_E = FH.vector_dot(D) + Enuc
dRMS = diis_e.rms()
psi4.core.print_out('SCF Iteration %3d: Energy = %4.16f dE = % 1.5E dRMS = %1.5E\n'
% (SCF_ITER, SCF_E, (SCF_E - Eold), dRMS))
if (abs(SCF_E - Eold) < E_conv) and (dRMS < D_conv):
break
Eold = SCF_E
# DIIS extrapolate
F = diis_obj.extrapolate()
# Diagonalize Fock matrix
C, Cocc, D = build_orbitals(F)
if SCF_ITER == maxiter:
psi4.clean()
raise Exception("Maximum number of SCF cycles exceeded.\n")
psi4.core.print_out('Total time for SCF iterations: %.3f seconds \n\n' % (time.time() - t))
#print(psi4.energy("SCF"))
psi4.core.print_out('Final SCF energy: %.8f hartree\n' % SCF_E)
psi4.compare_values(-76.0033389840197202, SCF_E, 6, 'SCF Energy')
| lgpl-3.0 | -9,054,888,747,358,620,000 | 23.724638 | 91 | 0.62925 | false | 2.430199 | false | false | false |
txtbits/daw-python | pygame/pong/pong [full]/info.py | 1 | 7158 | # -*- coding: utf-8 -*-
'''
Created on 10/01/2012
@author: lm / @fsalamero
'''
import pygame
import sys
import time
from pygame.locals import *
#------------------------------------------------------------------#
# Inicialización de Pygame
#------------------------------------------------------------------#
#pygame.mixer.pre_init(44100,16,2,1024)
pygame.init()
#------------------------------------------------------------------#
# Definición de variables
#------------------------------------------------------------------#
fps = 60
tiempo = 0
BLANCO = (255,255,255)
AMARILLO = (255,255,0)
pelotaX = 50
pelotaY = 50
pelotaDX = 5
pelotaDY = 5
raquetaX = 50
raquetaY = 250
raquetaDY = 5
raqueta2X = 740
raqueta2Y = 250
raqueta2DY = 5
puntos1 = 0
puntos2 = 0
tipoLetra = pygame.font.Font('data/Grandezza.ttf', 96)
tipoLetra2 = pygame.font.Font('data/Grandezza.ttf', 24)
tipoLetra3 = pygame.font.Font('data/Grandezza.ttf', 48)
sonidoPelota = pygame.mixer.Sound('data/sonidoPelota.wav')
sonidoRaqueta = pygame.mixer.Sound('data/sonidoRaqueta.wav')
sonidoError = pygame.mixer.Sound('data/sonidoError.wav')
sonidoAplausos = pygame.mixer.Sound('data/sonidoAplausos.wav')
sonidoLaser = pygame.mixer.Sound('data/onidoLaser.wav')
imagenDeFondo = 'data/pingpong.jpg'
#------------------------------------------------------------------#
# Creación de la pantalla de juego (SURFACE)
#------------------------------------------------------------------#
visor = pygame.display.set_mode((800,600),FULLSCREEN)
#------------------------------------------------------------------#
# Funciones del programa
#------------------------------------------------------------------#
def pausa():
# Esta función hace que se espera hasta que se pulse una tecla
esperar = True
while esperar:
for evento in pygame.event.get():
if evento.type == KEYDOWN:
esperar = False
sonidoLaser.play()
def mostrarIntro():
# Muestra la pantalla de inicio y espera
fondo = pygame.image.load(imagenDeFondo).convert()
visor.blit(fondo, (0,0))
mensaje1 = 'PONG'
texto1 = tipoLetra.render(mensaje1, True, AMARILLO)
mensaje2 = 'Pulsa una tecla para comenzar'
texto2 = tipoLetra2.render(mensaje2, True, BLANCO)
visor.blit(texto1, (50,100,200,100))
visor.blit(texto2, (235,340,350,30))
pygame.display.update()
pausa()
def dibujarJuego():
# Dibuja la mesa, la pelota, las raquetas y los marcadores
# Primero borra la pantalla en negro
visor.fill((0,0,0))
# Dibuja la pelota y las raquetas
pygame.draw.circle(visor, BLANCO, (pelotaX,pelotaY),4,0)
pygame.draw.rect(visor, BLANCO, (raquetaX,raquetaY,10,50))
pygame.draw.rect(visor, BLANCO, (raqueta2X,raqueta2Y,10,50))
# Dibuja la red
for i in range(10):
pygame.draw.rect(visor, BLANCO, (398,10+60*i,4,30))
# Dibuja los marcadores
marcador1 = tipoLetra.render(str(puntos1), True, BLANCO)
marcador2 = tipoLetra.render(str(puntos2), True, BLANCO)
visor.blit(marcador1, (300,20,50,50))
visor.blit(marcador2, (450,20,50,50))
# Y, finalmente, lo vuelca todo en pantalla
pygame.display.update()
def decirGanador():
# Decir qué jugador ha ganado y esperar
sonidoAplausos.play()
if puntos1 == 11:
ganador = 'Jugador 1'
else:
ganador = 'Jugador 2'
mensaje = 'Ganador: '+ ganador
texto = tipoLetra3.render(mensaje, True, AMARILLO)
visor.blit(texto, (110,350,600,100))
pygame.display.update()
pausa()
#------------------------------------------------------------------#
# Cuerpo principal del juego
#------------------------------------------------------------------#
pygame.mouse.set_visible(False)
mostrarIntro()
time.sleep(0.75)
while True:
#----------------------------------------------------------------#
# Gestionar la velocidad del juego
#----------------------------------------------------------------#
if pygame.time.get_ticks()-tiempo < 1000/fps:
continue
tiempo = pygame.time.get_ticks()
#----------------------------------------------------------------#
# Bucle de eventos: Mirar si se quiere terminar el juego
#----------------------------------------------------------------#
for evento in pygame.event.get():
if evento.type == KEYDOWN:
if evento.key == K_ESCAPE:
pygame.quit()
sys.exit()
#----------------------------------------------------------------#
# Mover la pelota
#----------------------------------------------------------------#
# Primero hay que vigilar por si hay que cambiar de dirección
# Mira si se impacta con el jugador 1
diff1 = pelotaY-raquetaY
if pelotaX == raquetaX + 10 and diff1 >= 0 and diff1 <= 50:
pelotaDX = -pelotaDX
sonidoRaqueta.play()
# Mira si se impacta con el jugador 2
diff2 = pelotaY-raqueta2Y
if pelotaX == raqueta2X and diff2 >= 0 and diff2 <= 50:
pelotaDX = -pelotaDX
sonidoRaqueta.play()
# Mira si se ha llegado al borde de la pantalla
if pelotaY < 5 or pelotaY > 595:
pelotaDY = -pelotaDY
sonidoPelota.play()
# Mueve la pelota
pelotaX += pelotaDX
pelotaY += pelotaDY
#----------------------------------------------------------------#
# Mover las raquetas
#----------------------------------------------------------------#
# Mira si el jugador 1 mueve la raqueta
teclasPulsadas = pygame.key.get_pressed()
if teclasPulsadas[K_a]:
raquetaY += raquetaDY
if teclasPulsadas[K_q]:
raquetaY -= raquetaDY
# Vigilar que la raqueta no se salga de la pantalla
if raquetaY < 0:
raquetaY = 0
elif raquetaY > 550:
raquetaY = 550
# Ahora hacemos lo mismo con el jugador 2
if teclasPulsadas[K_l]:
raqueta2Y += raqueta2DY
if teclasPulsadas[K_p]:
raqueta2Y -= raqueta2DY
if raqueta2Y < 0:
raqueta2Y = 0
elif raqueta2Y > 550:
raqueta2Y = 550
#----------------------------------------------------------------#
# Mirar si se ha ganado un punto
#----------------------------------------------------------------#
# Primero, mira si la pelota ha llegado al borde
if pelotaX > 800 or pelotaX < 0:
# En tal caso, recolocar juego y cambiar puntuación
sonidoError.play()
time.sleep(1)
raquetaY = 250
raqueta2Y = 250
if pelotaX > 800:
puntos1 = puntos1 + 1
else:
puntos2 = puntos2 + 1
pelotaX = 400
pelotaDX = -pelotaDX
#----------------------------------------------------------------#
# Dibujar el juego en pantalla
#----------------------------------------------------------------#
dibujarJuego()
#---------------------------------------------------------------#
# Comprobar si el juego se ha acabado
#---------------------------------------------------------------#
if puntos1 == 11 or puntos2 ==11:
decirGanador()
puntos1 = 0
puntos2 = 0
visor.fill((0,0,0))
mostrarIntro()
| mit | 7,379,330,731,885,000,000 | 34.231527 | 70 | 0.495036 | false | 3.241614 | false | false | false |
brunogamacatao/portalsaladeaula | django/contrib/sitemaps/tests/basic.py | 1 | 5379 | from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
class SitemapTests(TestCase):
urls = 'django.contrib.sitemaps.tests.urls'
def setUp(self):
self.old_USE_L10N = settings.USE_L10N
self.old_Site_meta_installed = Site._meta.installed
# Create a user that will double as sitemap content
User.objects.create_user('testuser', '[email protected]', 's3krit')
def tearDown(self):
settings.USE_L10N = self.old_USE_L10N
Site._meta.installed = self.old_Site_meta_installed
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEquals(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://example.com/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
# Localization should be active
settings.USE_L10N = True
activate('fr')
self.assertEqual(u'0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today().strftime('%Y-%m-%d'))
deactivate()
def test_generic_sitemap(self):
"A minimal generic sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/generic/sitemap.xml')
expected = ''
for username in User.objects.values_list("username", flat=True):
expected += "<url><loc>http://example.com/accounts/%s/</loc></url>" %username
# Check for all the important bits:
self.assertEquals(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
%s
</urlset>
""" %expected)
@skipUnless("django.contrib.flatpages" in settings.INSTALLED_APPS, "django.contrib.flatpages app not installed.")
def test_flatpage_sitemap(self):
"Basic FlatPage sitemap test"
# Import FlatPage inside the test so that when django.contrib.flatpages
# is not installed we don't get problems trying to delete Site
# objects (FlatPage has an M2M to Site, Site.delete() tries to
# delete related objects, but the M2M table doesn't exist.
from django.contrib.flatpages.models import FlatPage
public = FlatPage.objects.create(
url=u'/public/',
title=u'Public Page',
enable_comments=True,
registration_required=False,
)
public.sites.add(settings.SITE_ID)
private = FlatPage.objects.create(
url=u'/private/',
title=u'Private Page',
enable_comments=True,
registration_required=True
)
private.sites.add(settings.SITE_ID)
response = self.client.get('/flatpages/sitemap.xml')
# Public flatpage should be in the sitemap
self.assertContains(response, '<loc>http://example.com%s</loc>' % public.url)
# Private flatpage should not be in the sitemap
self.assertNotContains(response, '<loc>http://example.com%s</loc>' % private.url)
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEquals(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site._meta.installed = True
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site.objects.get_current()
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
| bsd-3-clause | -7,294,728,011,007,091,000 | 41.690476 | 125 | 0.661089 | false | 3.981495 | true | false | false |
Kitware/tonic-data-generator | python/tonic/vtk/__init__.py | 1 | 1767 | from vtk import *
from tonic import camera as tc
def update_camera(renderer, cameraData):
camera = renderer.GetActiveCamera()
camera.SetPosition(cameraData['position'])
camera.SetFocalPoint(cameraData['focalPoint'])
camera.SetViewUp(cameraData['viewUp'])
def create_spherical_camera(renderer, dataHandler, phiValues, thetaValues):
camera = renderer.GetActiveCamera()
return tc.SphericalCamera(dataHandler, camera.GetFocalPoint(), camera.GetPosition(), camera.GetViewUp(), phiValues, thetaValues)
def create_cylindrical_camera(renderer, dataHandler, phiValues, translationValues):
camera = renderer.GetActiveCamera()
return tc.CylindricalCamera(dataHandler, camera.GetFocalPoint(), camera.GetPosition(), camera.GetViewUp(), phiValues, translationValues)
class CaptureRenderWindow(object):
def __init__(self, magnification=1):
self.windowToImage = vtkWindowToImageFilter()
self.windowToImage.SetMagnification(magnification)
self.windowToImage.SetInputBufferTypeToRGB()
self.windowToImage.ReadFrontBufferOn()
self.writer = None
def SetRenderWindow(self, renderWindow):
self.windowToImage.SetInput(renderWindow)
def SetFormat(self, mimeType):
if mimeType == 'image/png':
self.writer = vtkPNGWriter()
self.writer.SetInputConnection(self.windowToImage.GetOutputPort())
elif mimeType == 'image/jpg':
self.writer = vtkJPEGWriter()
self.writer.SetInputConnection(self.windowToImage.GetOutputPort())
def writeImage(self, path):
if self.writer:
self.windowToImage.Modified()
self.windowToImage.Update()
self.writer.SetFileName(path)
self.writer.Write()
| bsd-3-clause | -4,538,920,584,704,970,000 | 41.071429 | 140 | 0.711941 | false | 4.043478 | false | false | false |
robbiet480/home-assistant | tests/components/deconz/test_deconz_event.py | 6 | 3213 | """Test deCONZ remote events."""
from copy import deepcopy
from homeassistant.components.deconz.deconz_event import CONF_DECONZ_EVENT
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.common import async_capture_events
SENSORS = {
"1": {
"id": "Switch 1 id",
"name": "Switch 1",
"type": "ZHASwitch",
"state": {"buttonevent": 1000},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"2": {
"id": "Switch 2 id",
"name": "Switch 2",
"type": "ZHASwitch",
"state": {"buttonevent": 1000},
"config": {"battery": 100},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"3": {
"id": "Switch 3 id",
"name": "Switch 3",
"type": "ZHASwitch",
"state": {"buttonevent": 1000, "gesture": 1},
"config": {"battery": 100},
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
"4": {
"id": "Switch 4 id",
"name": "Switch 4",
"type": "ZHASwitch",
"state": {"buttonevent": 1000, "gesture": 1},
"config": {"battery": 100},
"uniqueid": "00:00:00:00:00:00:00:04-00",
},
}
async def test_deconz_events(hass):
"""Test successful creation of deconz events."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
gateway = await setup_deconz_integration(hass, get_state_response=data)
assert "sensor.switch_1" not in gateway.deconz_ids
assert "sensor.switch_1_battery_level" not in gateway.deconz_ids
assert "sensor.switch_2" not in gateway.deconz_ids
assert "sensor.switch_2_battery_level" in gateway.deconz_ids
assert len(hass.states.async_all()) == 3
assert len(gateway.events) == 4
switch_1 = hass.states.get("sensor.switch_1")
assert switch_1 is None
switch_1_battery_level = hass.states.get("sensor.switch_1_battery_level")
assert switch_1_battery_level is None
switch_2 = hass.states.get("sensor.switch_2")
assert switch_2 is None
switch_2_battery_level = hass.states.get("sensor.switch_2_battery_level")
assert switch_2_battery_level.state == "100"
events = async_capture_events(hass, CONF_DECONZ_EVENT)
gateway.api.sensors["1"].update({"state": {"buttonevent": 2000}})
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data == {
"id": "switch_1",
"unique_id": "00:00:00:00:00:00:00:01",
"event": 2000,
}
gateway.api.sensors["3"].update({"state": {"buttonevent": 2000}})
await hass.async_block_till_done()
assert len(events) == 2
assert events[1].data == {
"id": "switch_3",
"unique_id": "00:00:00:00:00:00:00:03",
"event": 2000,
"gesture": 1,
}
gateway.api.sensors["4"].update({"state": {"gesture": 0}})
await hass.async_block_till_done()
assert len(events) == 3
assert events[2].data == {
"id": "switch_4",
"unique_id": "00:00:00:00:00:00:00:04",
"event": 1000,
"gesture": 0,
}
await gateway.async_reset()
assert len(hass.states.async_all()) == 0
assert len(gateway.events) == 0
| apache-2.0 | 3,113,315,831,892,946,400 | 29.028037 | 77 | 0.580454 | false | 3.10735 | false | false | false |
skarlekar/chehara | process_events.py | 1 | 3180 | import os
import json
import boto3 as boto
from slackclient import SlackClient
import requests
from datetime import datetime
def printJson(jsonObject, label):
"""Pretty print JSON document with indentation."""
systime = str(datetime.now())
print("********************************* {} *********************************".format(label))
print("--------------------------------- {} ---------------------------------".format(systime))
print(json.dumps(jsonObject, indent=4, sort_keys=True))
print("----------------------------------------------------------------------")
def getTeam(team_id):
"""Given a team id, lookup the team data from DynamoDB."""
table_name = os.environ['SLACK_TEAMS']
dynamodb = boto.resource('dynamodb')
table = dynamodb.Table(table_name)
table_key = {'team_id': team_id}
result = table.get_item(Key=table_key)['Item']
return result
def getAccessToken(team):
"""Extract the access token from the team data given."""
access_token = None
if team:
access_token = team['access_token']
return access_token
def process(event, context):
"""
Process the incoming Slack event.
If the incoming event is a file_share event and the file shared is an image,
lookup the team data from the database based on the team id in the event.
Enrich the event with a new object to include the access_token and other
details and return the information. StepFunction will take this enriched
event to the next layer of Lambda functions to process.
"""
# Event comes as a JSON. No need to convert.
body = event
printJson(body, "process_events Input")
team_id = body['team_id']
team = getTeam(team_id)
access_token = getAccessToken(team)
slack_event = body['event']
printJson(slack_event, "slack_event")
slack_event_type = slack_event['type']
slack_event_channel = slack_event['channel']
slack_event_subtype = slack_event['subtype']
slack_event_ts = slack_event['ts']
slack_event_username = slack_event['username']
slack_event_file = None
celebrity_detected = False
celebs = None
if (slack_event_subtype) and (slack_event_type == 'message') and (slack_event_subtype == 'file_share'):
slack_event_file = slack_event['file']
file_type = slack_event_file['filetype']
if file_type == 'jpg' or file_type == 'png':
file_url = slack_event_file['url_private']
process_events = {
'team_id': team_id,
'team': team,
'slack_access_token': access_token,
'slack_event_type': slack_event_type,
'slack_event_channel': slack_event_channel,
'slack_event_subtype': slack_event_subtype,
'slack_event_ts': slack_event_ts,
'slack_event_username': slack_event_username,
'slack_event_filetype': file_type,
'slack_event_file_url': file_url
}
event['process_events'] = process_events
printJson(event, "Return this event downstream")
print("****** Done with process_events")
return event
| mit | -7,964,160,720,100,211,000 | 37.780488 | 107 | 0.593711 | false | 4.040661 | false | false | false |
SublimeText/Pywin32 | lib/x64/win32comext/adsi/demos/search.py | 10 | 4265 | from win32com.adsi import adsi
from win32com.adsi.adsicon import *
from win32com.adsi import adsicon
import pythoncom, pywintypes, win32security
options = None # set to optparse options object
ADsTypeNameMap = {}
def getADsTypeName(type_val):
# convert integer type to the 'typename' as known in the headerfiles.
if not ADsTypeNameMap:
for n, v in adsicon.__dict__.items():
if n.startswith("ADSTYPE_"):
ADsTypeNameMap[v] = n
return ADsTypeNameMap.get(type_val, hex(type_val))
def _guid_from_buffer(b):
return pywintypes.IID(b, True)
def _sid_from_buffer(b):
return str(pywintypes.SID(b))
_null_converter = lambda x: x
converters = {
'objectGUID' : _guid_from_buffer,
'objectSid' : _sid_from_buffer,
'instanceType' : getADsTypeName,
}
def log(level, msg, *args):
if options.verbose >= level:
print("log:", msg % args)
def getGC():
cont = adsi.ADsOpenObject("GC:", options.user, options.password, 0, adsi.IID_IADsContainer)
enum = adsi.ADsBuildEnumerator(cont)
# Only 1 child of the global catalog.
for e in enum:
gc = e.QueryInterface(adsi.IID_IDirectorySearch)
return gc
return None
def print_attribute(col_data):
prop_name, prop_type, values = col_data
if values is not None:
log(2, "property '%s' has type '%s'", prop_name, getADsTypeName(prop_type))
value = [converters.get(prop_name, _null_converter)(v[0]) for v in values]
if len(value) == 1:
value = value[0]
print(" %s=%r" % (prop_name, value))
else:
print(" %s is None" % (prop_name,))
def search():
gc = getGC()
if gc is None:
log(0, "Can't find the global catalog")
return
prefs = [(ADS_SEARCHPREF_SEARCH_SCOPE, (ADS_SCOPE_SUBTREE,))]
hr, statuses = gc.SetSearchPreference(prefs)
log(3, "SetSearchPreference returned %d/%r", hr, statuses)
if options.attributes:
attributes = options.attributes.split(",")
else:
attributes = None
h = gc.ExecuteSearch(options.filter, attributes)
hr = gc.GetNextRow(h)
while hr != S_ADS_NOMORE_ROWS:
print("-- new row --")
if attributes is None:
# Loop over all columns returned
while 1:
col_name = gc.GetNextColumnName(h)
if col_name is None:
break
data = gc.GetColumn(h, col_name)
print_attribute(data)
else:
# loop over attributes specified.
for a in attributes:
try:
data = gc.GetColumn(h, a)
print_attribute(data)
except adsi.error as details:
if details[0] != E_ADS_COLUMN_NOT_SET:
raise
print_attribute( (a, None, None) )
hr = gc.GetNextRow(h)
gc.CloseSearchHandle(h)
def main():
global options
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-v", "--verbose",
action="count", default=1,
help="increase verbosity of output")
parser.add_option("-q", "--quiet",
action="store_true",
help="suppress output messages")
parser.add_option("-U", "--user",
help="specify the username used to connect")
parser.add_option("-P", "--password",
help="specify the password used to connect")
parser.add_option("", "--filter",
default = "(&(objectCategory=person)(objectClass=User))",
help="specify the search filter")
parser.add_option("", "--attributes",
help="comma sep'd list of attribute names to print")
options, args = parser.parse_args()
if options.quiet:
if options.verbose != 1:
parser.error("Can not use '--verbose' and '--quiet'")
options.verbose = 0
if args:
parser.error("You need not specify args")
search()
if __name__=='__main__':
main()
| bsd-3-clause | 5,202,562,121,101,393,000 | 31.310606 | 95 | 0.568347 | false | 3.818263 | false | false | false |
ncgmp09/ncgmp09-online | models.py | 1 | 14732 | from django.contrib.gis.db import models
from django.contrib.gis.gdal import DataSource
from django.core.exceptions import ValidationError
from geomaps.validation import GdbValidator
from geomaps.dataloader import GdbLoader
from geomaps.postprocess import StandardLithologyProcessor, GeologicEventProcessor
from gsconfig.layers import LayerGenerator
from gsmlp.generators import GeologicUnitViewGenerator
from vocab.parser import updateVocabulary
# Map is a class that represents the upload of a single NCGMP File Geodatabase
class GeoMap(models.Model):
class Meta:
db_table = 'geomaps'
verbose_name = 'Geologic Map'
name = models.CharField(max_length=50)
title = models.CharField(max_length=200)
fgdb_path = models.CharField(max_length=200)
map_type = models.CharField(max_length=200, choices=(('Direct observation', 'New mapping'), ('Compilation', 'Compilation')))
metadata_url = models.URLField(blank=True)
is_loaded = models.BooleanField(default=False)
def __unicode__(self):
return self.name
def clean(self):
try:
self.dataSource = DataSource(self.fgdb_path)
except:
raise ValidationError(self.fgdb_path + " could not be opened by GDAL")
else:
validator = GdbValidator(self.dataSource)
valid = validator.isValid()
if not valid:
err = ValidationError(validator.validationMessage())
err.asJson = validator.logs.asJson()
raise err
def load(self):
loader = GdbLoader(self)
loader.load()
self.is_loaded = True
self.save()
def populateRepresentativeValues(self):
for dmu in self.descriptionofmapunits_set.all():
dmu.generateRepresentativeValues()
def createGsmlp(self):
geologicUnitViewGen = GeologicUnitViewGenerator(self)
geologicUnitViewGen.buildGeologicUnitViews()
def createLayers(self):
layerGen = LayerGenerator(self)
return layerGen.createNewLayers()
# The following are classes from the GeoSciML Portrayal Schema
class GeologicUnitView(models.Model):
class Meta:
db_table = 'geologicunitview'
verbose_name = "GeologicUnitView"
owningmap = models.ForeignKey('GeoMap')
identifier = models.CharField(max_length=200, unique=True)
name = models.CharField(max_length=200, blank=True)
description = models.TextField()
geologicUnitType = models.CharField(max_length=200, blank=True)
rank = models.CharField(max_length=200, blank=True)
lithology = models.CharField(max_length=200, blank=True)
geologicHistory = models.TextField()
observationMethod = models.CharField(max_length=200, blank=True)
positionalAccuracy = models.CharField(max_length=200, blank=True)
source = models.CharField(max_length=200, blank=True)
geologicUnitType_uri = models.CharField(max_length=200)
representativeLithology_uri = models.CharField(max_length=200)
representativeAge_uri = models.CharField(max_length=200)
representativeOlderAge_uri = models.CharField(max_length=200)
representativeYoungerAge_uri = models.CharField(max_length=200)
specification_uri = models.CharField(max_length=200)
metadata_uri = models.CharField(max_length=200)
genericSymbolizer = models.CharField(max_length=200, blank=True)
shape = models.MultiPolygonField(srid=4326)
objects = models.GeoManager()
def __unicode__(self):
return self.identifier
# The following are classes that represent tables from an NCGMP Database
# Each class contains a ForeignKey to the GeoMap Class, which is the upload
# that the feature came into the system with
class MapUnitPolys(models.Model):
class Meta:
db_table = 'mapunitpolys'
verbose_name = 'Map Unit Polygon'
verbose_name_plural = 'MapUnitPolys'
owningmap = models.ForeignKey('GeoMap')
mapunitpolys_id = models.CharField(max_length=200, unique=True)
mapunit = models.ForeignKey('DescriptionOfMapUnits', db_column='mapunit')
identityconfidence = models.CharField(max_length=200)
label = models.CharField(max_length=200, blank=True)
symbol = models.CharField(max_length=200, blank=True)
notes = models.TextField(blank=True)
datasourceid = models.ForeignKey('DataSources', db_column='datasourceid', to_field='datasources_id')
shape = models.MultiPolygonField(srid=4326)
objects = models.GeoManager()
def __unicode__(self):
return self.mapunitpolys_id
class ContactsAndFaults(models.Model):
class Meta:
db_table = 'contactsandfaults'
verbose_name = 'Contact or Fault'
verbose_name_plural = 'ContactsAndFaults'
owningmap = models.ForeignKey('GeoMap')
contactsandfaults_id = models.CharField(max_length=200, unique=True)
type = models.CharField(max_length=200)
isconcealed = models.IntegerField()
existenceconfidence = models.CharField(max_length=200)
identityconfidence = models.CharField(max_length=200)
locationconfidencemeters = models.FloatField()
label = models.CharField(max_length=200, blank=True)
datasourceid = models.ForeignKey('DataSources', db_column='datasourceid', to_field='datasources_id')
notes = models.TextField(blank=True)
symbol = models.IntegerField()
shape = models.MultiLineStringField(srid=4326)
objects = models.GeoManager()
def __unicode__(self):
return self.contactsandfaults_id
class DescriptionOfMapUnits(models.Model):
class Meta:
db_table = 'descriptionofmapunits'
verbose_name = 'Description of a Map Unit'
verbose_name_plural = 'DescriptionOfMapUnits'
ordering = ['hierarchykey']
owningmap = models.ForeignKey('GeoMap')
descriptionofmapunits_id = models.CharField(max_length=200, unique=True)
mapunit = models.CharField(max_length=200)
label = models.CharField(max_length=200)
name = models.CharField(max_length=200)
fullname = models.CharField(max_length=200)
age = models.CharField(max_length=200, blank=True)
description = models.TextField()
hierarchykey = models.CharField(max_length=200)
paragraphstyle = models.CharField(max_length=200, blank=True)
areafillrgb = models.CharField(max_length=200)
areafillpatterndescription = models.CharField(max_length=200, blank=True)
descriptionsourceid = models.ForeignKey('DataSources', db_column='descriptionsourceid', to_field='datasources_id')
generallithologyterm = models.CharField(max_length=200, blank=True)
generallithologyconfidence = models.CharField(max_length=200, blank=True)
objects = models.GeoManager()
def __unicode__(self):
return self.name
def representativeValue(self):
repValues = self.representativevalue_set.all()
if repValues.count() > 0: return repValues[0]
else: return RepresentativeValue.objects.create(owningmap=self.owningmap, mapunit=self)
def generateRepresentativeValues(self):
StandardLithologyProcessor(self).guessRepresentativeLithology()
GeologicEventProcessor(self).guessRepresentativeAge()
def preferredAge(self):
extAttrIds = ExtendedAttributes.objects.filter(ownerid=self.descriptionofmapunits_id, property="preferredAge").values_list("valuelinkid", flat=True)
return GeologicEvents.objects.filter(geologicevents_id__in=extAttrIds)
def geologicHistory(self):
extAttrIds = ExtendedAttributes.objects.filter(ownerid=self.descriptionofmapunits_id).exclude(property="preferredAge").values_list("valuelinkid", flat=True)
return GeologicEvents.objects.filter(geologicevents_id__in=extAttrIds)
class DataSources(models.Model):
class Meta:
db_table = 'datasources'
verbose_name = 'Data Source'
verbose_name_plural = 'DataSources'
ordering = ['source']
owningmap = models.ForeignKey('GeoMap')
datasources_id = models.CharField(max_length=200, unique=True)
notes = models.TextField()
source = models.CharField(max_length=200)
objects = models.GeoManager()
def __unicode__(self):
return self.source
class Glossary(models.Model):
class Meta:
db_table = 'glossary'
verbose_name_plural = 'Glossary Entries'
ordering = ['term']
owningmap = models.ForeignKey('GeoMap')
glossary_id = models.CharField(max_length=200, unique=True)
term = models.CharField(max_length=200)
definition = models.CharField(max_length=200)
definitionsourceid = models.ForeignKey('DataSources', db_column='descriptionsourceid', to_field='datasources_id')
objects = models.GeoManager()
def __unicode__(self):
return self.term
class StandardLithology(models.Model):
class Meta:
db_table = 'standardlithology'
verbose_name_plural = 'Standard Lithology'
owningmap = models.ForeignKey('GeoMap')
standardlithology_id = models.CharField(max_length=200, unique=True)
mapunit = models.ForeignKey('descriptionofmapunits', db_column='mapunit')
parttype = models.CharField(max_length=200)
lithology = models.CharField(max_length=200)
proportionterm = models.CharField(max_length=200, blank=True)
proportionvalue = models.FloatField(max_length=200, blank=True, null=True)
scientificconfidence = models.CharField(max_length=200)
datasourceid = models.ForeignKey('DataSources', db_column='datasourceid', to_field='datasources_id')
objects = models.GeoManager()
def __unicode__(self):
return self.mapunit.mapunit + ': ' + self.lithology
class ExtendedAttributes(models.Model):
class Meta:
db_table = 'extendedattributes'
verbose_name_plural = 'ExtendedAttributes'
ordering = ['ownerid']
owningmap = models.ForeignKey('GeoMap')
extendedattributes_id = models.CharField(max_length=200)
ownertable = models.CharField(max_length=200)
ownerid = models.CharField(max_length=200)
property = models.CharField(max_length=200)
propertyvalue = models.CharField(max_length=200, blank=True)
valuelinkid = models.CharField(max_length=200, blank=True)
qualifier = models.CharField(max_length=200, blank=True)
notes = models.CharField(max_length=200, blank=True)
datasourceid = models.ForeignKey('DataSources', db_column='datasourceid', to_field='datasources_id')
objects = models.GeoManager()
def __unicode__(self):
return self.property + ' for ' + self.ownerid
class GeologicEvents(models.Model):
class Meta:
db_table = 'geologicevents'
verbose_name_plural = 'GeologicEvents'
ordering = ['event']
owningmap = models.ForeignKey('GeoMap')
geologicevents_id = models.CharField(max_length=200)
event = models.CharField(max_length=200)
agedisplay = models.CharField(max_length=200)
ageyoungerterm = models.CharField(max_length=200, blank=True)
ageolderterm = models.CharField(max_length=200, blank=True)
timescale = models.CharField(max_length=200, blank=True)
ageyoungervalue = models.FloatField(blank=True, null=True)
ageoldervalue = models.FloatField(blank=True, null=True)
notes = models.CharField(max_length=200, blank=True)
datasourceid = models.ForeignKey('DataSources', db_column='datasourceid', to_field='datasources_id')
objects = models.GeoManager()
def __unicode__(self):
return self.event + ': ' + self.agedisplay
def mapunits(self):
mapUnits = []
for ext in ExtendedAttributes.objects.filter(valuelinkid=self.geologicevents_id):
try: mapUnits.append(DescriptionOfMapUnits.objects.get(descriptionofmapunits_id=ext.ownerid))
except DescriptionOfMapUnits.DoesNotExist: continue
return mapUnits
def isPreferredAge(self, dmu):
if "preferredAge" in ExtendedAttributes.objects.filter(valuelinkid=self.geologicevents_id, ownerid=dmu.descriptionofmapunits_id).values_list("property", flat=True):
return True
else:
return False
def inGeologicHistory(self, dmu):
if ExtendedAttributes.objects.filter(valuelinkid=self.geologicevents_id, ownerid=dmu.descriptionofmapunits_id).exclude(property="preferredAge").count() > 0:
return True
else:
return False
# The following are vocabulary tables for storing a simplified view of CGI vocabularies
class Vocabulary(models.Model):
class Meta:
db_table = 'cgi_vocabulary'
verbose_name_plural = "Vocabularies"
name = models.CharField(max_length=200)
url = models.URLField()
def __unicode__(self):
return self.name
def update(self):
updateVocabulary(self)
class VocabularyConcept(models.Model):
class Meta:
db_table = 'cgi_vocabularyconcept'
ordering = [ 'label' ]
uri = models.CharField(max_length=200)
label = models.CharField(max_length=200)
definition = models.TextField()
vocabulary = models.ForeignKey("Vocabulary")
def __unicode__(self):
return self.label
class AgeTerm(models.Model):
class Meta:
db_table = 'cgi_ageterm'
ordering = [ 'olderage' ]
uri = models.CharField(max_length=200)
label = models.CharField(max_length=200)
olderage = models.FloatField(verbose_name='Older age')
youngerage = models.FloatField(verbose_name='Younger age')
vocabulary = models.ForeignKey("Vocabulary")
def __unicode__(self):
return self.label
# The following are "helper" tables for generating GSMLP effectively
class RepresentativeValue(models.Model):
class Meta:
db_table = 'representativevalue'
owningmap = models.ForeignKey('GeoMap')
mapunit = models.ForeignKey('descriptionofmapunits', db_column='mapunit')
representativelithology_uri = models.CharField(max_length=200, default="http://www.opengis.net/def/nil/OGC/0/missing")
representativeage_uri = models.CharField(max_length=200, default="http://www.opengis.net/def/nil/OGC/0/missing")
representativeolderage_uri = models.CharField(max_length=200, default="http://www.opengis.net/def/nil/OGC/0/missing")
representativeyoungerage_uri = models.CharField(max_length=200, default="http://www.opengis.net/def/nil/OGC/0/missing")
objects = models.GeoManager()
def __unicode__(self):
return "Representative values for " + self.mapunit.mapunit | bsd-3-clause | 6,808,687,042,689,924,000 | 40.736544 | 172 | 0.694271 | false | 3.859576 | false | false | false |
lorensen/VTKExamples | src/Python/Geovis/GeoGraticle.py | 1 | 1839 | #!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
geoGraticle = vtk.vtkGeoGraticule()
transformProjection = vtk.vtkGeoTransform()
destinationProjection = vtk.vtkGeoProjection()
sourceProjection = vtk.vtkGeoProjection()
transformGraticle = vtk.vtkTransformFilter()
reader = vtk.vtkXMLPolyDataReader()
transformReader = vtk.vtkTransformFilter()
graticleMapper = vtk.vtkPolyDataMapper()
readerMapper = vtk.vtkPolyDataMapper()
graticleActor = vtk.vtkActor()
readerActor = vtk.vtkActor()
geoGraticle.SetGeometryType( geoGraticle.POLYLINES )
geoGraticle.SetLatitudeLevel( 2 )
geoGraticle.SetLongitudeLevel( 2 )
geoGraticle.SetLongitudeBounds( -180, 180 )
geoGraticle.SetLatitudeBounds( -90, 90 )
# destinationProjection defaults to latlong.
destinationProjection.SetName( "rouss" )
destinationProjection.SetCentralMeridian( 0. )
transformProjection.SetSourceProjection( sourceProjection )
transformProjection.SetDestinationProjection( destinationProjection )
transformGraticle.SetInputConnection( geoGraticle.GetOutputPort() )
transformGraticle.SetTransform( transformProjection )
graticleMapper.SetInputConnection( transformGraticle.GetOutputPort() )
graticleActor.SetMapper( graticleMapper )
renderWindow = vtk.vtkRenderWindow()
renderer = vtk.vtkRenderer()
interactor = vtk.vtkRenderWindowInteractor()
renderWindow.SetInteractor( interactor )
renderWindow.AddRenderer( renderer )
renderWindow.SetSize(640, 480)
renderer.SetBackground(colors.GetColor3d("BurlyWood"))
renderer.AddActor( readerActor )
renderer.AddActor( graticleActor )
renderWindow.Render()
interactor.Initialize()
interactor.Start()
if __name__ == '__main__':
main()
| apache-2.0 | -1,957,190,375,992,868,900 | 35.78 | 74 | 0.741707 | false | 4.006536 | false | false | false |
ThatGeoGuy/ENGO629-ROBPCA | engo629/robust_pca.py | 1 | 9853 | #!/usr/bin/env python3
# This file is a part of ENGO629-ROBPCA
# Copyright (c) 2015 Jeremy Steward
# License: http://www.gnu.org/licenses/gpl-3.0-standalone.html GPL v3+
"""
Defines a class which computes the ROBPCA method as defined by Mia Hubert,
Peter J. Rousseeuw and Karlien Vandem Branden (2005)
"""
import sys
import numpy as np
from sklearn.covariance import fast_mcd, MinCovDet
from numpy.random import choice
from .classic_pca import principal_components
class ROBPCA(object):
"""
Implements the ROBPCA algorithm as defined by Mia Hubert, Peter J.
Rousseeuw, and Karlien Vandem Branden (2005)
"""
def __init__(self, X, k=0, kmax=10, alpha=0.75, mcd=True):
"""
Initializes the class instance with the data you wish to compute the
ROBPCA algorithm over.
Parameters
----------
X : An n x p data matrix (where n is number of data points and p
is number of dimensions in data) which is to be reduced.
k : Number of principal components to compute. If k is missing,
the algorithm itself will determine the number of components
by finding a k such that L_k / L_1 > 1e-3 and (sum_1:k L_j /
sum_1:r L_j >= 0.8)
kmax : Maximal number of components that will be computed. Set to 10
by default
alpha : Assists in determining step 2. The higher alpha is, the more
efficient the estimates will be for uncontaminated data.
However, lower values for alpha make the algorithm more
robust. Can be any real value in the range [0.5, 1].
mcd : Specifies whether or not to use the MCD covariance matrix to
compute the principal components when p << n.
"""
if k < 0:
raise ValueError("ROBPCA: number of dimensions k must be greater than or equal to 0.")
if kmax < 1:
raise ValueError("ROBPCA: kmax must be greater than 1 (default is 10).")
if not (0.5 <= alpha <= 1.0):
raise ValueError("ROBPCA: alpha must be a value in the range [0.5, 1.0].")
if mcd is not True or mcd is not False:
raise ValueError("ROBPCA: mcd must be either True or False.")
if k > kmax:
print("ROBPCA: WARNING - k is greater than kmax, setting k to kmax.",
file=sys.stderr)
k = kmax
self.data = X
self.k = k
self.kmax = kmax
self.alpha = alpha
self.mcd = mcd
return
@staticmethod
def reduce_to_affine_subspace(X):
"""
Takes the data-matrix and computes the affine subspace spanned by n
observations of the mean-centred data.
Parameters
----------
X : X is an n by p data matrix where n is the number of observations
and p is the number of dimensions in the data.
Returns
--------
Z : Z is the affine subspace of the data matrix X. It is the same
data as X but represents itself within its own dimensionality.
rot : Specifies the PCs computed here that were used to rotate X into
the subspace Z.
"""
# Compute regular PCA
# L -> lambdas (eigenvalues)
# PC -> principal components (eigenvectors)
_, PC = principal_components(X)
centre = np.mean(X, axis=0)
# New data matrix
Z = np.dot((X - centre), PC)
return Z, PC
def num_least_outlying_points(self):
"""
Determines the least number of outlying points h, which should be less
than n, our number of data points. `h` is computed as the maximum of
either:
alpha * n OR
(n + kmax + 1) / 2
Returns
--------
h : number of least outlying points.
"""
n = self.data.shape[0]
return int(np.max([self.alpha * n, (n + self.kmax + 1) / 2]))
@staticmethod
def direction_through_hyperplane(X):
"""
Calculates a direction vector between two points in Z, where Z is an
n x p matrix. This direction is projected upon to find the number of
least outlying points using the Stahel-Donoho outlyingness measure.
Parameters
----------
X : Affine subspace of mean-centred data-matrix
Returns
--------
p0 : point of origin of the direction vector d
d : direction vector between two points in Z
"""
n = Z.shape[0]
p = Z.shape[1]
d = None
if n > p:
P = np.array(X[choice(n,p), :])
Q, R = np.linalg.qr(P)
if np.linalg.matrix_rank(Q) == p:
d = np.linalg.solve(Q, np.ones(p))
else:
P = np.array(X[choice(n,2), :])
tmp = P[1, :] - P[0, :]
N = np.sqrt(np.dot(E,E))
if N > 1e-8:
d = tmp / N
return d
def find_least_outlying_points(self, X):
"""
Finds the `h` number of points in the dataset that are least-outlying.
Does this by first computing the modified Stahel-Donoho
affine-invariant outlyingness.
Parameters
----------
X : The data matrix with which you want to find the least outlying
points using the modified Stahel-Donoho outlyingness measure
Returns
--------
H0 : indices of data points from X which have the least
outlyingness
"""
n, p = X.shape
self.h = num_least_outlying_points()
num_directions = min(250, n * (n - 1) / 2)
B = np.array([ROBPCA.direction_through_hyperplane(X)
for _ in range(num_directions)])
B_norm = np.linalg.norm(B, axis = 1)
index_norm = B_norm > 1e-12
A = np.dot(np.diag(1 / B_norm[index_norm]), B[index_norm, :])
# Used as a matrix because there's a bug in fast_mcd / MinCovDet
# The bug is basically due to single row / column arrays not
# maintaining the exact shape information (e.g. np.zeros(3).shape
# returns (3,) and not (3,1) or (1,3).
Y = np.matrix(np.dot(Z, A.T))
ny, ry = Y.shape
# Set up lists for univariate t_mcd and s_mcd
t_mcd = np.zeros(ry)
s_mcd = np.zeros(ry)
for i in range(ry):
mcd = MinCovDet(support_fraction=self.alpha).fit(Y[:,i])
t_mcd[i] = mcd.location_
s_mcd[i] = mcd.covariance_
# Supposedly if any of the s_mcd values is zero we're supposed to
# project all the data points onto the hyperplane defined by the
# direction orthogonal to A[i, :]. However, the reference
# implementation in LIBRA does not explicitly do this, and quite
# frankly the code is so terrible I've moved on for the time being.
outl = np.max(np.abs(np.array(Y) - t_mcd) / s_mcd, axis=1)
H0 = np.argsort(outl)[::-1][0:h]
return H0
def compute_pc(self):
"""
Robustly computes the principal components of the data matrix.
This is primarily broken up into one of several ways, depending on the
dimensionality of the data (whether p > n or p < n)
"""
X, rot = ROBPCA.reduce_to_affine_subspace(self.data)
centre = np.mean(self.data, axis=0)
if np.linalg.rank(X) == 0:
raise ValueError("All data points collapse!")
n, p = X.shape
self.h = num_least_outlying_points()
# Use MCD instead of ROBPCA if p << n
if p < min(np.floor(n / 5), self.kmax) and self.mcd:
mcd = MinCovDet(support_fraction=self.alpha).fit(X)
loc = mcd.location_
cov = mcd.covariance_
L, PCs = principal_components(X, lambda x: cov)
result = {
'location' : np.dot(rot,loc) + centre,
'covariance' : np.dot(rot, cov),
'eigenvalues' : L,
'loadings' : np.dot(rot, PCs),
}
return result
# Otherwise just continue with ROBPCA
H0 = find_least_outlying_points(X)
Xh = X[H0, :]
Lh, Ph = principal_components(Xh)
centre_Xh = np.mean(Xh, axis=0)
self.kmax = np.min(np.sum(Lh > 1e-12), self.kmax)
# If k was not set or chosen to be 0, then we should calculate it
# Basically we test if the ratio of the k-th eigenvalue to the 1st
# eigenvalue (sorted decreasingly) is larger than 1e-3 and if the
# fraction of cumulative dispersion is greater than 80%
if self.k == 0:
test, = np.where(Lh / Lh[0] <= 1e-3)
if len(test):
self.k = min(np.sum(Lh > 1e-12), int(test + 1), self.kmax)
else:
self.k = min(np.sum(Lh > 1e-12), self.kmax)
cumulative = np.cumsum(Lh[1:self.k]) / np.sum(Lh)
if cumulative[self.k-1] > 0.8:
self.k = int(np.where(cumulative >= 0.8)[0])
centre += np.mean(Xh, axis=0)
rot = np.dot(rot, Ph)
X2 = np.dot(X - np.mean(Xh, axis = 0), Ph)
X2 = X2[:, 1:self.k]
rot = rot[:, 1:self.k]
mcd = MinCovDet(support_fraction=(self.h / n)).fit(X2)
loc = mcd.location_
cov = mcd.covariance_
L, PCs = principal_components(X2, lambda x: cov)
result = {
'location' : np.dot(rot, loc) + centre,
'covariance' : np.dot(rot,cov),
'eigenvalues' : L,
'loadings' : np.dot(rot, PCs),
}
return result
| gpl-3.0 | -38,017,735,330,801,064 | 34.315412 | 98 | 0.547447 | false | 3.642514 | false | false | false |
mfatihaktas/q_sim | multiq_sim.py | 1 | 13223 | import sys, pprint, math, numpy, simpy, getopt, itertools, operator
from rvs import *
from arepeat_models import *
from arepeat_sim import *
# ************************ Multiple Qs for Jobs with multiple Tasks **************************** #
class Job(object):
def __init__(self, _id, k, tsize, n=0):
self._id = _id
self.k = k
self.tsize = tsize
self.n = n
self.prev_hop_id = None
def __repr__(self):
return "Job[id= {}, k= {}, n= {}]".format(self._id, self.k, self.n)
def deep_copy(self):
j = Job(self._id, self.k, self.tsize, self.n)
j.prev_hop_id = self.prev_hop_id
return j
class JG(object): # Job Generator
def __init__(self, env, ar, k_dist, tsize_dist):
self.env = env
self.ar = ar
self.k_dist = k_dist
self.tsize_dist = tsize_dist
self.nsent = 0
self.out = None
self.action = None
def init(self):
self.action = self.env.process(self.run() )
def run(self):
while 1:
yield self.env.timeout(random.expovariate(self.ar) )
self.nsent += 1
k = self.k_dist.gen_sample()
tsize = self.tsize_dist.gen_sample()
self.out.put(Job(self.nsent, k, tsize) )
class Task(object):
def __init__(self, jid, k, size, remaining):
self.jid = jid
self.k = k
self.size = size
self.remaining = remaining
self.prev_hop_id = None
self.ent_time = None
def __repr__(self):
return "Task[jid= {}, k= {}, size= {}, remaining= {}]".format(self.jid, self.k, self.size, self.remaining)
def deep_copy(self):
t = Task(self.jid, self.k, self.size, self.remaining)
t.prev_hop_id = self.prev_hop_id
t.ent_time = self.ent_time
return t
class PSQ(object): # Process Sharing Queue
def __init__(self, _id, env, h, out):
self._id = _id
self.env = env
self.h = h
self.out = out
self.t_l = []
self.tinserv_l = []
self.got_busy = None
self.sinterrupt = None
self.add_to_serv = False
self.cancel = False
self.cancel_jid = None
self.store = simpy.Store(env)
self.action = env.process(self.serv_run() )
self.action = env.process(self.put_run() )
self.lt_l = []
self.sl_l = []
def __repr__(self):
return "PSQ[id= {}]".format(self._id)
def length(self):
return len(self.t_l)
def serv_run(self):
while True:
self.tinserv_l = self.t_l[:self.h]
if len(self.tinserv_l) == 0:
# sim_log(DEBUG, self.env, self, "idle; waiting for arrival", None)
self.got_busy = self.env.event()
yield (self.got_busy)
# sim_log(DEBUG, self.env, self, "got busy!", None)
continue
# TODO: This seems wrong
# t_justmovedHoL = self.tinserv_l[-1]
# self.out.put_c({'m': 'HoL', 'jid': t_justmovedHoL.jid, 'k': t_justmovedHoL.k, 'qid': self._id} )
serv_size = len(self.tinserv_l)
r_l = [self.tinserv_l[i].remaining for i in range(serv_size) ]
time = min(r_l)
i_min = r_l.index(time)
# sim_log(DEBUG, self.env, self, "back to serv; time= {}, serv_size= {}".format(time, serv_size), None)
start_t = self.env.now
self.sinterrupt = self.env.event()
yield (self.sinterrupt | self.env.timeout(time) )
serv_t = (self.env.now - start_t)/serv_size
for i in range(serv_size):
try:
self.t_l[i].remaining -= serv_t
except IndexError:
break
if self.add_to_serv:
# sim_log(DEBUG, self.env, self, "new task added to serv", None)
self.sinterrupt = None
self.add_to_serv = False
elif self.cancel:
for t in self.t_l:
if t.jid == self.cancel_jid:
# sim_log(DEBUG, self.env, self, "cancelled task in serv", t)
self.t_l.remove(t)
self.sinterrupt = None
self.cancel = False
else:
t = self.t_l.pop(i_min)
# sim_log(DEBUG, self.env, self, "serv done", t)
lt = self.env.now - t.ent_time
self.lt_l.append(lt)
self.sl_l.append(lt/t.size)
t.prev_hop_id = self._id
self.out.put(t)
def put_run(self):
while True:
t = (yield self.store.get() )
_l = len(self.t_l)
self.t_l.append(t)
if _l == 0:
self.got_busy.succeed()
elif _l < self.h:
self.add_to_serv = True
self.sinterrupt.succeed()
def put(self, t):
# sim_log(DEBUG, self.env, self, "recved", t)
t.ent_time = self.env.now
return self.store.put(t) # .deep_copy()
def put_c(self, m):
# sim_log(DEBUG, self.env, self, "recved; tinserv_l= {}".format(self.tinserv_l), m)
# if m['m'] == 'cancel':
jid = m['jid']
if jid in [t.jid for t in self.tinserv_l]:
self.cancel = True
self.cancel_jid = jid
self.sinterrupt.succeed()
else:
for t in self.t_l:
if t.jid == jid:
self.t_l.remove(t)
class FCFS(object):
def __init__(self, _id, env, sl_dist, out):
self._id = _id
self.env = env
self.sl_dist = sl_dist
self.out = out
self.t_l = []
self.t_inserv = None
self.got_busy = None
self.cancel_flag = False
self.cancel = None
self.lt_l = []
self.sl_l = []
self.action = env.process(self.serv_run() )
def __repr__(self):
return "FCFS[_id= {}]".format(self._id)
def length(self):
return len(self.t_l) + (self.t_inserv is not None)
def serv_run(self):
while True:
if len(self.t_l) == 0:
self.got_busy = self.env.event()
yield (self.got_busy)
self.got_busy = None
# sim_log(DEBUG, self.env, self, "got busy!", None)
self.t_inserv = self.t_l.pop(0)
self.cancel = self.env.event()
clk_start_time = self.env.now
st = self.t_inserv.size * self.sl_dist.gen_sample()
# sim_log(DEBUG, self.env, self, "starting {}s-clock on ".format(st), self.t_inserv)
yield (self.cancel | self.env.timeout(st) )
if self.cancel_flag:
# sim_log(DEBUG, self.env, self, "cancelled clock on ", self.t_inserv)
self.cancel_flag = False
else:
# sim_log(DEBUG, self.env, self, "serv done in {}s on ".format(self.env.now-clk_start_time), self.t_inserv)
lt = self.env.now - self.t_inserv.ent_time
self.lt_l.append(lt)
self.sl_l.append(lt/self.t_inserv.size)
self.t_inserv.prev_hop_id = self._id
self.out.put(self.t_inserv)
self.t_inserv = None
def put(self, t):
# sim_log(DEBUG, self.env, self, "recved", t)
_l = len(self.t_l)
t.ent_time = self.env.now
self.t_l.append(t) # .deep_copy()
if self.got_busy is not None and _l == 0:
self.got_busy.succeed()
def put_c(self, m):
# sim_log(DEBUG, self.env, self, "recved", m)
# if m['m'] == 'cancel':
jid = m['jid']
for t in self.t_l:
if t.jid == jid:
self.t_l.remove(t)
if jid == self.t_inserv.jid:
self.cancel_flag = True
self.cancel.succeed()
class JQ(object):
def __init__(self, env, in_qid_l):
self.env = env
self.in_qid_l = in_qid_l
self.jid__t_l_map = {}
self.deped_jid_l = []
self.jid_HoLqid_l_map = {}
self.movedHoL_jid_l = []
self.store = simpy.Store(env)
self.action = env.process(self.run() )
# self.store_c = simpy.Store(env)
# self.action = env.process(self.run_c() )
def __repr__(self):
return "JQ[in_qid_l= {}]".format(self.in_qid_l)
def run(self):
while True:
t = (yield self.store.get() )
if t.jid in self.deped_jid_l: # Redundant tasks of a job may be received
continue
if t.jid not in self.jid__t_l_map:
self.jid__t_l_map[t.jid] = []
self.jid__t_l_map[t.jid].append(t.deep_copy() )
t_l = self.jid__t_l_map[t.jid]
if len(t_l) > t.k:
log(ERROR, "len(t_l)= {} > k= {}".format(len(t_l), t.k) )
elif len(t_l) < t.k:
continue
else:
self.jid__t_l_map.pop(t.jid, None)
self.deped_jid_l.append(t.jid)
self.out_c.put_c({'jid': t.jid, 'm': 'jdone', 'deped_from': [t.prev_hop_id for t in t_l] } )
def put(self, t):
# sim_log(DEBUG, self.env, self, "recved", t)
return self.store.put(t)
# def run_c(self):
# while True:
# m = (yield self.store_c.get() )
# if m['m'] == 'HoL':
# jid, k, qid = m['jid'], m['k'], m['qid']
# if m['jid'] in self.movedHoL_jid_l: # Redundant tasks may move HoL simultaneously
# continue
# if jid not in self.jid_HoLqid_l_map:
# self.jid_HoLqid_l_map[jid] = []
# self.jid_HoLqid_l_map[jid].append(qid)
# HoLqid_l = self.jid_HoLqid_l_map[jid]
# if len(HoLqid_l) > k:
# log(ERROR, "len(HoLqid_l)= {} > k= {}".format(len(HoLqid_l), k) )
# elif len(HoLqid_l) < k:
# continue
# else:
# self.movedHoL_jid_l.append(jid)
# HoLqid_l = self.jid_HoLqid_l_map[jid]
# self.out_c.put_c({'m': 'jHoL', 'jid': jid, 'at': HoLqid_l} )
# self.jid_HoLqid_l_map.pop(jid, None)
# def put_c(self, m):
# sim_log(DEBUG, self.env, self, "recved", m)
# return self.store_c.put(m)
class MultiQ(object):
def __init__(self, env, N, sching_m, sl_dist):
self.env = env
self.N = N
self.sching_m = sching_m
self.jq = JQ(env, list(range(self.N) ) )
self.jq.out_c = self
# self.q_l = [PSQ(i, env, h=4, out=self.jq) for i in range(self.N) ]
# sl_dist = DUniform(1, 1) # Dolly()
self.q_l = [FCFS(i, env, sl_dist, out=self.jq) for i in range(self.N) ]
self.jid_info_m = {}
self.store = simpy.Store(env)
self.action = env.process(self.run() )
self.jtime_l = []
self.k__jtime_m = {}
def __repr__(self):
return "MultiQ[N= {}]".format(self.N)
def tlt_l(self):
l = []
for q in self.q_l:
l.extend(q.lt_l)
return l
def tsl_l(self):
l = []
for q in self.q_l:
l.extend(q.sl_l)
return l
def get_sorted_qids(self):
qid_length_m = {q._id: q.length() for q in self.q_l}
# print("qid_length_m= {}".format(qid_length_m) )
qid_length_l = sorted(qid_length_m.items(), key=operator.itemgetter(1) )
# print("qid_length_l= {}".format(qid_length_l) )
return [qid_length[0] for qid_length in qid_length_l]
def run(self):
while True:
j = (yield self.store.get() )
toi_l = random.sample(range(self.N), j.n)
# toi_l = self.get_sorted_qids()[:j.n]
for i in toi_l:
self.q_l[i].put(Task(j._id, j.k, j.tsize, j.tsize) )
self.jid_info_m[j._id] = {'k': j.k, 'ent_time': self.env.now, 'tsize': j.tsize, 'qid_l': toi_l}
def put(self, j):
# sim_log(DEBUG, self.env, self, "recved", j)
if self.sching_m['t'] == 'coded':
# j.n = j.k + self.sching_m['n-k']
j.n = min(self.N, math.floor(j.k*self.sching_m['r'] ) )
# return self.store.put(j.deep_copy() )
return self.store.put(j)
def put_c(self, m):
# sim_log(DEBUG, self.env, self, "recved", m)
# if m['m'] == 'jHoL':
# jid = m['jid']
# jinfo = self.jid_info_m[jid]
# for i in jinfo['qid_l']:
# if i not in m['at']:
# self.q_l[i].put_c({'m': 'cancel', 'jid': jid} )
# elif m['m'] == 'jdone':
jid = m['jid']
jinfo = self.jid_info_m[jid]
t = (self.env.now - jinfo['ent_time'] )/jinfo['tsize']
# t = self.env.now - jinfo['ent_time'] - jinfo['tsize']
self.jtime_l.append(t)
if jinfo['k'] not in self.k__jtime_m:
self.k__jtime_m[jinfo['k'] ] = []
self.k__jtime_m[jinfo['k'] ].append(t)
for i in jinfo['qid_l']:
if i not in m['deped_from']:
self.q_l[i].put_c({'m': 'cancel', 'jid': jid} )
self.jid_info_m.pop(jid, None)
# class MultiQ_RedToIdle(MultiQ):
# def __init__(self, env, N, sching_m):
# super().__init__(env, N, sching_m)
# self.qid_redjid_l = {i: [] for i in range(N) }
# def __repr__(self):
# return "MultiQ_RedToIdle[N = {}]".format(self.N)
# def get_sorted_qid_length_l(self):
# qid_length_m = {q._id: q.length() for q in self.q_l}
# qid_length_l = sorted(qid_length_m.items(), key=operator.itemgetter(1) )
# return qid_length_l
# def run(self):
# while True:
# j = (yield self.store.get() )
# qid_length_l = self.get_sorted_qid_length_l()
# toqid_l = []
# for i, qid_length in enumerate(qid_length_l):
# if i < j.k:
# toqid_l.append(qid_length[0] )
# elif i < j.n:
# if qid_length[1] == 0:
# qid = qid_length[0]
# toqid_l.append(qid)
# self.qid_redjid_l[qid].append(j._id)
# for i, qid in enumerate(toqid_l):
# if i < j.k:
# for jid in self.qid_redjid_l[qid]:
# self.q_l[qid].put_c({'m': 'cancel', 'jid': jid} )
# self.qid_redjid_l[qid].remove(jid)
# self.q_l[qid].put(Task(j._id, j.k, j.tsize, j.tsize) )
# self.jid_info_m[j._id] = {'ent_time': self.env.now, 'tsize': j.tsize, 'qid_l': toqid_l}
| mit | 783,390,319,104,076,200 | 29.189498 | 115 | 0.534901 | false | 2.673474 | false | false | false |
hachreak/invenio-pidstore | invenio_pidstore/providers/recordid.py | 1 | 2596 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record ID provider."""
from __future__ import absolute_import, print_function
from ..models import PIDStatus, RecordIdentifier
from .base import BaseProvider
class RecordIdProvider(BaseProvider):
"""Record identifier provider."""
pid_type = 'recid'
"""Type of persistent identifier."""
pid_provider = None
"""Provider name.
The provider name is not recorded in the PID since the provider does not
provide any additional features besides creation of record ids.
"""
default_status = PIDStatus.RESERVED
"""Record IDs are by default registered immediately.
Default: :attr:`invenio_pidstore.models.PIDStatus.RESERVED`
"""
@classmethod
def create(cls, object_type=None, object_uuid=None, **kwargs):
"""Create a new record identifier.
Note: if the object_type and object_uuid values are passed, then the
PID status will be automatically setted to
:attr:`invenio_pidstore.models.PIDStatus.REGISTERED`.
:param object_type: The object type. (Default: None.)
:param object_uuid: The object identifier. (Default: None).
:param kwargs: You specify the pid_value.
"""
# Request next integer in recid sequence.
assert 'pid_value' not in kwargs
kwargs['pid_value'] = str(RecordIdentifier.next())
kwargs.setdefault('status', cls.default_status)
if object_type and object_uuid:
kwargs['status'] = PIDStatus.REGISTERED
return super(RecordIdProvider, cls).create(
object_type=object_type, object_uuid=object_uuid, **kwargs)
| gpl-2.0 | 1,027,690,269,467,489,400 | 35.56338 | 76 | 0.70416 | false | 4.214286 | false | false | false |
eicher31/compassion-switzerland | sms_939/models/recurring_contract.py | 3 | 1427 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import api, models
class Contract(models.Model):
_inherit = 'recurring.contract'
@api.multi
def contract_waiting_mandate(self):
"""
In case the sponsor paid the first month online, we want to force
activation of contract and later put it in waiting mandate state.
"""
for contract in self.filtered('invoice_line_ids'):
invoices = contract.invoice_line_ids.mapped('invoice_id')
payment = self.env['account.payment'].search([
('invoice_ids', 'in', invoices.ids),
('state', '=', 'draft')
])
if payment:
# Activate contract
contract._post_payment_first_month()
contract.contract_active()
return super(Contract, self).contract_waiting_mandate()
def associate_group(self, payment_mode_id):
res = super(Contract, self).associate_group(payment_mode_id)
self.group_id.on_change_payment_mode()
return res
| agpl-3.0 | -4,034,868,219,426,647,000 | 36.552632 | 78 | 0.53609 | false | 4.272455 | false | false | false |
CWSL/access-cm-tools | config/smooth_mom_bathymetry.py | 1 | 3434 | #!/usr/bin/env python
import sys
import argparse
import shutil
import numpy as np
import netCDF4 as nc
from scipy import signal
"""
Smooth out bathymetry by applying a gaussian blur.
See: http://wiki.scipy.org/Cookbook/SignalSmooth
Convolving a noisy image with a gaussian kernel (or any bell-shaped curve)
blurs the noise out and leaves the low-frequency details of the image standing
out.
After smoothing various things need to be fixed up:
- A minimum depth should be set, e.g. we don't want ocean depths of < 1m
- All land points should be the same in the smoothed bathymetry.
FIXME: what about using scipy.convolve?
FIXME: what about a more generalised smoothing tool?
"""
def gauss_kern(size, sizey=None):
""" Returns a normalized 2D gauss kernel array for convolutions """
size = int(size)
if not sizey:
sizey = size
else:
sizey = int(sizey)
x, y = np.mgrid[-size:size+1, -sizey:sizey+1]
g = np.exp(-(x**2/float(size) + y**2/float(sizey)))
return g / g.sum()
def blur_image(im, n, ny=None) :
"""
Blurs the image by convolving with a gaussian kernel of typical
size n. The optional keyword argument ny allows for a different
size in the y direction.
"""
g = gauss_kern(n, sizey=ny)
improc = signal.convolve(im, g, mode='valid')
return improc
def main():
parser = argparse.ArgumentParser()
parser.add_argument("point_x", help="x coordinate of centre point.", type=int)
parser.add_argument("point_y", help="y coordinate of centre point.", type=int)
parser.add_argument("size", help="""
Rows and columns that will be smoothed. For example if size == 10,
then a 10x10 array centred at (x, y) will be smoothed.""", type=int)
parser.add_argument("--kernel", help="Size of guassian kernel.", default=5, type=int)
parser.add_argument("--minimum_depth", help="""
After smoothing, set values to a minimum depth.
This is used to fix up shallow waters""", default=40, type=int)
parser.add_argument("input_file", help="Name of the input file.")
parser.add_argument("input_var", help="Name of the variable to blur.")
parser.add_argument("output_file", help="Name of the output file.")
args = parser.parse_args()
shutil.copy(args.input_file, args.output_file)
f = nc.Dataset(args.output_file, mode='r+')
input_var = f.variables[args.input_var][:]
north_ext = args.point_y + args.size
assert(north_ext < input_var.shape[0])
south_ext = args.point_y - args.size
assert(south_ext >= 0)
east_ext = args.point_x + args.size
assert(east_ext < input_var.shape[1])
west_ext = args.point_x - args.size
assert(west_ext >= 0)
input_var = input_var[south_ext:north_ext,west_ext:east_ext]
# We need to extend/pad the array by <kernel> points along each edge.
var = np.pad(input_var, (args.kernel, args.kernel), mode='edge')
smoothed = blur_image(var, args.kernel)
# After smoothing make sure that there is a certain minimum depth.
smoothed[(smoothed > 0) & (smoothed < args.minimum_depth)] = args.minimum_depth
# Ensure that all land points remain the same.
smoothed[input_var == 0] = 0
f.variables[args.input_var][south_ext:north_ext,west_ext:east_ext] = smoothed
f.close()
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | -1,449,171,919,763,620,600 | 34.040816 | 93 | 0.65696 | false | 3.373281 | false | false | false |
makemytrip/dataShark | plugins/output/out_csv.py | 1 | 2209 | # Copyright 2016 MakeMyTrip (Kunal Aggarwal)
#
# This file is part of dataShark.
#
# dataShark is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataShark is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataShark. If not, see <http://www.gnu.org/licenses/>.
import csv
from datetime import datetime
class Plugin:
def __init__(self, conf):
self.path = conf.get('path', 'UseCase.csv')
self.separator = conf.get('separator', ',')
self.quote_char = conf.get('quote_char', '"')
self.title = conf.get('title', 'Use Case')
self.debug = conf.get('debug', False)
if self.debug == "true":
self.debug = True
else:
self.debug = False
def save(self, dataRDD, progType):
if progType == "streaming":
dataRDD.foreachRDD(lambda s: self.__writeToCSV(s))
elif progType == "batch":
self.__writeToCSV(dataRDD)
def __writeToCSV(self, dataRDD):
if dataRDD.collect():
with open(self.path, 'a') as csvfile:
spamwriter = csv.writer(csvfile, delimiter = self.separator, quotechar = self.quote_char, quoting=csv.QUOTE_MINIMAL)
currentTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
for row in dataRDD.collect():
if self.debug:
print row
csvrow = [currentTime, row[0], row[1]]
try:
metadata = row[2]
except:
metadata = {}
if metadata:
for key in sorted(metadata):
csvrow.append(metadata[key])
spamwriter.writerow(csvrow)
print "%s - (%s) - Written %s Documents to CSV File %s " % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.title, len(dataRDD.collect()), self.path)
else:
print "%s - (%s) - No RDD Data Recieved" % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.title)
| gpl-3.0 | 6,888,654,487,208,212,000 | 35.816667 | 155 | 0.649163 | false | 3.331825 | false | false | false |
tfeldmann/tryagain | setup.py | 1 | 1224 | from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name="tryagain",
version=__import__('tryagain').__version__,
license='MIT',
description="A lightweight and pythonic retry helper",
long_description=readme,
author="Thomas Feldmann",
author_email="[email protected]",
url="https://github.com/tfeldmann/tryagain",
py_modules=["tryagain"],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords=[
'retry', 'unstable', 'tryagain', 'redo', 'try', 'again', 'exception'],
)
| mit | 4,477,936,880,400,505,000 | 35 | 78 | 0.60866 | false | 4.163265 | false | true | false |
minixalpha/WatchTips | app/controllers/user.py | 1 | 2095 | #coding:utf-8
# User Management
import web
import auth
from app.views import wrapper as views
from app.models import wtdata
import config
class register:
def GET(self):
return views.layout.register(info='')
def POST(self):
"""
Posted data has three keys:
'username', 'email', 'password'
"""
data = web.input()
email = data['email']
email_validate = auth.validate_email(email)
if not email_validate:
return views.layout.register(info='email can not be validate')
email_exist = wtdata.email_exist(email)
if email_exist:
return views.layout.register(info='email exist')
pwd = data['password']
hashed_pwd = auth.encrypt_password(pwd)
print(hashed_pwd)
user_info = {}
user_info['username'] = data['username']
user_info['password'] = hashed_pwd
user_info['email'] = email
user_id = wtdata.add_user(user_info)
wtdata.add_default_category(user_id)
return web.seeother('/login')
class login:
def GET(self):
return views.layout.login(info='')
def POST(self):
data = web.input()
data = web.input()
email = data['email']
if not config.DEBUG:
email_validate = auth.validate_email(email)
if not email_validate:
return 'email is not validate'
pwd = data['password']
hashed_pwd = wtdata.get_pwd_by_email(email)
if hashed_pwd:
login_success = auth.validate_password(hashed_pwd, pwd)
else:
login_success = False
if login_success:
username = wtdata.get_username_by_email(email)
userid = wtdata.get_userid_by_email(email)
auth.set_login_state(username, userid)
return web.seeother('/home/' + str(userid))
else:
return views.layout.login(info='Username or Password is Wrong')
class logout:
def GET(self):
auth.clear_login_state()
return web.seeother('/')
| bsd-3-clause | 6,929,546,341,971,897,000 | 25.858974 | 75 | 0.58043 | false | 3.98289 | false | false | false |
Kefkius/electrum-frc-server | setup.py | 1 | 1062 | from setuptools import setup
setup(
name="electrum-frc-server",
version="0.9",
scripts=['run_electrum_frc_server','electrum-frc-server'],
install_requires=['plyvel','jsonrpclib', 'irc>=11'],
package_dir={
'electrumfrcserver':'src'
},
py_modules=[
'electrumfrcserver.__init__',
'electrumfrcserver.utils',
'electrumfrcserver.storage',
'electrumfrcserver.deserialize',
'electrumfrcserver.networks',
'electrumfrcserver.blockchain_processor',
'electrumfrcserver.server_processor',
'electrumfrcserver.processor',
'electrumfrcserver.version',
'electrumfrcserver.ircthread',
'electrumfrcserver.stratum_tcp',
'electrumfrcserver.stratum_http'
],
description="Freicoin Electrum Server",
author="Thomas Voegtlin",
author_email="[email protected]",
license="GNU Affero GPLv3",
url="https://github.com/spesmilo/electrum-server/",
long_description="""Server for the Electrum-FRC Lightweight Freicoin Wallet"""
)
| agpl-3.0 | -3,910,920,785,936,369,700 | 31.181818 | 82 | 0.65725 | false | 3.587838 | false | false | false |
makyo/honeycomb | tags/templatetags/tag_extras.py | 2 | 1999 | from django import template
from django.conf import settings
from django.db.models import (
Count,
Max,
Min,
)
register = template.Library()
TAG_MAX = getattr(settings, 'TAGCLOUD_MAX', 5.0)
TAG_MIN = getattr(settings, 'TAGCLOUD_MIN', 1.0)
def get_weight_closure(tag_min, tag_max, count_min, count_max):
"""Gets a closure for generating the weight of the tag.
Args:
tag_min: the minimum weight to use for a tag
tag_max: the maximum weight to use for a tag
count_min: the minimum number a tag is used
count_max: the maximum number a tag is used
Returns:
A closure to be used for calculating tag weights
"""
def linear(count, tag_min=tag_min, tag_max=tag_max,
count_min=count_min, count_max=count_max):
# Prevent a division by zero here, found to occur under some
# pathological but nevertheless actually occurring circumstances.
if count_max == count_min:
factor = 1.0
else:
factor = float(tag_max - tag_min) / float(count_max - count_min)
return tag_max - (count_max - count) * factor
return linear
@register.assignment_tag
def get_weighted_tags(tags):
"""Annotates a list of tags with the weight of the tag based on use.
Args:
tags: the list of tags to annotate
Returns:
The tag list annotated with weights
"""
# Annotate each tag with the number of times it's used
use_count = tags.annotate(use_count=Count('taggit_taggeditem_items'))
if len(use_count) == 0:
return tags
# Get the closure needed for adding weights to tags
get_weight = get_weight_closure(
TAG_MIN,
TAG_MAX,
use_count.aggregate(Min('use_count'))['use_count__min'],
use_count.aggregate(Max('use_count'))['use_count__max'])
tags = use_count.order_by('name')
# Add weight to each tag
for tag in tags:
tag.weight = get_weight(tag.use_count)
return tags
| mit | -7,846,668,506,608,236,000 | 29.753846 | 76 | 0.637819 | false | 3.674632 | false | false | false |
sanjioh/django-header-filter | src/header_filter/matchers.py | 1 | 6373 | """Composable matchers for HTTP headers."""
import re
RE_TYPE = type(re.compile(r''))
class BaseMatcher:
"""Matcher base class."""
def match(self, request):
"""
Check HTTP request headers against some criteria.
This method checks whether the request headers satisfy some
criteria or not. Subclasses should override it and provide a
specialized implementation.
The default implementation just returns False.
`request`: a Django request.
returns: a boolean.
"""
return False
def __invert__(self):
return Not(self)
def __and__(self, other):
return And(self, other)
def __or__(self, other):
return Or(self, other)
def __xor__(self, other):
return Xor(self, other)
class And(BaseMatcher):
"""Composite matcher that implements the bitwise AND operation."""
def __init__(self, matcher1, matcher2):
"""
Initialize the instance.
`matcher1`, `matcher2`: matchers of any type.
"""
self._matchers = (matcher1, matcher2)
def match(self, request):
"""
Compute the bitwise AND between the results of two matchers.
`request`: a Django request.
returns: a boolean.
"""
return all(matcher.match(request) for matcher in self._matchers)
def __repr__(self):
return '({!r} & {!r})'.format(*self._matchers)
class Or(BaseMatcher):
"""Composite matcher that implements the bitwise OR operation."""
def __init__(self, matcher1, matcher2):
"""
Initialize the instance.
`matcher1`, `matcher2`: matchers of any type.
"""
self._matchers = (matcher1, matcher2)
def match(self, request):
"""
Compute the bitwise OR between the results of two matchers.
`request`: a Django request.
returns: a boolean.
"""
return any(matcher.match(request) for matcher in self._matchers)
def __repr__(self):
return '({!r} | {!r})'.format(*self._matchers)
class Xor(BaseMatcher):
"""Composite matcher that implements the bitwise XOR operation."""
def __init__(self, matcher1, matcher2):
"""
Initialize the instance.
`matcher1`, `matcher2`: matchers of any type.
"""
self._matcher1 = matcher1
self._matcher2 = matcher2
def match(self, request):
"""
Compute the bitwise XOR between the results of two matchers.
`request`: a Django request.
returns: a boolean.
"""
return self._matcher1.match(request) is not self._matcher2.match(request)
def __repr__(self):
return '({!r} ^ {!r})'.format(self._matcher1, self._matcher2)
class Not(BaseMatcher):
"""Composite matcher that implements the bitwise NOT operation."""
def __init__(self, matcher):
"""
Initialize the instance.
`matcher`: a matcher of any type.
"""
self._matcher = matcher
def match(self, request):
"""
Compute the bitwise NOT of the result of a matcher.
`request`: a Django request.
returns: a boolean.
"""
return not self._matcher.match(request)
def __repr__(self):
return '~{!r}'.format(self._matcher)
class Header(BaseMatcher):
"""HTTP header matcher."""
def __init__(self, name, value):
"""
Initialize the instance.
`name`: a header name, as string.
`value`: a header value, as string, compiled regular expression
object, or iterable of strings.
"""
self._name = name
self._value = value
self._compare_value = self._get_value_comparison_method()
def _get_value_comparison_method(self):
if isinstance(self._value, RE_TYPE):
return self._compare_value_to_re_object
if isinstance(self._value, str):
return self._compare_value_to_str
return self._compare_value_to_iterable
def _compare_value_to_re_object(self, request_value):
return bool(self._value.fullmatch(request_value))
def _compare_value_to_str(self, request_value):
return request_value == self._value
def _compare_value_to_iterable(self, request_value):
return request_value in set(self._value)
def match(self, request):
"""
Inspect a request for headers with given name and value.
This method checks whether:
a) the request contains a header with the same exact name the
matcher has been initialized with, and
b) the header value is equal, matches, or belongs to the value
the matcher has been initialized with, depending on that being
respectively a string, a compiled regexp object, or an iterable
of strings.
`request`: a Django request.
returns: a boolean.
"""
try:
request_value = request.META[self._name]
except KeyError:
return False
else:
return self._compare_value(request_value)
def __repr__(self):
return '{}({!r}, {!r})'.format(self.__class__.__name__, self._name, self._value)
class HeaderRegexp(BaseMatcher):
"""HTTP header matcher based on regular expressions."""
def __init__(self, name_re, value_re):
"""
Initialize the instance.
`name_re`: a header name, as regexp string or compiled regexp
object.
`value_re`: a header value, as regexp string or compiled regexp
object.
"""
self._name_re = re.compile(name_re)
self._value_re = re.compile(value_re)
def match(self, request):
"""
Inspect a request for headers that match regular expressions.
This method checks whether the request contains at least one
header whose name and value match the respective regexps the
matcher has been initialized with.
`request`: a Django request.
returns: a boolean.
"""
for name, value in request.META.items():
if self._name_re.fullmatch(name) and self._value_re.fullmatch(value):
return True
return False
def __repr__(self):
return '{}({!r}, {!r})'.format(self.__class__.__name__, self._name_re, self._value_re)
| mit | -2,473,228,337,462,693,000 | 27.578475 | 94 | 0.594069 | false | 4.371056 | false | false | false |
benvermaercke/klustakwik2 | klustakwik2/numerics/cylib/m_step.py | 2 | 2918 | from .m_step_cy import *
from six.moves import range
__all__ = ['compute_cluster_mean', 'compute_covariance_matrix']
def get_diagonal(x):
'''
Return a writeable view of the diagonal of x
'''
return x.reshape(-1)[::x.shape[0]+1]
def compute_cluster_mean(kk, cluster):
data = kk.data
num_clusters = len(kk.num_cluster_members)
num_features = kk.num_features
cluster_mean = numpy.zeros(num_features)
num_added = numpy.zeros(num_features, dtype=int)
spikes = kk.get_spikes_in_cluster(cluster)
prior = 0
if cluster==kk.mua_cluster:
prior = kk.mua_point
elif cluster>=kk.num_special_clusters:
prior = kk.prior_point
do_compute_cluster_mean(
spikes, data.unmasked, data.unmasked_start, data.unmasked_end,
data.features, data.values_start, data.values_end,
cluster_mean, num_added, data.noise_mean, prior)
return cluster_mean
def compute_covariance_matrix(kk, cluster, cluster_mean, cov):
if cluster<kk.first_gaussian_cluster:
return
data = kk.data
num_cluster_members = kk.num_cluster_members
num_clusters = len(num_cluster_members)
num_features = kk.num_features
block = cov.block
block_diagonal = get_diagonal(block)
spikes_in_cluster = kk.spikes_in_cluster
spikes_in_cluster_offset = kk.spikes_in_cluster_offset
spike_indices = spikes_in_cluster[spikes_in_cluster_offset[cluster]:spikes_in_cluster_offset[cluster+1]]
f2m = numpy.zeros(num_features)
ct = numpy.zeros(num_features)
if kk.use_mua_cluster and cluster==kk.mua_cluster:
point = kk.mua_point
do_var_accum_mua(spike_indices, cluster_mean,
kk.data.noise_mean, kk.data.noise_variance,
cov.unmasked, block,
data.unmasked, data.unmasked_start, data.unmasked_end,
data.features, data.values_start, data.values_end,
f2m, ct, data.correction_terms, num_features,
)
else:
point = kk.prior_point
do_var_accum(spike_indices, cluster_mean,
kk.data.noise_mean, kk.data.noise_variance,
cov.unmasked, block,
data.unmasked, data.unmasked_start, data.unmasked_end,
data.features, data.values_start, data.values_end,
f2m, ct, data.correction_terms, num_features,
)
# add correction term for diagonal
cov.diagonal[:] += len(spike_indices)*data.noise_variance[cov.masked]
# Add prior
block_diagonal[:] += point*data.noise_variance[cov.unmasked]
cov.diagonal[:] += point*data.noise_variance[cov.masked]
# Normalise
factor = 1.0/(num_cluster_members[cluster]+point-1)
cov.block *= factor
cov.diagonal *= factor
| bsd-3-clause | 8,040,446,234,502,489,000 | 34.156627 | 108 | 0.611035 | false | 3.511432 | false | false | false |
SasView/sasmodels | sesansdemo.py | 1 | 3163 | # Example of conversion of scattering cross section from SANS in absolute
# units into SESANS using a Hankel transformation
# everything is in units of metres except specified otherwise
# Wim Bouwman ([email protected]), June 2013
from __future__ import division
from pylab import *
from scipy.special import jv as besselj
# q-range parameters
q = arange(0.0003, 1.0, 0.0003); # [nm^-1] range wide enough for Hankel transform
dq=(q[1]-q[0])*1e9; # [m^-1] step size in q, needed for integration
nq=len(q);
Lambda=2e-10; # [m] wavelength
# sample parameters
phi=0.1; # volume fraction
R=100; # [nm] radius particles
DeltaRho=6e14; # [m^-2]
V=4/3*pi*R**3 * 1e-27; # [m^3]
th=0.002; # [m] thickness sample
#2 PHASE SYSTEM
st= 1.5*Lambda**2*DeltaRho**2*th*phi*(1-phi)*R*1e-9 # scattering power in sesans formalism
# Form factor solid sphere
qr=q*R;
P=(3.*(sin(qr)-qr*cos(qr)) / qr**3)**2;
# Structure factor dilute
S=1.;
#2 PHASE SYSTEM
# scattered intensity [m^-1] in absolute units according to SANS
I=phi*(1-phi)*V*(DeltaRho**2)*P*S;
clf()
subplot(211) # plot the SANS calculation
plot(q,I,'k')
loglog(q,I)
xlim([0.01, 1])
ylim([1, 1e9])
xlabel(r'$Q [nm^{-1}]$')
ylabel(r'$d\Sigma/d\Omega [m^{-1}]$')
# Hankel transform to nice range for plot
nz=61;
zz=linspace(0,240,nz); # [nm], should be less than reciprocal from q
G=zeros(nz);
for i in range(len(zz)):
integr=besselj(0,q*zz[i])*I*q;
G[i]=sum(integr);
G=G*dq*1e9*2*pi; # integr step, conver q into [m**-1] and 2 pi circle integr
# plot(zz,G);
stt= th*Lambda**2/4/pi/pi*G[0] # scattering power according to SANS formalism
PP=exp(th*Lambda**2/4/pi/pi*(G-G[0]));
subplot(212)
plot(zz,PP,'k',label="Hankel transform") # Hankel transform 1D
xlabel('spin-echo length [nm]')
ylabel('polarisation normalised')
hold(True)
# Cosine transformation of 2D scattering patern
if False:
qy,qz = meshgrid(q,q)
qr=R*sqrt(qy**2 + qz**2); # reuse variable names Hankel transform, but now 2D
P=(3.*(sin(qr)-qr*cos(qr)) / qr**3)**2;
# Structure factor dilute
S=1.;
# scattered intensity [m^-1] in absolute units according to SANS
I=phi*V*(DeltaRho**2)*P*S;
GG=zeros(nz);
for i in range(len(zz)):
integr=cos(qz*zz[i])*I;
GG[i]=sum(sum(integr));
GG=4*GG* dq**2; # take integration step into account take 4 quadrants
# plot(zz,GG);
sstt= th*Lambda**2/4/pi/pi*GG[0] # scattering power according to SANS formalism
PPP=exp(th*Lambda**2/4/pi/pi*(GG-GG[0]));
plot(zz,PPP,label="cosine transform") # cosine transform 2D
# For comparison calculation in SESANS formalism, which overlaps perfectly
def gsphere(z,r):
"""
Calculate SESANS-correlation function for a solid sphere.
Wim Bouwman after formulae Timofei Kruglov J.Appl.Cryst. 2003 article
"""
d = z/r
g = zeros_like(z)
g[d==0] = 1.
low = ((d > 0) & (d < 2))
dlow = d[low]
dlow2 = dlow**2
print dlow.shape, dlow2.shape
g[low] = sqrt(1-dlow2/4.)*(1+dlow2/8.) + dlow2/2.*(1-dlow2/16.)*log(dlow/(2.+sqrt(4.-dlow2)))
return g
if True:
plot(zz,exp(st*(gsphere(zz,R)-1)),'r', label="analytical")
legend()
show()
| bsd-3-clause | 8,877,363,310,400,501,000 | 29.413462 | 97 | 0.649383 | false | 2.514308 | false | false | false |
bobbyxuy/flask_web | app/models.py | 1 | 1281 | from werkzeug.security import generate_password_hash, check_password_hash
from . import db
from . import login_manager
from flask_login import UserMixin
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
users = db.relationship('User', backref='role', lazy='dynamic')
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User %r>' % self.username
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
| mit | 1,768,659,366,592,258,800 | 27.466667 | 73 | 0.67057 | false | 3.519231 | false | false | false |
sibirrer/astrofunc | astrofunc/LensingProfiles/sersic.py | 1 | 2561 | __author__ = 'sibirrer'
#this file contains a class to make a gaussian
import numpy as np
import astrofunc.util as util
from astrofunc.LensingProfiles.sersic_utils import SersicUtil
import astrofunc.LensingProfiles.calc_util as calc_util
class Sersic(SersicUtil):
"""
this class contains functions to evaluate a Sersic mass profile: https://arxiv.org/pdf/astro-ph/0311559.pdf
"""
def function(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0):
"""
returns Gaussian
"""
n = n_sersic
x_red = self._x_reduced(x, y, n, r_eff, center_x, center_y)
b = self.b_n(n)
#hyper2f2_b = util.hyper2F2_array(2*n, 2*n, 1+2*n, 1+2*n, -b)
hyper2f2_bx = util.hyper2F2_array(2*n, 2*n, 1+2*n, 1+2*n, -b*x_red)
f_eff = np.exp(b)*r_eff**2/2.*k_eff# * hyper2f2_b
f_ = f_eff * x_red**(2*n) * hyper2f2_bx# / hyper2f2_b
return f_
def derivatives(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0):
"""
returns df/dx and df/dy of the function
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
if isinstance(r, int) or isinstance(r, float):
r = max(self._s, r)
else:
r[r < self._s] = self._s
alpha = -self.alpha_abs(x, y, n_sersic, r_eff, k_eff, center_x, center_y)
f_x = alpha * x_ / r
f_y = alpha * y_ / r
return f_x, f_y
def hessian(self, x, y, n_sersic, r_eff, k_eff, center_x=0, center_y=0):
"""
returns Hessian matrix of function d^2f/dx^2, d^f/dy^2, d^2/dxdy
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
if isinstance(r, int) or isinstance(r, float):
r = max(self._s, r)
else:
r[r < self._s] = self._s
d_alpha_dr = self.d_alpha_dr(x, y, n_sersic, r_eff, k_eff, center_x, center_y)
alpha = -self.alpha_abs(x, y, n_sersic, r_eff, k_eff, center_x, center_y)
#f_xx_ = d_alpha_dr * calc_util.d_r_dx(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dx(x_, y_)
#f_yy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * y_/r + alpha * calc_util.d_y_diffr_dy(x_, y_)
#f_xy_ = d_alpha_dr * calc_util.d_r_dy(x_, y_) * x_/r + alpha * calc_util.d_x_diffr_dy(x_, y_)
f_xx = -(d_alpha_dr/r + alpha/r**2) * x_**2/r + alpha/r
f_yy = -(d_alpha_dr/r + alpha/r**2) * y_**2/r + alpha/r
f_xy = -(d_alpha_dr/r + alpha/r**2) * x_*y_/r
return f_xx, f_yy, f_xy
| mit | -3,187,150,951,711,086,000 | 36.115942 | 111 | 0.522452 | false | 2.486408 | false | false | false |
YoQuieroSaber/votainteligente-portal-electoral | elections/urls.py | 2 | 4564 | from django.conf import settings
from django.conf.urls import patterns, url
from haystack.views import SearchView
from elections.forms import ElectionForm
from elections.views import ElectionsSearchByTagView, HomeView, ElectionDetailView,\
CandidateDetailView, SoulMateDetailView, FaceToFaceView, AreaDetailView, \
CandidateFlatPageDetailView, ElectionRankingView, QuestionsPerCandidateView
from sitemaps import *
from django.views.decorators.cache import cache_page
from elections.preguntales_views import MessageDetailView, ElectionAskCreateView, AnswerWebHook
media_root = getattr(settings, 'MEDIA_ROOT', '/')
new_answer_endpoint = r"^new_answer/%s/?$" % (settings.NEW_ANSWER_ENDPOINT)
sitemaps = {
'elections': ElectionsSitemap,
'candidates': CandidatesSitemap,
}
urlpatterns = patterns('',
url(new_answer_endpoint,AnswerWebHook.as_view(), name='new_answer_endpoint' ),
url(r'^/?$', cache_page(60 * settings.CACHE_MINUTES)(HomeView.as_view(template_name='elections/home.html')), name='home'),
url(r'^buscar/?$', SearchView(template='search.html',
form_class=ElectionForm), name='search'),
url(r'^busqueda_tags/?$', ElectionsSearchByTagView.as_view(), name='tags_search'),
url(r'^election/(?P<slug>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/election_detail.html')),
name='election_view'),
url(r'^election/(?P<slug>[-\w]+)/questionary/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/election_questionary.html')),
name='questionary_detail_view'),
#compare two candidates
url(r'^election/(?P<slug>[-\w]+)/face-to-face/(?P<slug_candidate_one>[-\w]+)/(?P<slug_candidate_two>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(FaceToFaceView.as_view(template_name='elections/compare_candidates.html')),
name='face_to_face_two_candidates_detail_view'),
#one candidate for compare
url(r'^election/(?P<slug>[-\w]+)/face-to-face/(?P<slug_candidate_one>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/compare_candidates.html')),
name='face_to_face_one_candidate_detail_view'),
#no one candidate
url(r'^election/(?P<slug>[-\w]+)/face-to-face/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionDetailView.as_view(template_name='elections/compare_candidates.html')),
name='face_to_face_no_candidate_detail_view'),
#soulmate
url(r'^election/(?P<slug>[-\w]+)/soul-mate/?$',
SoulMateDetailView.as_view(template_name='elections/soulmate_candidate.html'),
name='soul_mate_detail_view'),
# Preguntales
url(r'^election/(?P<election_slug>[-\w]+)/messages/(?P<pk>\d+)/?$',
MessageDetailView.as_view(template_name='elections/message_detail.html'),
name='message_detail'),
#ranking
url(r'^election/(?P<slug>[-\w]+)/ranking/?$',
cache_page(60 * settings.CACHE_MINUTES)(ElectionRankingView.as_view(template_name='elections/ranking_candidates.html')),
name='ranking_view'),
url(r'^election/(?P<election_slug>[-\w]+)/(?P<slug>[-\w]+)/questions?$',
QuestionsPerCandidateView.as_view(template_name='elections/questions_per_candidate.html'),
name='questions_per_candidate'
),
#ask
url(r'^election/(?P<slug>[-\w]+)/ask/?$',
ElectionAskCreateView.as_view(template_name='elections/ask_candidate.html'),
name='ask_detail_view'),
url(r'^election/(?P<election_slug>[-\w]+)/(?P<slug>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(CandidateDetailView.as_view(template_name='elections/candidate_detail.html')),
name='candidate_detail_view'
),
# End Preguntales
url(r'^election/(?P<election_slug>[-\w]+)/(?P<slug>[-\w]+)/(?P<url>[-\w]+)/?$',
cache_page(60 * settings.CACHE_MINUTES)(CandidateFlatPageDetailView.as_view()),
name='candidate_flatpage'
),
url(r'^election/(?P<slug>[-\w]+)/extra_info.html$',
ElectionDetailView.as_view(template_name='elections/extra_info.html'),
name='election_extra_info'),
url(r'^area/(?P<slug>[-\w]+)/?$',
AreaDetailView.as_view(template_name='elections/area.html'),
name='area'),
url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
)
urlpatterns += patterns('',
url(r'^cache/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': media_root})
)
| gpl-3.0 | -4,919,388,031,415,043,000 | 50.280899 | 129 | 0.668273 | false | 3.297688 | false | false | false |
javachengwc/hue | desktop/core/ext-py/python-ldap-2.3.13/Demo/schema_tree.py | 40 | 2375 | """
Outputs the object class tree read from LDAPv3 schema
of a given server
Usage: schema_oc_tree.py [--html] [LDAP URL]
"""
import sys,getopt,ldap,ldap.schema
ldap.trace_level = 1
def PrintSchemaTree(schema,se_class,se_tree,se_oid,level):
"""ASCII text output for console"""
se_obj = schema.get_obj(se_class,se_oid)
if se_obj!=None:
print '| '*(level-1)+'+---'*(level>0), \
', '.join(se_obj.names), \
'(%s)' % se_obj.oid
for sub_se_oid in se_tree[se_oid]:
print '| '*(level+1)
PrintSchemaTree(schema,se_class,se_tree,sub_se_oid,level+1)
def HTMLSchemaTree(schema,se_class,se_tree,se_oid,level):
"""HTML output for browser"""
se_obj = schema.get_obj(se_class,se_oid)
if se_obj!=None:
print """
<dt><strong>%s (%s)</strong></dt>
<dd>
%s
""" % (', '.join(se_obj.names),se_obj.oid,se_obj.desc)
if se_tree[se_oid]:
print '<dl>'
for sub_se_oid in se_tree[se_oid]:
HTMLSchemaTree(schema,se_class,se_tree,sub_se_oid,level+1)
print '</dl>'
print '</dd>'
ldap.set_option(ldap.OPT_DEBUG_LEVEL,0)
ldap._trace_level = 0
subschemasubentry_dn,schema = ldap.schema.urlfetch(sys.argv[-1],ldap.trace_level)
if subschemasubentry_dn is None:
print 'No sub schema sub entry found!'
sys.exit(1)
try:
options,args=getopt.getopt(sys.argv[1:],'',['html'])
except getopt.error,e:
print 'Error: %s\nUsage: schema_oc_tree.py [--html] [LDAP URL]'
html_output = options and options[0][0]=='--html'
oc_tree = schema.tree(ldap.schema.ObjectClass)
at_tree = schema.tree(ldap.schema.AttributeType)
#for k,v in oc_tree.items():
# print k,'->',v
#for k,v in at_tree.items():
# print k,'->',v
if html_output:
print """<html>
<head>
<title>Object class tree</title>
</head>
<body bgcolor="#ffffff">
<h1>Object class tree</h1>
<dl>
"""
HTMLSchemaTree(schema,ldap.schema.ObjectClass,oc_tree,'2.5.6.0',0)
print """</dl>
<h1>Attribute type tree</h1>
<dl>
"""
for a in schema.listall(ldap.schema.AttributeType):
if at_tree[a]:
HTMLSchemaTree(schema,ldap.schema.AttributeType,at_tree,a,0)
print
print """</dl>
</body>
</html>
"""
else:
print '*** Object class tree ***\n'
print
PrintSchemaTree(schema,ldap.schema.ObjectClass,oc_tree,'2.5.6.0',0)
print '\n*** Attribute types tree ***\n'
PrintSchemaTree(schema,ldap.schema.AttributeType,at_tree,'_',0)
| apache-2.0 | 7,142,333,691,781,884,000 | 22.75 | 81 | 0.641263 | false | 2.729885 | false | false | false |
madmath/sous-chef | src/delivery/views.py | 1 | 24247 | import datetime
import types
import json
import collections
import textwrap
import os
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.views import generic
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.decorators import login_required
from delivery.models import Delivery
from member.models import Member, Route
from django.http import JsonResponse
from django.core.urlresolvers import reverse_lazy
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE
from django.contrib.contenttypes.models import ContentType
from django.db.models.functions import Lower
from .apps import DeliveryConfig
from sqlalchemy import func, or_, and_
import labels # package pylabels
from reportlab.graphics import shapes
from .models import Delivery
from .forms import DishIngredientsForm
from order.models import (
Order, component_group_sorting, SIZE_CHOICES_REGULAR, SIZE_CHOICES_LARGE)
from meal.models import (
COMPONENT_GROUP_CHOICES, COMPONENT_GROUP_CHOICES_MAIN_DISH,
Component, Ingredient,
Menu, Menu_component,
Component_ingredient)
from member.apps import db_sa_session
from member.models import Client, Route
from datetime import date
from . import tsp
MEAL_LABELS_FILE = os.path.join(settings.BASE_DIR, "meallabels.pdf")
class Orderlist(generic.ListView):
# Display all the order on a given day
model = Delivery
template_name = 'review_orders.html'
context_object_name = 'orders'
def get_queryset(self):
queryset = Order.objects.get_orders_for_date()
return queryset
def get_context_data(self, **kwargs):
context = super(Orderlist, self).get_context_data(**kwargs)
context['orders_refresh_date'] = None
if LogEntry.objects.exists():
log = LogEntry.objects.latest('action_time')
context['orders_refresh_date'] = log
return context
class MealInformation(generic.View):
# Choose today's main dish and its ingredients
def get(self, request, **kwargs):
# Display today's main dish and its ingredients
date = datetime.date.today()
main_dishes = Component.objects.order_by(Lower('name')).filter(
component_group=COMPONENT_GROUP_CHOICES_MAIN_DISH)
if 'id' in kwargs:
# today's main dish has been chosen by user
main_dish = Component.objects.get(id=int(kwargs['id']))
# delete existing ingredients for the date + dish
Component_ingredient.objects.filter(
component=main_dish, date=date).delete()
else:
# see if a menu exists for today
menu_comps = Menu_component.objects.filter(
menu__date=date,
component__component_group=COMPONENT_GROUP_CHOICES_MAIN_DISH)
if menu_comps:
# main dish is known in today's menu
main_dish = menu_comps[0].component
else:
# take first main dish
main_dish = main_dishes[0]
# see if existing chosen ingredients for the dish
dish_ingredients = Component.get_day_ingredients(
main_dish.id, date)
if not dish_ingredients:
# get recipe ingredients for the dish
dish_ingredients = Component.get_recipe_ingredients(
main_dish.id)
form = DishIngredientsForm(
initial={
'maindish': main_dish.id,
'ingredients': dish_ingredients})
return render(
request,
'ingredients.html',
{'form': form,
'date': str(date)})
def post(self, request):
# Choose ingredients in today's main dish
# print("Pick Ingredients POST request=", request.POST) # For testing
date = datetime.date.today()
form = DishIngredientsForm(request.POST)
if '_restore' in request.POST:
# restore ingredients of main dish to those in recipe
if form.is_valid():
component = form.cleaned_data['maindish']
# delete existing ingredients for the date + dish
Component_ingredient.objects.filter(
component=component, date=date).delete()
return HttpResponseRedirect(
reverse_lazy("delivery:meal_id", args=[component.id]))
elif '_next' in request.POST:
# forward to kitchen count
if form.is_valid():
ingredients = form.cleaned_data['ingredients']
component = form.cleaned_data['maindish']
# delete existing ingredients for the date + dish
Component_ingredient.objects.filter(
component=component, date=date).delete()
# add revised ingredients for the date + dish
for ing in ingredients:
ci = Component_ingredient(
component=component,
ingredient=ing,
date=date)
ci.save()
# END FOR
# Create menu and its components for today
compnames = [component.name] # main dish
# take first sorted name of each other component group
for group, ignore in COMPONENT_GROUP_CHOICES:
if group != COMPONENT_GROUP_CHOICES_MAIN_DISH:
compnames.append(
Component.objects.order_by(Lower('name')).filter(
component_group=group)[0].name)
Menu.create_menu_and_components(date, compnames)
return HttpResponseRedirect(
reverse_lazy("delivery:kitchen_count"))
# END IF
# END IF
return render(
request,
'ingredients.html',
{'date': date,
'form': form})
class RoutesInformation(generic.ListView):
# Display all the route information for a given day
model = Delivery
template_name = "routes.html"
def get_context_data(self, **kwargs):
context = super(RoutesInformation, self).get_context_data(**kwargs)
context['routes'] = Route.objects.all()
return context
# Kitchen count report view, helper classes and functions
class KitchenCount(generic.View):
def get(self, request, **kwargs):
# Display kitchen count report for given delivery date
# or for today by default
if 'year' in kwargs and 'month' in kwargs and 'day' in kwargs:
date = datetime.date(
int(kwargs['year']), int(kwargs['month']), int(kwargs['day']))
else:
date = datetime.date.today()
kitchen_list = Order.get_kitchen_items(date)
component_lines, meal_lines = kcr_make_lines(kitchen_list, date)
num_labels = kcr_make_labels(kitchen_list)
# release session for SQLAlchemy TODO use signals instead
db_sa_session.remove()
return render(request, 'kitchen_count.html',
{'component_lines': component_lines,
'meal_lines': meal_lines,
'num_labels': num_labels})
class Component_line(types.SimpleNamespace):
# line to display component count summary
def __init__(self,
component_group='', rqty=0, lqty=0,
name='', ingredients=''):
self.__dict__.update(
{k: v for k, v in locals().items() if k != 'self'})
class Meal_line(types.SimpleNamespace):
# line to display client meal specifics
def __init__(self,
client='', rqty='', lqty='', comp_clash='',
ingr_clash='', preparation='', rest_comp='',
rest_ingr='', rest_item=''):
self.__dict__.update(
{k: v for k, v in locals().items() if k != 'self'})
def meal_line(v):
# factory for Meal_line
return Meal_line(
client=v.lastname + ', ' + v.firstname[0:2] + '.',
rqty=str(v.meal_qty) if v.meal_size == SIZE_CHOICES_REGULAR else '',
lqty=str(v.meal_qty) if v.meal_size == SIZE_CHOICES_LARGE else '',
comp_clash=', '.join(v.incompatible_components),
ingr_clash=', '.join(v.incompatible_ingredients),
preparation=', '.join(v.preparation),
rest_comp=', '.join(v.other_components),
rest_ingr=', '.join(v.other_ingredients),
rest_item=', '.join(v.restricted_items))
def kcr_cumulate(regular, large, meal):
# count cumulative meal quantities by size
if meal.meal_size == SIZE_CHOICES_REGULAR:
regular = regular + meal.meal_qty
else:
large = large + meal.meal_qty
return (regular, large)
def kcr_total_line(lines, label, regular, large):
# add line to display subtotal or total quantities by size
if regular or large:
lines.append(
Meal_line(client=label, rqty=str(regular), lqty=str(large)))
def kcr_make_lines(kitchen_list, date):
# generate all the lines for the kitchen count report
component_lines = {}
for k, item in kitchen_list.items():
for component_group, meal_component \
in item.meal_components.items():
component_lines.setdefault(
component_group,
Component_line(
component_group=component_group,
name=meal_component.name,
ingredients=", ".join(
[ing.name for ing in
Component.get_day_ingredients(
meal_component.id, date)])))
if (component_group == COMPONENT_GROUP_CHOICES_MAIN_DISH and
item.meal_size == SIZE_CHOICES_LARGE):
component_lines[component_group].lqty += \
meal_component.qty
else:
component_lines[component_group].rqty += \
meal_component.qty
# END FOR
# END FOR
items = component_lines.items()
if items:
component_lines_sorted = \
[component_lines[COMPONENT_GROUP_CHOICES_MAIN_DISH]]
component_lines_sorted.extend(
sorted([v for k, v in items if
k != COMPONENT_GROUP_CHOICES_MAIN_DISH],
key=lambda x: x.component_group))
else:
component_lines_sorted = []
meal_lines = []
rtotal, ltotal = (0, 0)
# part 1 Components clashes (and other columns)
rsubtotal, lsubtotal = (0, 0)
for v in sorted(
[val for val in kitchen_list.values() if
val.incompatible_components],
key=lambda x: x.lastname + x.firstname):
meal_lines.append(meal_line(v))
rsubtotal, lsubtotal = kcr_cumulate(rsubtotal, lsubtotal, v)
# END FOR
kcr_total_line(meal_lines, 'SUBTOTAL', rsubtotal, lsubtotal)
rtotal, ltotal = (rtotal + rsubtotal, ltotal + lsubtotal)
# part 2 Ingredients clashes , no components clashes (and other columns)
rsubtotal, lsubtotal = (0, 0)
clients = iter(sorted(
[(ke, val) for ke, val in kitchen_list.items() if
(val.incompatible_ingredients and
not val.incompatible_components)],
key=lambda x: x[1].incompatible_ingredients))
k, v = next(clients, (0, 0))
while k > 0:
combination = v.incompatible_ingredients
meal_lines.append(meal_line(v))
rsubtotal, lsubtotal = kcr_cumulate(rsubtotal, lsubtotal, v)
k, v = next(clients, (0, 0))
if k == 0 or combination != v.incompatible_ingredients:
kcr_total_line(meal_lines, 'SUBTOTAL', rsubtotal, lsubtotal)
rtotal, ltotal = (rtotal + rsubtotal, ltotal + lsubtotal)
rsubtotal, lsubtotal = (0, 0)
# END WHILE
# part 3 No clashes but preparation (and other columns)
rsubtotal, lsubtotal = (0, 0)
for v in sorted(
[val for val in kitchen_list.values() if
(not val.incompatible_ingredients and
not val.incompatible_components and
val.preparation)],
key=lambda x: x.lastname + x.firstname):
meal_lines.append(meal_line(v))
rsubtotal, lsubtotal = kcr_cumulate(rsubtotal, lsubtotal, v)
# END FOR
kcr_total_line(meal_lines, 'SUBTOTAL', rsubtotal, lsubtotal)
rtotal, ltotal = (rtotal + rsubtotal, ltotal + lsubtotal)
kcr_total_line(meal_lines, 'TOTAL SPECIALS', rtotal, ltotal)
rsubtotal, lsubtotal = (0, 0)
# part 4 No clashes nor preparation but other restrictions (NOT PRINTED)
for v in sorted(
[val for val in kitchen_list.values() if
(not val.incompatible_ingredients and
not val.incompatible_components and
not val.preparation and
(val.other_components or
val.other_ingredients or
val.restricted_items))],
key=lambda x: x.lastname + x.firstname):
meal_lines.append(meal_line(v))
rsubtotal, lsubtotal = kcr_cumulate(rsubtotal, lsubtotal, v)
# END FOR
# part 5 All columns empty (NOT PRINTED)
for v in sorted(
[val for val in kitchen_list.values() if
(not val.incompatible_ingredients and
not val.incompatible_components and
not val.preparation and
not val.other_components and
not val.other_ingredients and
not val.restricted_items)],
key=lambda x: x.lastname + x.firstname):
meal_lines.append(meal_line(v))
rsubtotal, lsubtotal = kcr_cumulate(rsubtotal, lsubtotal, v)
# END FOR
kcr_total_line(meal_lines, 'SUBTOTAL', rsubtotal, lsubtotal)
return (component_lines_sorted, meal_lines)
def kcr_make_labels(kitchen_list):
# see https://github.com/bcbnz/pylabels
# dimensions are in millimeters; 1 inch = 25.4 mm
specs = labels.Specification(
sheet_width=8.5 * 25.4, sheet_height=11 * 25.4,
columns=2, rows=7,
label_width=4 * 25.4, label_height=1.33 * 25.4,
top_margin=20, bottom_margin=20,
corner_radius=2)
def draw_label(label, width, height, data):
# callback function
obj, j, qty = data
label.add(shapes.String(2, height * 0.8,
obj.lastname + ", " + obj.firstname[0:2] + ".",
fontName="Helvetica-Bold",
fontSize=12))
label.add(shapes.String(width-2, height * 0.8,
"{}".format(datetime.date.today().
strftime("%a, %b-%d")),
fontName="Helvetica",
fontSize=10,
textAnchor="end"))
if obj.meal_size == SIZE_CHOICES_LARGE:
label.add(shapes.String(2, height * 0.65,
"LARGE",
fontName="Helvetica",
fontSize=10))
if qty > 1:
label.add(shapes.String(width * 0.5, height * 0.65,
"(" + str(j) + " of " + str(qty) + ")",
fontName="Helvetica",
fontSize=10))
label.add(shapes.String(width-3, height * 0.65,
obj.routename,
fontName="Helvetica-Oblique",
fontSize=8,
textAnchor="end"))
special = obj.preparation or []
special.extend(["No " + item for item in obj.incompatible_ingredients])
special.extend(["No " + item for item in obj.other_ingredients])
special.extend(["No " + item for item in obj.restricted_items])
special = textwrap.wrap(
' / '.join(special), width=68,
break_long_words=False, break_on_hyphens=False)
position = height * 0.45
for line in special:
label.add(shapes.String(2, position,
line,
fontName="Helvetica",
fontSize=9))
position -= 10
sheet = labels.Sheet(specs, draw_label, border=True)
# obj is a KitchenItem instance (see order/models.py)
for obj in sorted(
list(kitchen_list.values()),
key=lambda x: x.lastname + x.firstname):
qty = obj.meal_qty
for j in range(1, qty + 1):
sheet.add_label((obj, j, qty))
if sheet.label_count > 0:
sheet.save(MEAL_LABELS_FILE)
print("SousChef Printed {} meal label(s) on {} page(s)"
" into file {}".format(
sheet.label_count, sheet.page_count, MEAL_LABELS_FILE))
return sheet.label_count
# END Kitchen count report view, helper classes and functions
# Delivery route sheet view, helper classes and functions
class MealLabels(generic.View):
def get(self, request, **kwargs):
try:
f = open(MEAL_LABELS_FILE, "rb")
except:
raise Http404("File " + MEAL_LABELS_FILE + " does not exist")
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = \
'attachment; filename="labels{}.pdf"'. \
format(datetime.date.today().strftime("%Y%m%d"))
response.write(f.read())
f.close()
return response
class DeliveryRouteSheet(generic.View):
def get(self, request, **kwargs):
# Display today's delivery sheet for given route
route_id = int(kwargs['id'])
date = datetime.date.today()
route = Route.objects.get(id=route_id)
route_client_ids = route.get_client_sequence(date)
# print("delivery route sheet", "route_client_ids", route_client_ids)
route_list = Order.get_delivery_list(date, route_id)
route_list = sort_sequence_ids(route_list, route_client_ids)
# TODO sort route_list using sequence from leaflet
summary_lines, detail_lines = drs_make_lines(route_list, date)
return render(request, 'route_sheet.html',
{'route': route,
'summary_lines': summary_lines,
'detail_lines': detail_lines})
RouteSummaryLine = \
collections.namedtuple(
'RouteSummaryLine',
['component_group',
'rqty',
'lqty'])
def drs_make_lines(route_list, date):
# generate all the lines for the delivery route sheet
summary_lines = {}
for k, item in route_list.items():
# print("\nitem = ", item)
for delivery_item in item.delivery_items:
component_group = delivery_item.component_group
if component_group:
line = summary_lines.setdefault(
component_group,
RouteSummaryLine(
component_group,
rqty=0,
lqty=0))
# print("\nline", line)
if (component_group == COMPONENT_GROUP_CHOICES_MAIN_DISH and
delivery_item.size == SIZE_CHOICES_LARGE):
summary_lines[component_group] = \
line._replace(lqty=line.lqty +
delivery_item.total_quantity)
elif component_group != '':
summary_lines[component_group] = \
line._replace(rqty=line.rqty +
delivery_item.total_quantity)
# END IF
# END IF
# END FOR
# END FOR
# print("values before sort", summary_lines.values())
summary_lines_sorted = sorted(
summary_lines.values(),
key=component_group_sorting)
# print("values after sort", summary_lines_sorted)
return summary_lines_sorted, list(route_list.values())
def sort_sequence_ids(dic, seq):
# sort items in dictionary according to sequence of keys
# dic : dictionary for which some keys are not items in sequence
# seq : list of keys that may not all be entries in dic
# build an ordered dictionary from seq skipping keys not in dic
od = collections.OrderedDict()
if seq:
for k in seq:
if dic.get(k):
od[k] = None
# place all values in dic into ordered dict;
# keys not in seq will be at the end.
for k, val in dic.items():
od[k] = val
# print("sort_sequence_ids",
# "dic.items()", dic.items(),
# "seq", seq,
# "od.items()", od.items())
return od
# END Delivery route sheet view, helper classes and functions
def dailyOrders(request):
data = []
route_id = request.GET.get('route')
# Load all orders for the day
orders = Order.objects.get_orders_for_date()
for order in orders:
if order.client.route is not None:
if order.client.route.id == int(route_id):
waypoint = {
'id': order.client.member.id,
'latitude': order.client.member.address.latitude,
'longitude': order.client.member.address.longitude,
'distance': order.client.member.address.distance,
'member': "{} {}".format(
order.client.member.firstname,
order.client.member.lastname),
'address': order.client.member.address.street
}
# print("waypoint=", waypoint)
data.append(waypoint)
# Since the
# https://www.mapbox.com/api-documentation/#retrieve-a-duration-matrix
# endpoint is not yet available, we solve an approximation of the
# problem by assuming the world is flat and has no obstacles (2D
# Euclidean plane). This should still give good results.
node_to_waypoint = {}
nodes = [tsp.Node(None, 45.516564, -73.575145)] # Santropol
for waypoint in data:
node = tsp.Node(waypoint['id'], float(waypoint['latitude']),
float(waypoint['longitude']))
node_to_waypoint[node] = waypoint
nodes.append(node)
nodes = tsp.solve(nodes)
data = []
for node in nodes:
# Guard against Santropol which is not in node_to_waypoint
if node in node_to_waypoint:
data.append(node_to_waypoint[node])
waypoints = {'waypoints': data}
return JsonResponse(waypoints, safe=False)
@csrf_exempt
def saveRoute(request):
# print("saveRoute1", "request", request, "request.body=", request.body)
data = json.loads(request.body.decode('utf-8'))
# print("saveRoute2", "data=", data)
member_ids = [member['id'] for member in data['members']]
route_id = data['route'][0]['id']
route_client_ids = \
[Client.objects.get(member__id=member_id).id
for member_id in member_ids]
# print("saveRoute3", "route_id=", route_id,
# "route_client_ids=", route_client_ids)
route = Route.objects.get(id=route_id)
route.set_client_sequence(datetime.date.today(), route_client_ids)
route.save()
# To do print roadmap according the list of members received
return JsonResponse('OK', safe=False)
def refreshOrders(request):
creation_date = date.today()
delivery_date = date.today()
last_refresh_date = datetime.datetime.now()
clients = Client.active.all()
Order.create_orders_on_defaults(creation_date, delivery_date, clients)
LogEntry.objects.log_action(
user_id=1, content_type_id=1,
object_id="", object_repr="Generation of order for " + str(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M')),
action_flag=ADDITION,
)
return HttpResponseRedirect(reverse_lazy("delivery:order"))
| agpl-3.0 | -7,782,200,366,459,179,000 | 37.184252 | 79 | 0.577226 | false | 3.95611 | false | false | false |
eroicaleo/LearningPython | Algorithm/DepthFirstDirectedPaths.py | 1 | 1499 | #!/usr/bin/env python3
from Digraph import Digraph
class DepthFirstDirectedPaths:
def __init__(self, G, s):
self.marked = [0]*G.V
self.edgeTo = [0]*G.V
self.s = s
self.validateVertex(s)
self.dfs(G, s)
def dfs(self, G, v):
self.marked[v] = 1
for w in G.getAdj(v):
if not self.marked[w]:
self.edgeTo[w] = v
self.dfs(G, w)
def validateVertex(self, v):
V = len(self.marked)
assert 0 <= v < V, f'vertex {v} is not between 0 and {V-1}'
def hasPathTo(self, v):
self.validateVertex(v)
return self.marked[v]
def pathTo(self, v):
self.validateVertex(v)
if not self.hasPathTo(v):
return None
path = [v]
while self.edgeTo[v] != s:
w = self.edgeTo[v]
path, v = path+[w], w
path.append(s)
return path[::-1]
if __name__ == '__main__':
# The test case can be downloaded from here
# https://algs4.cs.princeton.edu/42digraph/tinyDG.txt
# https://algs4.cs.princeton.edu/42digraph/mediumDG.txt
# https://algs4.cs.princeton.edu/42digraph/largeDG.txt
import sys
G = Digraph(sys.argv[1])
print(G)
s = int(sys.argv[2])
dfs = DepthFirstDirectedPaths(G, s)
for v in range(G.V):
if dfs.hasPathTo(v):
print(f'{s} to {v}: {"-".join(map(str, dfs.pathTo(v)))}')
else:
print(f'{s} to {v}: not connected')
| mit | 6,723,031,972,839,636,000 | 25.767857 | 69 | 0.531688 | false | 3.034413 | false | false | false |
wxwilcke/MINOS | directives/pakbonLD_B3.py | 1 | 7661 | #!/usr/bin/python3
import logging
from operator import itemgetter
from timeit import default_timer as timer
import rdflib
from .abstract_instruction_set import AbstractInstructionSet
from readers import rdf
from writers import rule_set, pickler
from samplers import by_definition as sampler
from algorithms.semantic_rule_learning import generate_semantic_association_rules,\
generate_semantic_item_sets,\
generate_common_behaviour_sets,\
support_of,\
confidence_of
class PakbonLD(AbstractInstructionSet):
def __init__(self, time=""):
self.time = time
self.logger = logging.getLogger(__name__)
def print_header(self):
header = "PAKBON: Context ('Sporen') with 12 attributes"
print(header)
print('-' * len(header))
def load_dataset(self, abox, tbox):
"""
# pakbonLD SPARQL endpoint
endpoint = "http://pakbon-ld.spider.d2s.labs.vu.nl/sparql/"
# query
query_string = "" "
prefix pbont: <http://pakbon-ld.spider.d2s.labs.vu.nl/ont/>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
SELECT DISTINCT ?s ?p ?o
WHERE {
?s a pbont:SIKB0102S_Vondstcontext;
?p ?o.
FILTER (?p != rdf:type)
} LIMIT 1000"" "
# perform query and return a KnowledgeGraph instance
kg_i = rdf.query(query_string, endpoint)
"""
# read graphs
kg_i = rdf.read(local_path=abox)
kg_s = rdf.read(local_path=tbox)
# sample by pattern
pattern = (None,
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_grondspoortype"),
None)
# define context
# spoor with vulling
context = [rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_grondspoortype"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P89_falls_within"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_contexttype")),
(rdflib.URIRef("http://purl.org/crmeh#EHP3i"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_kleur")),
(rdflib.URIRef("http://purl.org/crmeh#EHP3i"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_textuur")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_structuurtype")),
(rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_diepte"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P40_observed_dimension"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P90_has_value")),
(rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_diepte"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P40_observed_dimension"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P91_has_unit")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_beginperiode")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_eindperiode")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_beginperiode")),
(rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P53i_is_former_or_current_location_of"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P140i_was_attributed_by"),
rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P141_assigned"),
rdflib.URIRef("http://pakbon-ld.spider.d2s.labs.vu.nl/ont/SIKB0102S_eindperiode"))]
kg_i_sampled = kg_i.sample(sampler, patterns=[pattern], context=context)
return (kg_i_sampled, kg_s)
def run_program(self, dataset, hyperparameters):
self.logger.info("Starting run\nParameters:\n{}".format(
"\n".join(["\t{}: {}".format(k,v) for k,v in hyperparameters.items()])))
kg_i, kg_s = dataset
# fit model
t0 = timer()
# generate semantic item sets from sampled graph
si_sets = generate_semantic_item_sets(kg_i)
# generate common behaviour sets
cbs_sets = generate_common_behaviour_sets(si_sets,
hyperparameters["similarity_threshold"],
hyperparameters["max_cbs_size"])
# generate semantic association rules
rules = generate_semantic_association_rules(kg_i,
kg_s,
cbs_sets,
hyperparameters["minimal_local_support"])
# calculate support and confidence, skip those not meeting minimum requirements
final_rule_set = []
for rule in rules:
support = support_of(kg_i, rule)
confidence = confidence_of(kg_i, rule)
if support >= hyperparameters["minimal_support"] and\
confidence >= hyperparameters["minimal_confidence"]:
final_rule_set.append((rule, support, confidence))
# sorting rules on both support and confidence
final_rule_set.sort(key=itemgetter(2, 1), reverse=True)
# time took
t1 = timer()
dt = t1 - t0
print(" Program completed in {:.3f} ms".format(dt))
print(" Found {} rules".format(len(final_rule_set)))
return final_rule_set
def write_to_file(self, path="./of/latest", output=[]):
overwrite = False
print(" Writing output to {}...".format(path))
rule_set.pretty_write(output, path, overwrite)
pickler.write(output, path+".pickle", overwrite)
def run(self, abox, tbox, output_path):
self.print_header()
print(" {}\n".format(self.time))
hyperparameters = {}
hyperparameters["similarity_threshold"] = .8
hyperparameters["max_cbs_size"] = 4
hyperparameters["minimal_local_support"] = 0.0
hyperparameters["minimal_support"] = 0.0
hyperparameters["minimal_confidence"] = 0.0
print(" Importing Data Sets...")
dataset = self.load_dataset(abox, tbox)
print(" Initiated Pattern Learning...")
output = self.run_program(dataset, hyperparameters)
if len(output) > 0:
self.write_to_file(output_path, output)
| gpl-3.0 | -5,489,121,447,361,127,000 | 45.713415 | 110 | 0.571857 | false | 3.461817 | false | false | false |
thedrow/invoke | invoke/cli.py | 1 | 8712 | from functools import partial
import sys
import textwrap
from .vendor import six
from .context import Context
from .loader import Loader
from .parser import Parser, Context as ParserContext, Argument
from .executor import Executor
from .exceptions import Failure, CollectionNotFound, ParseError
from .util import debug, pty_size
from ._version import __version__
def task_name_to_key(x):
return (x.count('.'), x)
sort_names = partial(sorted, key=task_name_to_key)
indent_num = 2
indent = " " * indent_num
def print_help(tuples):
padding = 3
# Calculate column sizes: don't wrap flag specs, give what's left over
# to the descriptions.
flag_width = max(len(x[0]) for x in tuples)
desc_width = pty_size()[0] - flag_width - indent_num - padding - 1
wrapper = textwrap.TextWrapper(width=desc_width)
for flag_spec, help_str in tuples:
# Wrap descriptions/help text
help_chunks = wrapper.wrap(help_str)
# Print flag spec + padding
flag_padding = flag_width - len(flag_spec)
spec = ''.join((
indent,
flag_spec,
flag_padding * ' ',
padding * ' '
))
# Print help text as needed
if help_chunks:
print(spec + help_chunks[0])
for chunk in help_chunks[1:]:
print((' ' * len(spec)) + chunk)
else:
print(spec)
print('')
def parse_gracefully(parser, argv):
"""
Run ``parser.parse_argv(argv)`` & gracefully handle ``ParseError``.
'Gracefully' meaning to print a useful human-facing error message instead
of a traceback; the program will still exit if an error is raised.
If no error is raised, returns the result of the ``parse_argv`` call.
"""
try:
return parser.parse_argv(argv)
except ParseError as e:
sys.exit(str(e))
def parse(argv, collection=None):
"""
Parse ``argv`` list-of-strings into useful core & per-task structures.
:returns:
Three-tuple of ``args`` (core, non-task `.Argument` objects), ``collection``
(compiled `.Collection` of tasks, using defaults or core arguments
affecting collection generation) and ``tasks`` (a list of
`~.parser.context.Context` objects representing the requested task
executions).
"""
# Initial/core parsing (core options can affect the rest of the parsing)
initial_context = ParserContext(args=(
# TODO: make '--collection' a list-building arg, not a string
Argument(
names=('collection', 'c'),
help="Specify collection name to load. May be given >1 time."
),
Argument(
names=('root', 'r'),
help="Change root directory used for finding task modules."
),
Argument(
names=('help', 'h'),
optional=True,
help="Show core or per-task help and exit."
),
Argument(
names=('version', 'V'),
kind=bool,
default=False,
help="Show version and exit."
),
Argument(
names=('list', 'l'),
kind=bool,
default=False,
help="List available tasks."
),
Argument(
names=('no-dedupe',),
kind=bool,
default=False,
help="Disable task deduplication."
),
Argument(
names=('echo', 'e'),
kind=bool,
default=False,
help="Echo executed commands before running.",
),
Argument(
names=('warn-only', 'w'),
kind=bool,
default=False,
help="Warn, instead of failing, when shell commands fail.",
),
Argument(
names=('pty', 'p'),
kind=bool,
default=False,
help="Use a pty when executing shell commands.",
),
Argument(
names=('hide', 'H'),
help="Set default value of run()'s 'hide' kwarg.",
)
))
# 'core' will result an .unparsed attribute with what was left over.
debug("Parsing initial context (core args)")
parser = Parser(initial=initial_context, ignore_unknown=True)
core = parse_gracefully(parser, argv)
debug("After core-args pass, leftover argv: %r" % (core.unparsed,))
args = core[0].args
# Print version & exit if necessary
if args.version.value:
print("Invoke %s" % __version__)
sys.exit(0)
# Core (no value given) --help output
# TODO: if this wants to display context sensitive help (e.g. a combo help
# and available tasks listing; or core flags modified by plugins/task
# modules) it will have to move farther down.
if args.help.value == True:
print("Usage: inv[oke] [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts]")
print("")
print("Core options:")
print_help(initial_context.help_tuples())
sys.exit(0)
# Load collection (default or specified) and parse leftovers
# (Skip loading if somebody gave us an explicit task collection.)
if not collection:
debug("No collection given, loading from %r" % args.root.value)
loader = Loader(root=args.root.value)
collection = loader.load_collection(args.collection.value)
parser = Parser(contexts=collection.to_contexts())
debug("Parsing actual tasks against collection %r" % collection)
tasks = parse_gracefully(parser, core.unparsed)
# Per-task help. Use the parser's contexts dict as that's the easiest way
# to obtain Context objects here - which are what help output needs.
name = args.help.value
if name in parser.contexts:
# Setup
ctx = parser.contexts[name]
tuples = ctx.help_tuples()
docstring = collection[name].__doc__
header = "Usage: inv[oke] [--core-opts] %s %%s[other tasks here ...]" % name
print(header % ("[--options] " if tuples else ""))
print("")
print("Docstring:")
if docstring:
# Really wish textwrap worked better for this.
doclines = docstring.lstrip().splitlines()
for line in doclines:
print(indent + textwrap.dedent(line))
# Print trailing blank line if docstring didn't end with one
if textwrap.dedent(doclines[-1]):
print("")
else:
print(indent + "none")
print("")
print("Options:")
if tuples:
print_help(tuples)
else:
print(indent + "none")
print("")
sys.exit(0)
# Print discovered tasks if necessary
if args.list.value:
print("Available tasks:\n")
# Sort in depth, then alpha, order
task_names = collection.task_names
names = sort_names(task_names.keys())
for primary in names:
aliases = sort_names(task_names[primary])
out = primary
if aliases:
out += " (%s)" % ', '.join(aliases)
print(" %s" % out)
print("")
sys.exit(0)
# Return to caller so they can handle the results
return args, collection, tasks
def derive_opts(args):
run = {}
if args['warn-only'].value:
run['warn'] = True
if args.pty.value:
run['pty'] = True
if args.hide.value:
run['hide'] = args.hide.value
if args.echo.value:
run['echo'] = True
return {'run': run}
def dispatch(argv):
args, collection, tasks = parse(argv)
results = []
executor = Executor(collection, Context(**derive_opts(args)))
# Take action based on 'core' options and the 'tasks' found
for context in tasks:
kwargs = {}
for _, arg in six.iteritems(context.args):
# Use the arg obj's internal name - not what was necessarily given
# on the CLI. (E.g. --my-option vs --my_option for
# mytask(my_option=xxx) requires this.)
# TODO: store 'given' name somewhere in case somebody wants to see
# it when handling args.
kwargs[arg.name] = arg.value
try:
# TODO: allow swapping out of Executor subclasses based on core
# config options
results.append(executor.execute(
name=context.name,
kwargs=kwargs,
dedupe=not args['no-dedupe']
))
except Failure as f:
sys.exit(f.result.exited)
return results
def main():
# Parse command line
argv = sys.argv[1:]
debug("Base argv from sys: %r" % (argv,))
dispatch(argv)
| bsd-2-clause | -5,722,951,377,510,803,000 | 32.125475 | 92 | 0.572429 | false | 4.186449 | false | false | false |
xzturn/caffe2 | caffe2/python/operator_test/pack_ops_test.py | 1 | 8544 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
from hypothesis import strategies as st
import numpy as np
import time
class TestTensorPackOps(hu.HypothesisTestCase):
def pack_segments_ref(self, return_presence_mask=False):
def pack_segments_ref(lengths, data):
arr = []
constant_values = 0
if data.dtype.char == 'S':
constant_values = ''
for idx in range(np.size(lengths)):
chunk = data[np.sum(lengths[:idx]):np.sum(lengths[:idx + 1])]
pad_length = np.max(lengths) - lengths[idx]
# ((0, pad_length), (0, 0)) says add pad_length rows of padding
# below chunk and 0 rows of padding elsewhere
arr.append(
np.pad(
chunk, ((0, pad_length), (0, 0)),
mode=str("constant"),
constant_values=constant_values
)
)
result = [arr]
if return_presence_mask:
presence_arr = []
for length in lengths:
pad_length = np.max(lengths) - length
presence_arr.append(
np.pad(
np.ones((length), dtype=np.bool), ((0, pad_length)),
mode=str("constant")
)
)
result.append(presence_arr)
return result
return pack_segments_ref
@given(
num_seq=st.integers(10, 500),
cell_size=st.integers(1, 10),
**hu.gcs
)
def test_pack_ops(self, num_seq, cell_size, gc, dc):
# create data
lengths = np.arange(num_seq, dtype=np.int32) + 1
num_cell = np.sum(lengths)
data = np.zeros(num_cell * cell_size, dtype=np.float32)
left = np.cumsum(np.arange(num_seq) * cell_size)
right = np.cumsum(lengths * cell_size)
for i in range(num_seq):
data[left[i]:right[i]] = i + 1.0
data.resize(num_cell, cell_size)
print("\nnum seq:{}, num cell: {}, cell size:{}\n".format(
num_seq, num_cell, cell_size)
+ "=" * 60
)
# run test
op = core.CreateOperator(
'PackSegments', ['l', 'd'], ['t'])
workspace.FeedBlob('l', lengths)
workspace.FeedBlob('d', data)
start = time.time()
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[lengths, data],
reference=self.pack_segments_ref(),
)
end = time.time()
print("{} used time: {}".format(gc, end - start).replace('\n', ' '))
with core.DeviceScope(gc):
workspace.FeedBlob('l', lengths)
workspace.FeedBlob('d', data)
workspace.RunOperatorOnce(core.CreateOperator(
'PackSegments',
['l', 'd'],
['t'],
device_option=gc))
workspace.RunOperatorOnce(core.CreateOperator(
'UnpackSegments',
['l', 't'],
['newd'],
device_option=gc))
assert((workspace.FetchBlob('newd') == workspace.FetchBlob('d')).all())
@given(
**hu.gcs_cpu_only
)
def test_pack_ops_str(self, gc, dc):
# GPU does not support string. Test CPU implementation only.
workspace.FeedBlob('l', np.array([1, 2, 3], dtype=np.int64))
strs = np.array([
["a", "a"],
["b", "b"],
["bb", "bb"],
["c", "c"],
["cc", "cc"],
["ccc", "ccc"]],
dtype='|S')
workspace.FeedBlob('d', strs)
workspace.RunOperatorOnce(core.CreateOperator(
'PackSegments',
['l', 'd'],
['t'],
device_option=gc))
workspace.RunOperatorOnce(core.CreateOperator(
'UnpackSegments',
['l', 't'],
['newd'],
device_option=gc))
assert((workspace.FetchBlob('newd') == workspace.FetchBlob('d')).all())
def test_pad_minf(self):
workspace.FeedBlob('l', np.array([1, 2, 3], dtype=np.int32))
workspace.FeedBlob(
'd',
np.array([
[1.0, 1.1],
[2.0, 2.1],
[2.2, 2.2],
[3.0, 3.1],
[3.2, 3.3],
[3.4, 3.5]],
dtype=np.float32))
workspace.RunOperatorOnce(core.CreateOperator(
'PackSegments', ['l', 'd'], ['t'], pad_minf=True))
workspace.RunOperatorOnce(core.CreateOperator(
'Exp', ['t'], ['r']
))
result = workspace.FetchBlob('t')
assert(result[0, -1, 0] < -1000.0)
# The whole point of padding with -inf is that when we exponentiate it
# then it should be zero.
exponentiated = workspace.FetchBlob('r')
assert(exponentiated[0, -1, 0] == 0.0)
@given(**hu.gcs_cpu_only)
def test_presence_mask(self, gc, dc):
lengths = np.array([1, 2, 3], dtype=np.int32)
data = np.array(
[
[1.0, 1.0], [2.0, 2.0], [2.0, 2.0], [3.0, 3.0], [3.0, 3.0],
[3.0, 3.0]
],
dtype=np.float32
)
op = core.CreateOperator(
'PackSegments', ['l', 'd'], ['t', 'p'], return_presence_mask=True
)
workspace.FeedBlob('l', lengths)
workspace.FeedBlob('d', data)
inputs = [lengths, data]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=self.pack_segments_ref(return_presence_mask=True),
)
op = core.CreateOperator(
'PackSegments', ['l', 'd'], ['t', 'p'], return_presence_mask=True
)
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('t')
expected_output_shape = (3, 3, 2)
self.assertEquals(output.shape, expected_output_shape)
presence_mask = workspace.FetchBlob('p')
expected_presence_mask = np.array(
[[True, False, False], [True, True, False], [True, True, True]],
dtype=np.bool
)
self.assertEqual(presence_mask.shape, expected_presence_mask.shape)
np.testing.assert_array_equal(presence_mask, expected_presence_mask)
def test_presence_mask_empty(self):
lengths = np.array([], dtype=np.int32)
data = np.array([], dtype=np.float32)
op = core.CreateOperator(
'PackSegments', ['l', 'd'], ['t', 'p'], return_presence_mask=True
)
workspace.FeedBlob('l', lengths)
workspace.FeedBlob('d', data)
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('p')
expected_output_shape = (0, 0)
self.assertEquals(output.shape, expected_output_shape)
@given(**hu.gcs_cpu_only)
def test_out_of_bounds(self, gc, dc):
# Copy pasted from test_pack_ops but with 3 changed to 4
lengths = np.array([1, 2, 4], dtype=np.int32)
data = np.array([
[1.0, 1.0],
[2.0, 2.0],
[2.0, 2.0],
[3.0, 3.0],
[3.0, 3.0],
[3.0, 3.0]], dtype=np.float32)
op = core.CreateOperator(
'PackSegments', ['l', 'd'], ['t'])
inputs = [lengths, data]
self.assertRunOpRaises(
device_option=gc,
op=op,
inputs=inputs,
exception=RuntimeError
)
@given(**hu.gcs_cpu_only)
def test_under_bounds(self, gc, dc):
# Copy pasted from test_pack_ops but with 3 changed to 2
lengths = np.array([1, 2, 2], dtype=np.int32)
data = np.array([
[1.0, 1.0],
[2.0, 2.0],
[2.0, 2.0],
[3.0, 3.0],
[3.0, 3.0],
[3.0, 3.0]], dtype=np.float32)
op = core.CreateOperator(
'PackSegments', ['l', 'd'], ['t'])
inputs = [lengths, data]
self.assertRunOpRaises(
device_option=gc,
op=op,
inputs=inputs,
exception=RuntimeError
)
if __name__ == "__main__":
import unittest
unittest.main()
| apache-2.0 | 273,140,610,715,093,660 | 32.245136 | 80 | 0.494733 | false | 3.700303 | true | false | false |
Thump/peuchre | euchreplayer.py | 1 | 64890 | # This class implements an interface for a single player to the euchred
# server. This class expects to be used as a base class for an actual
# player class that can be used with the peuchre program. The real
# player class is expected to be called Player, it needs to sub-class
# EuchrePlayer, and needs to implement the following methods:
#
# - decideOrderPass()
# - decideCallPass()
# - decideDrop()
# - decideDefend()
# - decidePlayLead()
# - decidePlayFollow()
import socket
import struct
import logging
import sys
import random
import string
import select
from logging import warning as warn, log, debug, info, error, critical
from card import Card
class EuchrePlayer:
# this is the dict that maps message ID to message name: we also generate
# a reverse mapping at the end
messageId = {
# sent by the client after connection, as well as the server's replies
'JOIN' : 123401 ,
'JOINDENY' : 123402 ,
'JOINACCEPT' : 123403 ,
# sent by the server to connected clients when the server is quitting */
'SERVERQUIT' : 123404 ,
# sent by the client to the server when the client is quitting */
'CLIENTQUIT' : 123405 ,
# sent if the server is full when the client tries to connect */
'DECLINE' : 123406 ,
# sent by the server when the client is about to be terminated */
'KICK' : 123407 ,
# the ID messages, request from client, responses from server */
'ID' : 123408 ,
'IDACCEPT' : 123409 ,
'IDDENY' : 123410 ,
# sent by the client when sending in a chat message, sent by the server
# when broadcasting the chat message
'CHAT' : 123411 ,
# sent by server to clients after game state change: provides all info
# needed by client to enter or resume game
'STATE' : 123412 ,
# sent as a request when the creator wants to kick another player */
'KICKPLAYER' : 123413 ,
'KICKDENY' : 123414 ,
# sent by a client setting options */
'OPTIONS' : 123415 ,
'OPTIONSDENY' : 123416 ,
# sent by the creator to start the game */
'START' : 123417 ,
'STARTDENY' : 123418 ,
# sent by the creator to end or reset the game and sent by the server
# to tell the clients the game is ending */
'END' : 123419 ,
'ENDDENY' : 123420 ,
# sent by client as responses to an order offer */
'ORDER' : 123421 ,
'ORDERALONE' : 123422 ,
'ORDERPASS' : 123423 ,
'ORDERDENY' : 123424 ,
# sent by client to indicate dropped card, and the deny message */
'DROP' : 123425 ,
'DROPDENY' : 123426 ,
# sent by client as responses to a call offer */
'CALL' : 123427 ,
'CALLALONE' : 123428 ,
'CALLPASS' : 123429 ,
'CALLDENY' : 123430 ,
# sent by client as responses to a defend offer */
'DEFEND' : 123431 ,
'DEFENDPASS' : 123432 ,
'DEFENDDENY' : 123433 ,
# sent by client as responses to a play offer */
'PLAY' : 123434 ,
'PLAYDENY' : 123435 ,
# flag messages sent by server */
'TRICKOVER' : 123436 ,
'HANDOVER' : 123437 ,
'GAMEOVER' : 123438 ,
'PLAYOFFER' : 123439 ,
'DEFENDOFFER' : 123440 ,
'CALLOFFER' : 123441 ,
'ORDEROFFER' : 123442 ,
'DROPOFFER' : 123443 ,
'DEAL' : 123444 ,
# these are the trailing bytes, to indicate the end of a message
'TAIL1' : 250 ,
'TAIL2' : 222 ,
}
# now generate the reverse mapping: thanks stack overflow!
#messageName = {v: k for k,v in messageId.items()}
messageName = {}
for k, v in messageId.items():
messageName[v] = k
###########################################################################
#
def __init__(self, **kwargs):
self.server = "0.0.0.0"
self.port = -1
self.playerhandle = -1
self.gamehandle = -1
self.team = -1
# this tracks the data from the most recent state information
self.state = {}
self.state[0] = {}
self.state[1] = {}
self.state[2] = {}
self.state[3] = {}
self.state['state'] = 0
# initialize scores and tricks to 0
self.state['usscore'] = 0
self.state['themscore'] = 0
self.state['ustricks'] = 0
self.state['themtricks'] = 0
# init orderer to -1
self.state['orderer'] = -1
# randomize that name!
self.name = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(10))
# override the defaults if we were passed relevant arguments
if 'server' in kwargs:
self.server = kwargs['server']
if 'port' in kwargs:
self.port = kwargs['port']
if 'name' in kwargs:
self.name = kwargs['name']
# if we were passed a record object, save it
if 'record' in kwargs:
self.record = kwargs['record']
# we use this to ID hands in the log
self.gcount = 0
if 'gcount' in kwargs:
self.gcount = kwargs['gcount']
self.hcount = 0
self.tcount = 0
self.setId()
if 'lock' in kwargs:
self.lock = kwargs['lock']
###########################################################################
# This is a utility function to set the self.id string: it uses the
# game, hand, and trick count variables to compose a self.id string
# which is included in every log message, to make the log files easier
# to parse.
#
def setId(self):
self.id = \
"%s g%dh%dt%d : " % (self.name,self.gcount,self.hcount,self.tcount)
###########################################################################
# prints the score
#
def printScore(self):
info(self.id+"score us:%d them:%d" %
(self.state['usscore'],self.state['themscore']) )
###########################################################################
# print out the detailed state information
#
def status(self):
info("")
info(self.id+"My Status")
info(self.id+" server: " + self.server)
info(self.id+" port : " + str(self.port))
info("")
info(self.id+" Name : " + str(self.name))
info(self.id+" Player: " + str(self.playerhandle))
info(self.id+" Team : " + str(self.team))
info(self.id+" Game : " + str(self.gamehandle))
# just the game stuff
self.gameStatus()
# if we've got a hand state information, we should have all the
# player information, so print that
if 'hstate' in self.state:
self.playerStatus()
###########################################################################
# print out the game state information
#
def gameStatus(self):
info("")
info(self.id+"Game Status:")
info(self.id+" Score : %d vs %d"
% (self.state['usscore'],self.state['themscore']))
info(self.id+" Tricks: %d vs %d"
% (self.state['ustricks'],self.state['themtricks']))
info(self.id+" Game Started: %d" % (self.state['ingame']))
info(self.id+" Hand Status : %d" % (self.state['hstate']))
info(self.id+" options:")
info(self.id+" Can Defend Alone: %d"
% (self.state['defend']))
info(self.id+" Must Go Alone on Order: %d"
% (self.state['aloneonorder']))
info(self.id+" Screw the Dealer: %d"
% (self.state['screw']))
info(self.id+" Number of cards: %d (%s)"
% (self.state['numcards'], self.printHand(self.hand)) )
info(self.id+" Trump is Set: %d" % (self.state['trumpset']))
if not self.state['holein']:
info(self.id+" Hole Card: not dealt")
else:
info(self.id+" Hole Card: " + self.state['hole'])
###########################################################################
# print out all the player state info
#
def playerStatus(self):
for i in (0,1,2,3):
# skip this player if their state isn't joined
if self.state[i]['state'] != 2:
continue
# otherwise print all the info
info("")
info(self.id+"Player %d:" % (i))
info(self.id+" Name: %s" % (self.state[i]['name']))
info(self.id+" Team: %d" % (self.state[i]['team']))
info(self.id+" Dealer: %d" % (self.state[i]['dealer']))
info(self.id+" Ordered: %d" % (self.state[i]['ordered']))
info(self.id+" Passed: %d" % (self.state[i]['passed']))
info(self.id+" Made It: %d" % (self.state[i]['maker']))
info(self.id+" Alone: %d" % (self.state[i]['alone']))
info(self.id+" Lead: %d" % (self.state[i]['leader']))
info(self.id+" Creator: %d" % (self.state[i]['creator']))
info(self.id+" Offers:")
info(self.id+" Drop: %d" % (self.state[i]['dropoffer']))
info(self.id+" Order: %d"
% (self.state[i]['orderoffer']))
info(self.id+" Call: %d" % (self.state[i]['calloffer']))
info(self.id+" Play: %d" % (self.state[i]['playoffer']))
info(self.id+" Defend: %d"
% (self.state[i]['defendoffer']))
# if the player has a card in play, show it
if self.state[i]['cardinplay']:
info(self.id+" Card Played: " + self.state[i]['card'])
else:
info(self.id+" Card Played: none")
###########################################################################
# this routine will connect to the game server
#
def sendJoin(self):
# create the socket for connection to the server: we'll need this
# for use in the rest of the object
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.server,self.port))
except ConnectionRefusedError:
return False
# get the length of the name and use that length in the format strign
namelen = len(self.name)
format = "!iiii" + str(namelen) + "sBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
self.messageId['JOIN'],
1,
len(self.name),
str.encode(self.name),
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
# set up a select with this socket
inputs = [ self.s ]
# wait for a message to come in
readable, writable, exceptional = select.select(inputs, [], inputs)
# we read single int from the socket: this should represent the
# length of the entire message
(size,) = struct.unpack("!i",self.s.recv(4))
# read the specified number of bytes from the socket
bytes = self.s.recv(size)
#info(self.id+"len of bytes is " + str(len(bytes)))
# decode the message identifier
(id,) = struct.unpack_from("!i",bytes)
#info(self.id+"message is: %s (%d)" % (self.messageName[id],id))
# now we mung out a case switch on the message identifier
if ( id == self.messageId['JOINACCEPT'] ):
info(self.id+"join successful")
return self.parseJoinAccept(bytes)
elif ( id == self.messageId['JOINDENY'] ):
return self.parseJoinDeny(bytes)
elif ( id == self.messageId['DECLINE'] ):
return self.parseDecline(bytes)
else:
info(self.id+"unknown join response: %s (%d)" %
(self.messageName[id],id))
return self.badMessage(bytes)
###########################################################################
# this routine will send the start message to the game server
#
def sendStart(self):
# a start message looks like this:
# <msg> : <msglen> <START> <gh> <ph> <tail>
# prep the format string
format = "!iiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
self.messageId['START'],
self.gamehandle,
self.playerhandle,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
###########################################################################
# this routine will send an order, order alone, or order pass message,
# based on what the player sub-class implementation of decideOrderPass()
# returns
#
def sendOrderPass(self):
# possible messages look like this:
# <msg> : <msglen> <ORDER> <gh> <ph> <tail>
# <msg> : <msglen> <ORDERALONE> <gh> <ph> <tail>
# <msg> : <msglen> <ORDERPASS> <gh> <ph> <tail>
# get the message we should send to the server: this should be one
# of ORDER, ORDERALONE, or ORDERPASS
message = self.decideOrderPass()
# prep the format string
format = "!iiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
message,
self.gamehandle,
self.playerhandle,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
###########################################################################
# this routine will send a call, call alone, or call pass message,
# based on what the player sub-class implementation of decideCallPass()
# returns
#
def sendCallPass(self):
# a call looks like this:
# <msg> : <msglen> <CALL> <gh> <ph> <suit> <tail>
# <msg> : <msglen> <CALLALONE> <gh> <ph> <suit> <tail>
# a call pass looks like this:
# <msg> : <msglen> <CALLPASS> <gh> <ph> <tail>
# get the message we should send to the server: this should be one
# of CALL, CALLALONE, or CALLPASS, and a suit (which will be
# None if the return is a CALLPASS
result = self.decideCallPass()
op = result['op']
suit = result['suit']
# now generate a packed array of bytes for the message using that
# format string, depending on the message we're supposed to return
if op == self.messageId['CALL'] \
or op == self.messageId['CALLALONE']:
# prep the format string
format = "!iiiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
message = struct.pack(format,
size,
op,
self.gamehandle,
self.playerhandle,
suit,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
# now generate a packed array of bytes for the message using that
# format string, depending on the message we're supposed to return
if op == self.messageId['CALLPASS']:
format = "!iiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
message = struct.pack(format,
size,
op,
self.gamehandle,
self.playerhandle,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
###########################################################################
# this routine will randomly drop a card, in response to a drop offer
#
def sendDrop(self):
# a start message looks like this:
# <msg> : <msglen> <DROP> <gh> <ph> <card> <tail>
# call decideDrop() which should return a card to drop
card = self.decideDrop(self.state['hole'])
# prep the format string
format = "!iiiiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
self.messageId['DROP'],
self.gamehandle,
self.playerhandle,
card.value,
card.suit,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
###########################################################################
# this routine will always decline a defend offer
#
def sendDefend(self):
# a start message looks like this:
# <msg> : <msglen> <DEFEND> <gh> <ph> <card> <tail>
# call the decideDefend() routine to determine if we should
# defend alone or not
message = self.decideDefend()
# prep the format string
format = "!iiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
message,
self.gamehandle,
self.playerhandle,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#self.printMessage(message)
self.s.send(message)
###########################################################################
# this routine will play a card
#
def sendPlay(self):
# a start message looks like this:
# <msg> : <msglen> <PLAY> <gh> <ph> <card> <tail>
# are we leading?
me = self.playerhandle
leader = self.state[me]['leader']
# if we're the leader, we can play anything
if leader:
self.sendPlayLead()
else:
self.sendPlayFollow()
###########################################################################
# This plays a card to lead a new trick, for the moment it will play
# anything
#
def sendPlayLead(self):
# call decidePlayLead() to determine what card we should play as
# a lead
card = self.decidePlayLead()
# remove the card from our hand
self.removeCard(card)
# prep the format string
format = "!iiiiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
self.messageId['PLAY'],
self.gamehandle,
self.playerhandle,
card.value,
card.suit,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#info(self.id+"sending PLAY")
#self.printMessage(message)
self.s.send(message)
###########################################################################
# This plays a card to follow in a new trick, for the moment it will
# play a random (valid) card
#
def sendPlayFollow(self):
# call decidePlayFollow() to determine the card we should follow
# with: this assumes that the returned card is valid
card = self.decidePlayFollow()
# remove the card from our hand
self.removeCard(card)
# prep the format string
format = "!iiiiiiBB"
size = struct.calcsize(format)
# reduce the size by 4, to leave out the space needed for the
# leading size value
size = size - 4
# now generate a packed array of bytes for the message using that
# format string
message = struct.pack(format,
size,
self.messageId['PLAY'],
self.gamehandle,
self.playerhandle,
card.value,
card.suit,
self.messageId['TAIL1'],
self.messageId['TAIL2'],
)
#info(self.id+"sending PLAY")
#self.printMessage(message)
self.s.send(message)
###########################################################################
# this reads a message from the server socket, and processes it
#
def parseMessage(self):
# we read single int from the socket: this should represent the
# length of the entire message
(size,) = struct.unpack("!i",self.s.recv(4))
# read the specified number of bytes from the socket
bytes = self.s.recv(size)
#info(self.id+"len of bytes is " + str(len(bytes)))
# decode the message identifier
(id,) = struct.unpack_from("!i",bytes)
#info(self.id+"message is: %s (%d)" % (self.messageName[id],id))
# now we mung out a case switch on the message identifier
if ( id == self.messageId['JOINACCEPT'] ):
return self.parseJoinAccept(bytes)
elif ( id == self.messageId['JOINDENY'] ):
return self.parseJoinDeny(bytes)
elif ( id == self.messageId['CHAT'] ):
return self.parseChat(bytes)
elif ( id == self.messageId['STATE'] ):
return self.parseState(bytes)
elif ( id == self.messageId['DEAL'] ):
return self.parseDeal(bytes)
elif ( id == self.messageId['STARTDENY'] ):
return self.parseStartDeny(bytes)
elif ( id == self.messageId['ORDEROFFER'] ):
return self.parseOrderOffer(bytes)
elif ( id == self.messageId['ORDERDENY'] ):
return self.parseOrderDeny(bytes)
elif ( id == self.messageId['CALLOFFER'] ):
return self.parseCallOffer(bytes)
elif ( id == self.messageId['CALLDENY'] ):
return self.parseCallDeny(bytes)
elif ( id == self.messageId['DROPOFFER'] ):
return self.parseDropOffer(bytes)
elif ( id == self.messageId['DROPDENY'] ):
return self.parseDropDeny(bytes)
elif ( id == self.messageId['DEFENDOFFER'] ):
return self.parseDefendOffer(bytes)
elif ( id == self.messageId['DEFENDDENY'] ):
return self.parseDefendDeny(bytes)
elif ( id == self.messageId['PLAYOFFER'] ):
return self.parsePlayOffer(bytes)
elif ( id == self.messageId['PLAYDENY'] ):
return self.parsePlayDeny(bytes)
elif ( id == self.messageId['TRICKOVER'] ):
return self.parseTrickOver(bytes)
elif ( id == self.messageId['HANDOVER'] ):
return self.parseHandOver(bytes)
elif ( id == self.messageId['GAMEOVER'] ):
return self.parseGameOver(bytes)
else:
info(self.id+"message is: %s (%d)" % (self.messageName[id],id))
return self.badMessage(bytes)
###########################################################################
# This routine parses a JOINACCEPT message
#
def parseJoinAccept(self, bytes):
#debug(self.id+"parsing JOINACCEPT")
#self.printMessage(bytes)
# the format of a JOINACCEPT message is:
# <msg> : <msglen> <JOINACCEPT> <gh> <ph> <team> <tail>
# where we've already read the msglen bytes
(msg, gh, ph, team, tail1, tail2) = struct.unpack("!iiiiBB",bytes)
# run some sanity checks
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseJoinAccept()")
return False
# ok, otherwise we carry on
self.gamehandle = gh
self.playerhandle = ph
self.team = team
return True
###########################################################################
# This routine parses a JOINDENY message
#
def parseJoinDeny(self, bytes):
#debug(self.id+"parsing JOINDENY")
#self.printMessage(bytes)
# the format of a JOINDENY message is:
# <msg> : <msglen> <JOINDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
info(self.id+"join denied: " + message)
return(False)
###########################################################################
# This routine parses a DECLINE message
#
def parseDecline(self, bytes):
#debug(self.id+"parsing DECLINE")
#self.printMessage(bytes)
# the format of a DECLINE message is:
# <msg> : <msglen> <DECLINE> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
info(self.id+"join declined: " + message)
return(False)
###########################################################################
# This routine parses a CHAT message
#
def parseChat(self, bytes):
#debug(self.id+"parsing CHAT")
#self.printMessage(bytes)
# the format of a CHAT message is:
# <msg> : <msglen> <CHAT> <string> <tail>
# where we've already read the msglen bytes
# since the only content we have is the string, we slice the leading
# <CHAT> (ie. 4 bytes) off the bytes array and pass it to a
# specialized string parser
chat = self.parseString(bytes[4:-2])
# now we peel off the tail and make sure it's sane
(tail1,tail2) = struct.unpack("!BB",bytes[-2:])
# run some sanity checks
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseChat()")
return False
# ok, log the chat
#info(self.id+"" + chat)
return True
###########################################################################
# This routine parses a string component of a message: it expects
# to be passed a bytes array beginning with the string length
#
def parseString(self, bytes):
#debug(self.id+"parsing string")
#self.printMessage(bytes)
# the format of a string is:
# <string> : <textlen> <text>
(len,) = struct.unpack_from("!i",bytes)
#info(self.id+"string len: " + str(len))
# now parse out the text of the string
format = "!"+str(len)+"s"
#info(self.id+"format is "+format)
(chat,) = struct.unpack_from(format,bytes[4:])
#info(self.id+"chat is: " + chat.decode("utf-8"))
return(chat.decode("utf-8"))
###########################################################################
# This routine parses a string component of a message: it expects
# to be passed a bytes array beginning with the string length
#
def parseState(self, bytes):
#info(self.id+"parsing STATE")
#self.printMessage(bytes)
offset = 0
# the format of a state is:
# <msg> : <msglen> <STATE> <statedata> <tail>
# <statedata> : <playersdata> <gamedata> <cards>
# <playersdata> : <p1> <p2> <p3> <p4>
# <pN> : <pstate> <pdata>
# <pstate> : {0|1|2} # unconnected, connected, joined
# <pdata> : if <pstate> == joined
# <ph> <nmstring> <clstring> <hwstring> <osstring>
# <cmtstring> <team> <numcards> <creator> <ordered>
# <dealer> <alone> <defend> <leader> <maker>
# <playoffer> <orderoffer> <dropoffer> <calloffer>
# <defendoffer> <cardinplay> [<card>] <passed>
# else
# <NULL>
# <NULL> : # no data
# <team> : {-1|0|1} # no team, team 0, or team 1
# <creator>|<ordered>|<dealer>|<alone>|<defend>|<leader>|
# <maker>|<playoffer>|<orderoffer>|<dropoffer>|<calloffer>|
# <defendoffer>|<cardinplay>|<passed>
# : <boolean>
# <gamedata> : <ingame> <suspend> <holein> <hole> <trumpset>
# <trump> <tricks> <score> <options>
# <ingame> : <boolean>
# <hstate> : <0|1|2|3|4> # pregame,hole,trump,defend,play
# <suspend> : <boolean>
# <holein> : <boolean> # true if hole card
# <hole> : <card> # only packed if <holein> true
# <card> : <value> <suit>
# <value> : {2|3|4|5|6|7|8|9|10|11|12|13|14}
# <suit> : {0|1|2|3}
# <trumpset> : <boolean> # true if trump set
# <trump> : <suit> # only packed if <trumpset> true
# <tricks> : <tricks0> <tricks1>
# <tricks0> : # tricks for team 0
# <tricks1> : # tricks for team 1
# <score> : <team0> <team1>
# <team0> : # score of team 0
# <team1> : # score of team 1
# <options> : <defend> <aloneonorder> <screw>
# <defend>|<aloneonorder>|<screw> : <boolean>
# <cards> : <numcards> <card1> .. <cardN>
# <cardN> : <value> <suit>
# we pass a slice of the bytes array with the <STATE> removed;
# parseStatePlayer() will return the parsed length, which we'll
# then use to compose further slices to parse the game and cards
offset += self.parseStatePlayer(bytes[4:])
# next we parse the game state, for which we use the offset
# returned from the parseStatePlayer() routine to build a new
# slice of the bytes array
#info("")
offset += self.parseStateGame(bytes[4+offset:])
# next we parse the cards, which may number 0 if we haven't been
# dealt any yet
#info("")
offset += self.parseStateCards(bytes[4+offset:])
# check that we have a valid tail
(tail1,tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseState()")
return False
return True
###########################################################################
# This routine parses the player data of the <STATE> message
#
def parseStatePlayer(self, bytes):
#debug(self.id+"parsing player STATE")
offset = 0
#info("")
offset += self.parseStatePlayerN(bytes[offset:],0)
#info("")
offset += self.parseStatePlayerN(bytes[offset:],1)
#info("")
offset += self.parseStatePlayerN(bytes[offset:],2)
#info("")
offset += self.parseStatePlayerN(bytes[offset:],3)
return offset
###########################################################################
# This reads the N'th player state information
#
def parseStatePlayerN(self, bytes, n):
#debug(self.id+"parsing player STATE for player %d" % (n))
offset = 0
# The player data looks like this:
# <playersdata> : <p1> <p2> <p3> <p4>
# <pN> : <pstate> <pdata>
# <pstate> : {0|1|2} # unconnected, connected, joined
# <pdata> : if <pstate> == joined
# <ph> <nmstring> <clstring> <hwstring> <osstring>
# <cmtstring> <team> <numcards> <creator> <ordered>
# <dealer> <alone> <defend> <leader> <maker>
# <playoffer> <orderoffer> <dropoffer> <calloffer>
# <defendoffer> <cardinplay> [<card>] <passed>
# else
# <NULL>
# <NULL> : # no data
# <team> : {-1|0|1} # no team, team 0, or team 1
# <creator>|<ordered>|<dealer>|<alone>|<defend>|<leader>|<maker>
# <playoffer>|<orderoffer>|<dropoffer>|<calloffer>|<defendoffer>
# <cardinplay> <passed>
# : <boolean>
#
# pull player 0 state: 0 is unconnected, 1 is connected, 2 is joined;
# if the value is 2, there will be further player data
(self.state[n]['state'],) = struct.unpack_from("!i",bytes)
offset += 4 # track the offset into the bytes array
# if this is our state, promote it up
if n == self.playerhandle:
self.state['state'] = self.state[n]['state']
# if player state is 2 (ie. connected), then read the rest of the info
if self.state[n]['state'] == 2:
# get the player handle: not sure why I duped this, since the
# handle is implicit in the order, but anyway...
(ph,) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the name
self.state[ph]['name'] = self.parseString(bytes[offset:])
offset += 4+len(self.state[ph]['name'])
#info(self.id+"player name is " + self.state[ph]['name'])
# get the client name
self.state[ph]['clientname'] = self.parseString(bytes[offset:])
offset += 4+len(self.state[ph]['clientname'])
# get the client hardware
self.state[ph]['hardware'] = self.parseString(bytes[offset:])
offset += 4+len(self.state[ph]['hardware'])
# get the OS
self.state[ph]['os'] = self.parseString(bytes[offset:])
offset += 4+len(self.state[ph]['os'])
# get the comment
self.state[ph]['comment'] = self.parseString(bytes[offset:])
offset += 4+len(self.state[ph]['comment'])
# get the team number
(self.state[ph]['team'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the number of cards
(self.state[ph]['numcards'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the creator boolean
(self.state[ph]['creator'],) = \
struct.unpack_from("!i",bytes[offset:])
if ph == self.playerhandle:
self.state['creator'] = self.state[ph]['creator']
offset += 4
# get the ordered boolean
(self.state[ph]['ordered'],) = \
struct.unpack_from("!i",bytes[offset:])
if self.state[ph]['ordered'] == 1:
self.state['orderer'] = ph
offset += 4
# get the dealer boolean
(self.state[ph]['dealer'],) = \
struct.unpack_from("!i",bytes[offset:])
if self.state[ph]['dealer'] == 1:
self.state['dealer'] = ph
offset += 4
# get the alone boolean
(self.state[ph]['alone'],) = struct.unpack_from("!i",bytes[offset:])
if self.state[ph]['alone'] == 1:
self.state['aloner'] = ph
offset += 4
# get the defend boolean
(self.state[ph]['defend'],) = \
struct.unpack_from("!i",bytes[offset:])
if self.state[ph]['defend'] == 1:
self.state['defender'] = ph
offset += 4
# get the leader boolean
(self.state[ph]['leader'],) = \
struct.unpack_from("!i",bytes[offset:])
if self.state[ph]['leader'] == 1:
self.state['leader'] = ph
offset += 4
# get the maker boolean
(self.state[ph]['maker'],) = struct.unpack_from("!i",bytes[offset:])
if self.state[ph]['maker'] == 1:
self.state['maker'] = ph
offset += 4
# get the playoffer boolean
(self.state[ph]['playoffer'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the orderoffer boolean
(self.state[ph]['orderoffer'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the dropoffer boolean
(self.state[ph]['dropoffer'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the calloffer boolean
(self.state[ph]['calloffer'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the defendoffer boolean
(self.state[ph]['defendoffer'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the cardinplay boolean
(self.state[ph]['cardinplay'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
# if there is a card in play, read it
if self.state[ph]['cardinplay'] == 1:
(value,suit) = struct.unpack_from("!ii",bytes[offset:])
offset += 8
self.state[ph]['card'] = Card(value=value,suit=suit)
# get whether they've passed or not
(self.state[ph]['passed'],) = \
struct.unpack_from("!i",bytes[offset:])
offset += 4
return offset
###########################################################################
# This routine parses the game data of the <STATE> message
#
def parseStateGame(self, bytes):
#debug(self.id+"parsing game STATE")
#self.printMessage(bytes)
offset = 0
# The game data looks like this:
# <gamedata> : <ingame> <hstate> <suspend> <holein> <hole> <trumpset>
# <trump> <tricks> <score> <options>
# <ingame> : <boolean>
# <hstate> : <0|1|2|3|4> # pregame,hole,trump,defend,play
# <suspend> : <boolean>
# <holein> : <boolean> # true if hole card
# <hole> : <card> # only packed if <holein> true
# <card> : <value> <suit>
# <value> : {2|3|4|5|6|7|8|9|10|11|12|13|14}
# <suit> : {0|1|2|3}
# <trumpset> : <boolean> # true if trump set
# <trump> : <suit> # only packed if <trumpset> true
# <tricks> : <tricks0> <tricks1>
# <tricks0> : # tricks for team 0
# <tricks1> : # tricks for team 1
# <score> : <team0> <team1>
# <team0> : # score of team 0
# <team1> : # score of team 1
# <options> : <defend> <aloneonorder> <screw>
# <defend>|<aloneonorder>|<screw> : <boolean>
# get the ingame boolean
(self.state['ingame'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the hand state: 0, 1, 2, 3, or 4, corresponding to a hand state
# of pregame (hands haven't been dealt yet), hole (hole card ordering
# is available), trump (arbitrary trump can be called), defend (defend
# alone is on offer), play (game is underway)
(self.state['hstate'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the suspend state: this would be true only if the number of
# players drops below 4
(self.state['suspend'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# get the hole card available state: this would be true if there is a
# a hole card on offer
(self.state['holein'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# if there is a hole card on offer, read it
if self.state['holein'] == 1:
#info(self.id+"parsing hole card")
(value,suit) = struct.unpack_from("!ii",bytes[offset:])
self.state['hole'] = Card(value=value,suit=suit)
offset += 8
# read whether trump has been set
(self.state['trumpset'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
# if it has, read the trump suit
if self.state['trumpset'] == 1:
(self.state['trump'],) = struct.unpack_from("!i",bytes[offset:])
offset += 4
#info("")
#info(self.id+"trump is " + Card.suitName(self.state['trump']))
# and set the number of tricks for each team
(tricks0,tricks1) = struct.unpack_from("!ii",bytes[offset:])
offset += 8
# store the previous us and them tricks, so we can compute deltas
prevus = self.state['ustricks']
prevthem = self.state['themtricks']
# set the tricks as an "ustricks" and "themtricks", to make things
# easier to parse later
if self.team == 1:
self.state['ustricks'] = tricks0
self.state['themtricks'] = tricks1
elif self.team == 2:
self.state['ustricks'] = tricks1
self.state['themtricks'] = tricks0
# if the tricks have changed, compute the delta: either ustricks
# has changed, or themtricks, but can't (shouldn't) be both
if prevus != self.state['ustricks']:
self.state['trickdelta'] = self.state['ustricks'] - prevus
if prevthem != self.state['themtricks']:
self.state['trickdelta'] = -1*(self.state['themtricks'] - prevthem)
# similarly, parse the score values into usscore and themscore
(score0,score1) = struct.unpack_from("!ii",bytes[offset:])
offset += 8
# store the previous us and them scores, so we can compute deltas
prevus = self.state['usscore']
prevthem = self.state['themscore']
# set the scores as an "usscore" and "themscore", to make things
# easier to parse later
if self.team == 1:
self.state['usscore'] = score0
self.state['themscore'] = score1
elif self.team == 2:
self.state['usscore'] = score1
self.state['themscore'] = score0
# if the score has changed, compute the delta: either usscore
# has changed, or themscore, but can't (shouldn't) be both
if prevus != self.state['usscore']:
self.state['scoredelta'] = self.state['usscore'] - prevus
if prevthem != self.state['themscore']:
self.state['scoredelta'] = -1*(self.state['themscore'] - prevthem)
# and then read a bunch of options
(self.state['defend'],self.state['aloneonorder'],self.state['screw'],)\
= struct.unpack_from("!iii",bytes[offset:])
offset += 12
return offset
###########################################################################
# This reads the cards information in the state message
#
def parseStateCards(self, bytes):
#debug(self.id+"parsing cards STATE")
#self.printMessage(bytes)
offset = 0
# The cards data looks like this:
# <cards> : <numcards> <card1> .. <cardN>
# <cardN> : <value> <suit>
# get the number of cards to be read
(self.state['numcards'],) = struct.unpack_from("!i",bytes)
offset += 4
# if we have a non-zero number of cards, read them
self.hand = list([])
for i in range(self.state['numcards']):
(value,suit) = struct.unpack_from("!ii",bytes[offset:])
self.hand.append(Card(value=value,suit=suit))
offset += 8
return offset
###########################################################################
# This routine parses a DEAL message: this message is sent after cards
# for the deal are completed. The state structure for the player
# receiving the deal message should be fully populated
#
def parseDeal(self, bytes):
debug("")
debug(self.id+"parsing DEAL")
#self.printMessage(bytes)
# the format of a DEAL message is:
# <msg> : <msglen> <DEAL> <tail>
# it's really just a notification message, so check we have a valid
# tail and otherwise do nothing
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDeal()")
return False
# at this point we've received and parsed the state message with
# our hand details in it: we will want to know our original hand
# later, to run stats on it, so we record the original hand now
self.originalHand = self.hand
return True
###########################################################################
# This routine parses a STARTDENY message
#
def parseStartDeny(self, bytes):
#debug(self.id+"parsing STARTDENY")
#self.printMessage(bytes)
# the format of a STARTDENY message is:
# <msg> : <msglen> <STARTDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseStartDeny()")
return False
info("")
info(self.id+"uh-oh, got a STARTDENY message: " + message)
return False
###########################################################################
# This routine parses an ORDEROFFER message
#
def parseOrderOffer(self, bytes):
debug(self.id+"parsing ORDEROFFER")
#self.printMessage(bytes)
# the format of an ORDEROFFER message is:
# <msg> : <msglen> <ORDEROFFER> <ph> <tail>
# it's really just a notification message, unless we're the <ph>
(msg, ph) = struct.unpack_from("!ii",bytes)
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseOrderOffer()")
return False
# if the person offered the order is us, call sendOrderPass()
if ph == self.playerhandle:
self.sendOrderPass()
return True
###########################################################################
# This routine parses a ORDERDENY message
#
def parseOrderDeny(self, bytes):
#debug(self.id+"parsing ORDERDENY")
#self.printMessage(bytes)
# the format of a ORDERDENY message is:
# <msg> : <msglen> <ORDERDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDefendDeny()")
return False
info("")
info(self.id+"uh-oh, got a ORDERDENY message: " + message)
return False
###########################################################################
# This routine parses a CALLOFFER message
#
def parseCallOffer(self, bytes):
debug(self.id+"parsing CALLOFFER")
#self.printMessage(bytes)
# the format of an CALLOFFER message is:
# <msg> : <msglen> <CALLOFFER> <ph> <tail>
# it's really just a notification message, unless we're the <ph>
(msg, ph) = struct.unpack_from("!ii",bytes)
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseOrderOffer()")
return False
# if the person offered the order is us, call sendOrderPass()
if ph == self.playerhandle:
self.sendCallPass()
return True
###########################################################################
# This routine parses a CALLDENY message
#
def parseCallDeny(self, bytes):
#debug(self.id+"parsing CALLDENY")
#self.printMessage(bytes)
# the format of a CALLDENY message is:
# <msg> : <msglen> <CALLDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDefendDeny()")
return False
info("")
info(self.id+"uh-oh, got a CALLDENY message: " + message)
return False
###########################################################################
# This routine parses a DROPOFFER message
#
def parseDropOffer(self, bytes):
#debug(self.id+"parsing DROPOFFER")
#self.printMessage(bytes)
# the format of an DROPOFFER message is:
# <msg> : <msglen> <DROPOFFER> <ph> <tail>
# it's really just a notification message, unless we're the <ph>
(msg, ph) = struct.unpack_from("!ii",bytes)
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDropOffer()")
return False
# if the person offered the drop is us, call sendDrop()
if ph == self.playerhandle:
self.sendDrop()
return True
###########################################################################
# This routine parses a DROPDENY message
#
def parseDropDeny(self, bytes):
#debug(self.id+"parsing DROPDENY")
#self.printMessage(bytes)
# the format of a DROPDENY message is:
# <msg> : <msglen> <DROPDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDefendDeny()")
return False
info("")
info(self.id+"uh-oh, got a DROPDENY message: " + message)
return False
###########################################################################
# This routine parses a DEFENDOFFER message
#
def parseDefendOffer(self, bytes):
#debug(self.id+"parsing DEFENDOFFER")
#self.printMessage(bytes)
# the format of an DEFENDOFFER message is:
# <msg> : <msglen> <DEFENDOFFER> <ph> <tail>
# it's really just a notification message, unless we're the <ph>
(msg, ph) = struct.unpack_from("!ii",bytes)
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDefendOffer()")
return False
# if the person offered the defend is us, call sendDefend()
if ph == self.playerhandle:
info(self.id+"declining defend alone")
self.sendDefend()
return True
###########################################################################
# This routine parses a DEFENDDENY message
#
def parseDefendDeny(self, bytes):
#debug(self.id+"parsing DEFENDDENY")
#self.printMessage(bytes)
# the format of a DEFENDDENY message is:
# <msg> : <msglen> <DEFENDDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDefendDeny()")
return False
info("")
info(self.id+"uh-oh, got a DEFENDDENY message: " + message)
return False
###########################################################################
# This routine parses a PLAYOFFER message
#
def parsePlayOffer(self, bytes):
#info(self.id+"parsing PLAYOFFER")
#self.printMessage(bytes)
# the format of an PLAYOFFER message is:
# <msg> : <msglen> <PLAYOFFER> <ph> <tail>
# it's really just a notification message, unless we're the <ph>
(msg, ph) = struct.unpack_from("!ii",bytes)
#info("")
#info(self.id+"got PLAYOFFER for %s" % (self.state[ph]['name']))
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseDropOffer()")
return False
# if the person offered the play is us, call sendPlay()
if ph == self.playerhandle:
self.sendPlay()
return True
###########################################################################
# This routine parses a PLAYDENY message
#
def parsePlayDeny(self, bytes):
#debug(self.id+"parsing PLAYDENY")
#self.printMessage(bytes)
# the format of a PLAYDENY message is:
# <msg> : <msglen> <PLAYDENY> <string> <tail>
# where the string explains why it was denied
message = self.parseString(bytes[4:-2])
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parsePlayDeny()")
return False
info("")
info(self.id+"uh-oh, got a PLAYDENY message: " + message)
return False
###########################################################################
# This routine parses a TRICKOVER message
#
def parseTrickOver(self, bytes):
#debug(self.id+"parsing TRICKOVER")
#self.printMessage(bytes)
# the format of a TRICKOVER message is:
# <msg> : <msglen> <TRICKOVER> <tail>
# ie. it's just an alert, so no need to parse anything out of it
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseTrickOver()")
return False
# we don't want to clutter the log by reporting all instances
# of the trick over message, so we only print it for the maker
if self.playerhandle == self.state['maker']:
if self.state['trickdelta'] < 0: wl="lost"
elif self.state['trickdelta'] > 0: wl="won"
else: wl="bad bad bad"
info(self.id+"trick is over, we %s, now %d to %d"
% (wl,self.state['ustricks'],self.state['themtricks']))
# increment the trick counter for the id string
self.tcount += 1
self.setId()
return True
###########################################################################
# This routine parses a HANDOVER message
#
def parseHandOver(self, bytes):
#info("")
#info(self.id+"parsing HANDOVER")
#self.printMessage(bytes)
# the format of a HANDOVER message is:
# <msg> : <msglen> <HANDOVER> <tail>
# ie. it's just an alert, so no need to parse anything out of it
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseHandOver()")
return False
# if we were the maker, print some info and then record the score
# delta for this hand
if self.playerhandle == self.state['maker']:
info("")
info(self.id+"hand is over")
self.printScore()
info(self.id+"score delta: %d" % (self.state['scoredelta']))
info(self.id+"original hand: %s, trump: %s"
% (self.printHand(self.originalHand),
Card.suitName(self.state['trump'])))
# log our data in a thread-safe fashion
self.lock.acquire()
remap = ""
try:
remap = self.record.addHand(
self.originalHand, self.state['trump'],
self.state['scoredelta'], self)
finally:
self.lock.release()
# log the remapped hand: makes it easier to debug things later
info(self.id+"remapped hand: %s" % (remap))
# clear the orderer info
self.state['orderer'] = -1
# increment the hand and trick counters for the id string
self.hcount += 1
self.tcount = 0
self.setId()
return True
###########################################################################
# This routine parses a GAMEOVER message
#
def parseGameOver(self, bytes):
#info("")
#info(self.id+"parsing GAMEOVER")
#self.printMessage(bytes)
# the format of a GAMEOVER message is:
# <msg> : <msglen> <GAMEOVER> <tail>
# ie. it's just an alert, so no need to parse anything out of it
# check we have a valid tail
(tail1, tail2) = struct.unpack("!BB",bytes[-2:])
if tail1 != self.messageId['TAIL1'] or tail2 != self.messageId['TAIL2']:
error(self.id+"bad tail value in parseHandOver()")
return False
# we don't want to clutter the log by reporting all instances
# of the trick over message, so we only print it for the maker
if self.playerhandle == self.state['maker']:
info("")
info(self.id+"game is over")
self.printScore()
info("")
# log our data in a thread-safe fashion
self.lock.acquire()
try:
self.record.addGame()
finally:
self.lock.release()
# we set the new game, hand, and trick values: this is really
# mostly useless, since when the game is over, the player object
# is going to deleted, but I think it's useful to do this for
# completeness
self.gcount += 1
self.hcount = 0
self.tcount = 0
self.setId()
# return False to indicate this client is finished
return False
###########################################################################
# This routine parses a random bad message
#
def badMessage(self, bytes):
#debug(self.id+"parsing bad message")
#self.printMessage(bytes)
return False
###########################################################################
# this takes a byte array and displays it as a series of bytes, useful
# for decoding and debugging messages
#
def printMessage(self, message):
print()
print("decoded message:")
hex_string = "".join("%02x " % b for b in message)
print("hex: " + hex_string)
print()
###########################################################################
# this prints out all the cards in our hand
#
def printHand(self,hand):
string = ""
sep = ""
for i in hand:
string += sep + i
sep = " "
return string
###########################################################################
# This takes a suit (the lead suit), and returns the set of cards from
# the player's hand which can be played to legally follow it. So if the
# player has one or more cards of that suit, the returned set will contain
# those cards, and if the player has no cards of that suit, then all
def followCards(self):
# begin by determining who the leader of the hand was
leader = -1
for i in (0,1,2,3):
if self.state[i]['leader'] == 1:
leader = i
# set the trump and complimentary suits
trumpsuit = self.state['trump']
compsuit = Card.suitComp(self.state['trump'])
# set the leadsuit to the suit of the lead card, unless the lead
# card is the left (ie. the J of compsuit), in which case set the
# leadsuit to trump
leadsuit = self.state[leader]['card'].suit
if leadsuit == compsuit and \
self.state[leader]['card'].value == Card.nameValue("J"):
leadsuit = trumpsuit
# step through the player's hand: anything with the same suit
# gets added to the playable cards list
playable = list([])
for card in self.hand:
# put the suit and value of this card into temporary variables
csuit = card.suit
cvalue = card.value
# if the card value is a J and its suit is the compsuit (ie.
# the complimentary suit of trump), then rewrite the suit as
# trump
if cvalue == Card.nameValue("J") and csuit == compsuit:
csuit = trumpsuit
# now if the possible-remapped csuit value matches the lead
# suit, add the card to the playable hand
if csuit == leadsuit:
playable.append(card)
# if we have no playable cards by suit, then we can play anything
#info(self.id+"before playable cards: " + self.printHand(playable))
if len(playable) == 0:
playable = self.hand.copy()
# print the hand
info(self.id+"playable: " + self.printHand(playable)
+ ", lead: " + Card.suitName(leadsuit)
+ ", trump: " + Card.suitName(trumpsuit) )
# generate some stats for follow requirements in a thread-safe way
self.lock.acquire()
try:
self.record.addFollow(len(self.hand),len(playable))
finally:
self.lock.release()
return playable
###########################################################################
# This takes a card and removes it from the player's hand: we need to
# do it like this because sometimes we're working with a copy of the
# card (ie. when we're using playable sets to follow), so we need to
# remove by value and not by reference
#
def removeCard(self, card):
# get the card value and suit
value = card.value
suit = card.suit
# loop across all cards in the hand
for card in self.hand:
if card.value == value and card.suit == suit:
self.hand.remove(card)
| lgpl-2.1 | 2,412,021,947,534,853,000 | 35.661017 | 116 | 0.518015 | false | 3.989058 | false | false | false |
Pl-M/zim-icontags-plugin | 0.67/icontags/tagsmanager.py | 1 | 9763 | # -*- coding: utf-8 -*-
# Copyright 2016-2017 Pavel_M <[email protected]>,
# released under the GNU GPL version 3.
# This is a plugin for Zim-wiki program (zim-wiki.org) by Jaap Karssenberg.
import gtk
import pango
from zim.notebook import Path
from zim.gui.widgets import ScrolledWindow, Dialog, SingleClickTreeView
from zim.notebook.index.tags import TagsView
from .iconutils import render_icon, RESERVED_ICON_NAMES, ICONS
class TagsManagerDialog(Dialog):
'''
Tags Manager dialog to do some basic operations with
tags and to set icons for tags.
'''
def __init__(self, window, index, uistate):
Dialog.__init__(self, window, _('Tags Manager (IconTags plugin)'), # T: dialog title
buttons=gtk.BUTTONS_OK_CANCEL,
defaultwindowsize=(450, 400) )
# Don't confuse with local variable 'self.uistate',
# which is already determined for this class.
self._window = window
self.plugin_uistate = uistate
self.show_pages_button = gtk.ToggleButton('Show Pages')
self.show_pages_button.connect('toggled', self.toggle_show_pages)
self.add_extra_button(self.show_pages_button)
self.treeview_tags = TagsManagerTagsView(index, self.plugin_uistate['Icons for Tags'])
self.treeview_pages = TagsManagerPagesView(index, window.ui)
self.scrolled_widget = ScrolledWindow(self.treeview_tags)
self.vbox.pack_start(self.scrolled_widget, True)
self.treeview_tags.connect('row-activated', self.get_tag)
# Enable left/right arrows to navigate between views.
self.treeview_tags.connect('key-release-event', self.toggle_view)
self.treeview_pages.connect('key-release-event', self.toggle_view)
# Update if tags change.
self.connectto_all(index.update_iter.tags, (
('tag-row-inserted', lambda *a: self.update()),
('tag-row-deleted', lambda *a: self.update())
))
self.show_all()
def toggle_view(self, treeview, event):
'''Change view by pressing Left/Right arrows on keyboard.'''
key = gtk.gdk.keyval_name(event.keyval)
if key == 'Right' and treeview == self.treeview_tags:
self.show_pages_button.set_active(True)
elif key == 'Left' and treeview == self.treeview_pages:
self.show_pages_button.set_active(False)
def get_tag(self, treeview, path, column):
'''Place the tag to the cursor position.'''
model = treeview.get_model()
tag = '@' + model.get_value(model.get_iter(path), treeview.TAG_COL)
self._window.pageview.view.get_buffer().insert_tag_at_cursor(tag)
def update(self):
'''Update both tags and pages trees.'''
self.treeview_tags.refill_model()
self.treeview_pages.refill_model(self.treeview_pages.current_tag)
def toggle_show_pages(self, button):
''' 'Show Pages' button is clicked.'''
for widget in self.scrolled_widget.get_children():
self.scrolled_widget.remove(widget)
model, iter = self.treeview_tags.get_selection().get_selected()
if button.get_active():
self.scrolled_widget.add(self.treeview_pages)
# Set values for 'self.treeview_pages'.
if iter:
selected_tag = model.get_value(iter, self.treeview_tags.TAG_COL)
self.treeview_pages.refill_model(selected_tag)
else:
self.scrolled_widget.add(self.treeview_tags)
# Scroll to tag in 'self.treeview_tags'.
if iter:
path = model.get_path(iter)
self.treeview_tags.scroll_to_cell(path)
self.show_all()
def do_response_ok(self, *a):
''' OK button is pressed.'''
self.plugin_uistate['Icons for Tags'] = self.treeview_tags.icons_for_tags
self.result = True
return True
class TagsManagerTagsView(SingleClickTreeView):
'''
Class to show tags with icons in a treeview.
Is used in Tags Manager Dialog.
'''
TAG_COL = 0 # column with tag name
ICON_COL = 1 # column with icon image
ICON_NAME = 2 # column to sort ICON_COL
N_PAGES_COL = 3 # column to show number of pages
def __init__(self, index, preferences):
self.index = index
# Icons corresponding to tags, prevent unnecessary changing.
self.icons_for_tags = preferences.copy()
self.model = gtk.ListStore(str, gtk.gdk.Pixbuf, str, int) # TAG_COL, ICON_COL, ICON_NAME, N_PAGES_COL
SingleClickTreeView.__init__(self, self.model)
cells = (('Tags', self.TAG_COL, True),
('Pages', self.N_PAGES_COL, False))
for name, col_id, expand in cells:
cell = gtk.CellRendererText()
cell.set_property('ellipsize', pango.ELLIPSIZE_END)
cell.set_property('cell-background', 'white')
col = gtk.TreeViewColumn(name, cell)
col.set_attributes(cell, text = col_id)
col.set_resizable(expand)
col.set_expand(expand)
col.set_sort_column_id(col_id)
self.append_column(col)
cell = gtk.CellRendererPixbuf()
cell.set_property('cell-background', 'white')
col = gtk.TreeViewColumn('Icon', cell)
col.set_attributes(cell, pixbuf = self.ICON_COL)
col.set_resizable(False)
col.set_expand(False)
col.set_sort_column_id(self.ICON_NAME)
self.append_column(col)
self.refill_model()
def row_activated(self, path, column):
if column.get_sort_column_id() != self.ICON_NAME:
return False
def set_icon(path, icon_name = None):
tag = self.model.get_value(self.model.get_iter(path), self.TAG_COL)
tag = unicode(tag) # to use with non latin characters
if icon_name:
self.icons_for_tags[tag] = icon_name
else:
self.icons_for_tags.pop(tag, None)
self.refill_model()
return True
menu = gtk.Menu()
item = gtk.MenuItem('None')
item.connect('activate', lambda item: set_icon(path))
menu.append(item)
icons = sorted([(a, render_icon(b)) for (a,b) in ICONS.iteritems()
if a not in RESERVED_ICON_NAMES])
for name, icon in icons:
image = gtk.Image()
image.set_from_pixbuf(icon)
item = gtk.ImageMenuItem(name)
item.set_use_underline(False)
item.set_image(image)
item.zim_icon_name = name
item.connect('activate', lambda item: set_icon(path, item.zim_icon_name))
menu.append(item)
menu.show_all()
menu.popup(None, None, None, 3, 0)
def refill_model(self):
'''Update model.'''
self.model.clear()
tagview = TagsView.new_from_index(self.index)
for tag in [a.name for a in tagview.list_all_tags()]:
if tag in self.icons_for_tags:
icon_name = self.icons_for_tags[tag]
rendered_icon = render_icon(ICONS[icon_name])
else:
icon_name, rendered_icon = None, None
self.model.append([tag, rendered_icon, icon_name,
tagview.n_list_pages(tag)])
# Sort tags by number of pages and then by names.
self.model.set_sort_column_id(self.TAG_COL, order = gtk.SORT_ASCENDING)
self.model.set_sort_column_id(self.N_PAGES_COL, order = gtk.SORT_DESCENDING)
class TagsManagerPagesView(SingleClickTreeView):
'''
Class to show pages for a selected tag.
Is used in Tags Manager Dialog.
'''
PAGE_COL = 0 # column with page name
TAGS_N_COL = 1 # column with number of tags for the page
TAGS_COL = 2 # column with all tags for the page
def __init__(self, index, ui):
self.tagview = TagsView.new_from_index(index)
self.ui = ui
self.current_tag = None
self.model = gtk.ListStore(str, int, str) # PAGE_COL, TAGS_COL
SingleClickTreeView.__init__(self, self.model)
cells = (('Page', self.PAGE_COL, True),
('N', self.TAGS_N_COL, False),
('Tags', self.TAGS_COL, True))
for name, col_id, expand in cells:
cell = gtk.CellRendererText()
cell.set_property('ellipsize', pango.ELLIPSIZE_END)
cell.set_property('cell-background', 'white')
col = gtk.TreeViewColumn(name, cell)
col.set_attributes(cell, text = col_id)
col.set_resizable(expand)
col.set_expand(expand)
col.set_sort_column_id(col_id)
self.append_column(col)
self.connect('row-activated', lambda treeview, path, column:
self.row_activated(path, column))
self.refill_model()
def refill_model(self, tag = None):
'''Update model.'''
self.model.clear()
self.current_tag = tag
if tag:
tag = unicode(tag) # to use with non latin names
for page in self.tagview.list_pages(tag):
# Exclude current tag to not include it in sorting.
tags = [tag] + sorted([a.name for a in self.tagview.list_tags(page)
if a.name != tag])
self.model.append([page.name, len(tags), ', '.join(tags)])
# Sort pages by names.
self.model.set_sort_column_id(self.PAGE_COL, order = gtk.SORT_DESCENDING)
def row_activated(self, path, column):
'''Open page in the view.'''
name = self.model.get_value(self.model.get_iter(path), self.PAGE_COL)
self.ui.open_page(Path(name))
| gpl-3.0 | 2,850,075,525,323,281,000 | 36.988327 | 109 | 0.596128 | false | 3.662041 | false | false | false |
cloudbase/coriolis | coriolis/api/v1/views/endpoint_options_view.py | 1 | 1168 | # Copyright 2020 Cloudbase Solutions Srl
# All Rights Reserved.
import itertools
def _format_opt(req, option, keys=None):
def transform(key, value):
if keys and key not in keys:
return
yield (key, value)
return dict(itertools.chain.from_iterable(
transform(k, v) for k, v in option.items()))
def destination_minion_pool_options_collection(req, destination_pool_options):
formatted_opts = [
_format_opt(req, opt) for opt in destination_pool_options]
return {'destination_minion_pool_options': formatted_opts}
def destination_options_collection(req, destination_options):
formatted_opts = [
_format_opt(req, opt) for opt in destination_options]
return {'destination_options': formatted_opts}
def source_minion_pool_options_collection(req, source_pool_options):
formatted_opts = [
_format_opt(req, opt) for opt in source_pool_options]
return {'source_minion_pool_options': formatted_opts}
def source_options_collection(req, source_options):
formatted_opts = [
_format_opt(req, opt) for opt in source_options]
return {'source_options': formatted_opts}
| agpl-3.0 | 4,883,837,704,224,303,000 | 29.736842 | 78 | 0.691781 | false | 3.707937 | false | false | false |
kells1986/Betfair-Finktank-Tool | bft/bfpy/bfwsdl/bfglobal.py | 4 | 132938 | #!/usr/bin/env python
# -*- coding: latin-1; py-indent-offset:4 -*-
################################################################################
#
# This file is part of BfPy
#
# BfPy is a Python library to communicate with the Betfair Betting Exchange
# Copyright (C) 2010 Daniel Rodriguez (aka Daniel Rodriksson)
# Copyright (C) 2011 Sensible Odds Ltd.
#
# You can learn more and contact the author at:
#
# http://code.google.com/p/bfpy/
#
# BfPy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BfPy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BfPy. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
'''
BfPy wsdsl variables holding the Betfair WSDL definitions
'''
#
# Variables containing the Betfair WSDL files
#
BFGlobalService = '''
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2003-2004 The Sporting Exchange Limited. All rights reserved.
The presentation, distribution or other dissemination of the information contained herein by The Sporting Exchange Limited (Betfair) is not a license, either expressly or impliedly, to any intellectual property owned or controlled by Betfair.
Save as provided by statute and to the fullest extent permitted by law, the following provisions set out the entire liability of Betfair (including any liability for the acts and omissions of its employees, agents and sub-contractors) to the User in respect of the use of its WSDL file whether in contract, tort, statute, equity or otherwise:
(a) The User acknowledges and agrees that (except as expressly provided in this Agreement) the WSDL is provided "AS IS" without warranties of any kind (whether express or implied);
(b) All conditions, warranties, terms and undertakings (whether express or implied, statutory or otherwise relating to the delivery, performance, quality, uninterrupted use, fitness for purpose, occurrence or reliability of the WSDL are hereby excluded to the fullest extent permitted by law; and
(c) Betfair shall not be liable to the User for loss of profit (whether direct or indirect), loss of contracts or goodwill, lost advertising, loss of data or any type of special, indirect, consequential or economic loss (including loss or damage suffered by the User as a result of an action brought by a third party) even if such loss was reasonably foreseeable or Betfair had been advised of the possibility of the User incurring such loss.
No exclusion or limitation set out in this Agreement shall apply in the case of fraud or fraudulent concealment, death or personal injury resulting from the negligence of either party or any of its employees, agents or sub-contractors; and/or any breach of the obligations implied by (as appropriate) section 12 of the Sale of Goods Act 1979, section 2 of the Supply of Goods and Services Act 1982 or section 8 of the Supply of Goods (Implied Terms) Act 1973.
-->
<wsdl:definitions name="BFGlobalService"
targetNamespace="http://www.betfair.com/publicapi/v3/BFGlobalService/"
xmlns:types="http://www.betfair.com/publicapi/types/global/v3/"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:tns="http://www.betfair.com/publicapi/v3/BFGlobalService/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<wsdl:types>
<xsd:schema targetNamespace="http://www.betfair.com/publicapi/types/global/v3/">
<xsd:import namespace="http://schemas.xmlsoap.org/soap/encoding/"/>
<xsd:complexType name="LoginResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="currency" nillable="true" type="xsd:string"/>
<xsd:element name="errorCode" type="types:LoginErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="validUntil" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType abstract="true" name="APIResponse">
<xsd:sequence>
<xsd:element name="header" nillable="true" type="types:APIResponseHeader"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="APIResponseHeader">
<xsd:sequence>
<xsd:element name="errorCode" type="types:APIErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="sessionToken" nillable="true" type="xsd:string"/>
<xsd:element name="timestamp" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="APIErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INTERNAL_ERROR"/>
<xsd:enumeration value="EXCEEDED_THROTTLE"/>
<xsd:enumeration value="USER_NOT_SUBSCRIBED_TO_PRODUCT"/>
<xsd:enumeration value="SUBSCRIPTION_INACTIVE_OR_SUSPENDED"/>
<xsd:enumeration value="VENDOR_SOFTWARE_INACTIVE"/>
<xsd:enumeration value="VENDOR_SOFTWARE_INVALID"/>
<xsd:enumeration value="SERVICE_NOT_AVAILABLE_IN_PRODUCT"/>
<xsd:enumeration value="NO_SESSION"/>
<xsd:enumeration value="TOO_MANY_REQUESTS"/>
<xsd:enumeration value="PRODUCT_REQUIRES_FUNDED_ACCOUNT"/>
<xsd:enumeration value="SERVICE_NOT_AVAILABLE_FOR_LOGIN_STATUS"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="LoginErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="OK_MESSAGES"/>
<xsd:enumeration value="FAILED_MESSAGE"/>
<xsd:enumeration value="INVALID_USERNAME_OR_PASSWORD"/>
<xsd:enumeration value="USER_NOT_ACCOUNT_OWNER"/>
<xsd:enumeration value="INVALID_VENDOR_SOFTWARE_ID"/>
<xsd:enumeration value="INVALID_PRODUCT"/>
<xsd:enumeration value="INVALID_LOCATION"/>
<xsd:enumeration value="LOGIN_FAILED_ACCOUNT_LOCKED"/>
<xsd:enumeration value="ACCOUNT_SUSPENDED"/>
<xsd:enumeration value="T_AND_C_ACCEPTANCE_REQUIRED"/>
<xsd:enumeration value="POKER_T_AND_C_ACCEPTANCE_REQUIRED"/>
<xsd:enumeration value="LOGIN_REQUIRE_TERMS_AND_CONDITIONS_ACCEPTANCE"/>
<xsd:enumeration value="LOGIN_UNAUTHORIZED"/>
<xsd:enumeration value="ACCOUNT_CLOSED"/>
<xsd:enumeration value="LOGIN_RESTRICTED_LOCATION"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="LoginReq">
<xsd:sequence>
<xsd:element name="ipAddress" nillable="false" type="xsd:string"/>
<xsd:element name="locationId" nillable="false" type="xsd:int"/>
<xsd:element name="password" nillable="false" type="xsd:string"/>
<xsd:element name="productId" nillable="false" type="xsd:int"/>
<xsd:element name="username" nillable="false" type="xsd:string"/>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveLIMBMessageReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="RetrieveLIMBMessageResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:RetrieveLIMBMessageErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="totalMessagesCount" nillable="false" type="xsd:int"/>
<xsd:element name="retrievePersonalMessage" type="types:RetrievePersonalLIMBMessage"/>
<xsd:element name="retrieveTCPrivacyPolicyChangeMessage" type="types:RetrieveTCPrivacyPolicyChangeLIMBMessage"/>
<xsd:element name="retrievePasswordChangeMessage" type="types:RetrievePasswordChangeLIMBMessage"/>
<xsd:element name="retrieveBirthDateCheckMessage" type="types:RetrieveBirthDateCheckLIMBMessage"/>
<xsd:element name="retrieveAddressCheckMessage" type="types:RetrieveAddressCheckLIMBMessage"/>
<xsd:element name="retrieveContactDetailsCheckMessage" type="types:RetrieveContactDetailsCheckLIMBMessage"/>
<xsd:element name="retrieveChatNameChangeMessage" type="types:RetrieveChatNameChangeLIMBMessage"/>
<xsd:element name="retrieveCardBillingAddressCheckItems" nillable="true" type="types:ArrayOfRetrieveCardBillingAddressCheckLIMBMessage"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="RetrieveLIMBMessageErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="RetrievePersonalLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="message" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveTCPrivacyPolicyChangeLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="reasonForChange" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrievePasswordChangeLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveBirthDateCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="birthDate" nillable="true" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveAddressCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="address1" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="town" nillable="true" type="xsd:string"/>
<xsd:element name="county" nillable="true" type="xsd:string"/>
<xsd:element name="zipCode" nillable="true" type="xsd:string"/>
<xsd:element name="country" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveContactDetailsCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="homeTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="workTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="mobileTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="emailAddress" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveChatNameChangeLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="chatName" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfRetrieveCardBillingAddressCheckLIMBMessage">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="3" minOccurs="0"
name="retrieveCardBillingAddressCheckLIMBMessage" nillable="true" type="types:RetrieveCardBillingAddressCheckLIMBMessage"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="RetrieveCardBillingAddressCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="enforceDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="indicator" nillable="false" type="xsd:boolean"/>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="cardShortNumber" type="xsd:string"/>
<xsd:element name="address1" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="town" nillable="true" type="xsd:string"/>
<xsd:element name="county" nillable="true" type="xsd:string"/>
<xsd:element name="zipCode" nillable="true" type="xsd:string"/>
<xsd:element name="country" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitLIMBMessageReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="password" nillable="false" type="xsd:string"/>
<xsd:element name="submitPersonalMessage" type="types:SubmitPersonalLIMBMessage"/>
<xsd:element name="submitTCPrivacyPolicyChangeMessage" type="types:SubmitTCPrivacyPolicyChangeLIMBMessage"/>
<xsd:element name="submitPasswordChangeMessage" type="types:SubmitPasswordChangeLIMBMessage"/>
<xsd:element name="submitBirthDateCheckMessage" type="types:SubmitBirthDateCheckLIMBMessage"/>
<xsd:element name="submitAddressCheckMessage" type="types:SubmitAddressCheckLIMBMessage"/>
<xsd:element name="submitContactDetailsCheckMessage" type="types:SubmitContactDetailsCheckLIMBMessage"/>
<xsd:element name="submitChatNameChangeMessage" type="types:SubmitChatNameChangeLIMBMessage"/>
<xsd:element name="submitCardBillingAddressCheckItems" nillable="true" type="types:ArrayOfSubmitCardBillingAddressCheckLIMBMessage"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="SubmitPersonalLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="acknowledgment" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitTCPrivacyPolicyChangeLIMBMessage">
<xsd:sequence>
<xsd:element name="tCPrivacyPolicyChangeAcceptance" nillable="false" type="types:PrivacyPolicyChangeResponseEnum"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="PrivacyPolicyChangeResponseEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="NO_REPLY"/>
<xsd:enumeration value="ACCEPT"/>
<xsd:enumeration value="REJECT"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="SubmitPasswordChangeLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="newPassword" nillable="true" type="xsd:string"/>
<xsd:element name="newPasswordRepeat" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitBirthDateCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="detailsCorrect" nillable="true" type="xsd:string"/>
<xsd:element name="correctBirthDate" nillable="true" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitAddressCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="detailsCorrect" nillable="true" type="xsd:string"/>
<xsd:element name="newAddress1" nillable="true" type="xsd:string"/>
<xsd:element name="newAddress2" nillable="true" type="xsd:string"/>
<xsd:element name="newAddress3" nillable="true" type="xsd:string"/>
<xsd:element name="newTown" nillable="true" type="xsd:string"/>
<xsd:element name="newCounty" nillable="true" type="xsd:string"/>
<xsd:element name="newZipCode" nillable="true" type="xsd:string"/>
<xsd:element name="newCountry" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitContactDetailsCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="detailsCorrect" nillable="true" type="xsd:string"/>
<xsd:element name="newHomeTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="newWorkTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="newMobileTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="newEmailAddress" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitChatNameChangeLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="newChatName" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfSubmitCardBillingAddressCheckLIMBMessage">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="3" minOccurs="0"
name="submitCardBillingAddressCheckLIMBMessage" nillable="true" type="types:SubmitCardBillingAddressCheckLIMBMessage"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitCardBillingAddressCheckLIMBMessage">
<xsd:sequence>
<xsd:element name="messageId" nillable="true" type="xsd:int"/>
<xsd:element name="detailsCorrect" nillable="true" type="xsd:string"/>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="newAddress1" nillable="true" type="xsd:string"/>
<xsd:element name="newAddress2" nillable="true" type="xsd:string"/>
<xsd:element name="newAddress3" nillable="true" type="xsd:string"/>
<xsd:element name="newTown" nillable="true" type="xsd:string"/>
<xsd:element name="newCounty" nillable="true" type="xsd:string"/>
<xsd:element name="newZipCode" nillable="true" type="xsd:string"/>
<xsd:element name="newCountry" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="SubmitLIMBMessageResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:SubmitLIMBMessageErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="validationErrors" nillable="true" type="types:ArrayOfLIMBValidationErrorsEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="LIMBValidationErrorsEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="INVALID_DOB"/>
<xsd:enumeration value="INVALID_ADDRESS_LINE1"/>
<xsd:enumeration value="INVALID_ADDRESS_LINE2"/>
<xsd:enumeration value="INVALID_ADDRESS_LINE3"/>
<xsd:enumeration value="INVALID_CITY"/>
<xsd:enumeration value="INVALID_COUNTY_STATE"/>
<xsd:enumeration value="INVALID_COUNTRY_OF_RESIDENCE"/>
<xsd:enumeration value="INVALID_POSTCODE"/>
<xsd:enumeration value="INVALID_HOME_PHONE"/>
<xsd:enumeration value="INVALID_WORK_PHONE"/>
<xsd:enumeration value="INVALID_MOBILE_PHONE"/>
<xsd:enumeration value="INVALID_EMAIL"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="RESERVED_PASSWORD"/>
<xsd:enumeration value="INVALID_NEW_PASSWORD"/>
<xsd:enumeration value="INVALID_TC_VERSION"/>
<xsd:enumeration value="INVALID_PRIVICY_VERSION"/>
<xsd:enumeration value="INVALID_CHATNAME"/>
<xsd:enumeration value="CHATNAME_ALREADY_TAKEN"/>
<xsd:enumeration value="INVALID_CARD_BILLING_ADDRESS_LINE_1"/>
<xsd:enumeration value="INVALID_CARD_BILLING_ADDRESS_LINE_2"/>
<xsd:enumeration value="INVALID_CARD_BILLING_ADDRESS_LINE_3"/>
<xsd:enumeration value="INVALID_CARD_BILLING_CITY"/>
<xsd:enumeration value="INVALID_CARD_BILLING_COUNTY_STATE"/>
<xsd:enumeration value="INVALID_CARD_BILLING_ZIP_CODE"/>
<xsd:enumeration value="INVALID_CARD_BILLING_COUNTRY_OF_RESIDENCE"/>
<xsd:enumeration value="NO_SUCH_PERSONAL_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_TC_PRIVACY_POLICY_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_PASSWORD_CHANGE_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_BIRTH_DATE_CHECK_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_ADDRESS_CHECK_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_CONTACT_DETAILS_CHECK_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_CHATNAME_CHENGE_MESSAGE"/>
<xsd:enumeration value="NO_SUCH_CARD_BILLING_ADDRESS_CHECK_MESSAGE"/>
<xsd:enumeration value="INVALID_PERSONAL_MESSAGE_ACKNOWLEDGMENT"/>
<xsd:enumeration value="INVALID_TC_PRIVACY_POLICY_MESSAGE_ACKNOWLEDGMENT"/>
<xsd:enumeration value="INVALID_BIRTH_DATE_CHECK_MESSAGE"/>
<xsd:enumeration value="INVALID_ADDRESS_CHECK_MESSAGE"/>
<xsd:enumeration value="INVALID_CONTACT_DETAILS_CHECK_MESSAGE"/>
<xsd:enumeration value="INVALID_CARD_BILLING_ADDRESS_CHECK_MESSAGE"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfLIMBValidationErrorsEnum">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="LIMBValidationErrorsEnum" nillable="true" type="types:LIMBValidationErrorsEnum"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="SubmitLIMBMessageErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="VALIDATION_ERRORS"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="LogoutErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK" />
<xsd:enumeration value="API_ERROR" />
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="LogoutResp">
<xsd:complexContent mixed="false">
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string" />
<xsd:element name="errorCode" type="types:LogoutErrorEnum" />
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="LogoutReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest" />
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="KeepAliveResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="apiVersion" nillable="true" type="xsd:string"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="KeepAliveReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType abstract="true" name="APIRequest">
<xsd:sequence>
<xsd:element name="header" nillable="true" type="types:APIRequestHeader"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="APIRequestHeader">
<xsd:sequence>
<xsd:element name="clientStamp" type="xsd:long"/>
<xsd:element name="sessionToken" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetEventsResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:GetEventsErrorEnum"/>
<xsd:element name="eventItems" nillable="true" type="types:ArrayOfBFEvent"/>
<xsd:element name="eventParentId" nillable="false" type="xsd:int"/>
<xsd:element name="marketItems" nillable="true" type="types:ArrayOfMarketSummary"/>
<xsd:element name="couponLinks" nillable="true" type="types:ArrayOfCouponLinks"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="GetEventsErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_EVENT_ID"/>
<xsd:enumeration value="NO_RESULTS"/>
<xsd:enumeration value="INVALID_LOCALE_DEFAULTING_TO_ENGLISH"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="BFEvent">
<xsd:sequence>
<xsd:element name="eventId" nillable="false" type="xsd:int"/>
<xsd:element name="eventName" nillable="true" type="xsd:string"/>
<xsd:element name="eventTypeId" nillable="false" type="xsd:int"/>
<xsd:element name="menuLevel" nillable="false" type="xsd:int"/>
<xsd:element name="orderIndex" nillable="false" type="xsd:int"/>
<xsd:element name="startTime" type="xsd:dateTime"/>
<xsd:element name="timezone" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfBFEvent">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="BFEvent" nillable="true" type="types:BFEvent"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="MarketSummary">
<xsd:sequence>
<xsd:element name="eventTypeId" nillable="false" type="xsd:int"/>
<xsd:element name="marketId" nillable="false" type="xsd:int"/>
<xsd:element name="marketName" nillable="true" type="xsd:string"/>
<xsd:element name="marketType" type="types:MarketTypeEnum"/>
<xsd:element name="marketTypeVariant" type="types:MarketTypeVariantEnum"/>
<xsd:element name="menuLevel" nillable="false" type="xsd:int"/>
<xsd:element name="orderIndex" nillable="false" type="xsd:int"/>
<xsd:element name="startTime" type="xsd:dateTime"/>
<xsd:element name="timezone" nillable="true" type="xsd:string"/>
<xsd:element name="venue" nillable="true" type="xsd:string"/>
<xsd:element name="betDelay" nillable="false" type="xsd:int"/>
<xsd:element name="numberOfWinners" nillable="false" type="xsd:int"/>
<xsd:element name="eventParentId" nillable="false" type="xsd:int"/>
<xsd:element name="exchangeId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="MarketTypeEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="O"/>
<xsd:enumeration value="L"/>
<xsd:enumeration value="R"/>
<xsd:enumeration value="A"/>
<xsd:enumeration value="NOT_APPLICABLE"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="MarketTypeVariantEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="D"/><!-- default -->
<xsd:enumeration value="ASL"/><!-- asian single line -->
<xsd:enumeration value="ADL"/><!-- asian double line -->
<xsd:enumeration value="COUP"/><!-- coupon -->
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfMarketSummary">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="MarketSummary" nillable="true" type="types:MarketSummary"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="CouponLink">
<xsd:sequence>
<xsd:element name="couponId" nillable="false" type="xsd:int"/>
<xsd:element name="couponName" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfCouponLinks">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="CouponLink" nillable="true" type="types:CouponLink"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetEventsReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="eventParentId" nillable="false" type="xsd:int"/>
<xsd:element name="locale" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetEventTypesResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="eventTypeItems" nillable="true" type="types:ArrayOfEventType"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="errorCode" type="types:GetEventsErrorEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="EventType">
<xsd:sequence>
<xsd:element name="id" nillable="false" type="xsd:int"/>
<xsd:element name="name" nillable="true" type="xsd:string"/>
<xsd:element name="nextMarketId" nillable="false" type="xsd:int"/>
<xsd:element name="exchangeId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfEventType">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="EventType" nillable="true" type="types:EventType"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetEventTypesReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="locale" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="MarketStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="ACTIVE"/>
<xsd:enumeration value="INACTIVE"/>
<xsd:enumeration value="CLOSED"/>
<xsd:enumeration value="SUSPENDED"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="Runner">
<xsd:sequence>
<xsd:element name="asianLineId" nillable="false" type="xsd:int"/>
<xsd:element name="handicap" nillable="false" type="xsd:double"/>
<xsd:element name="name" nillable="true" type="xsd:string"/>
<xsd:element name="selectionId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetSubscriptionInfoResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="subscriptions" nillable="true" type="types:ArrayOfSubscription"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="Subscription">
<xsd:sequence>
<xsd:element name="billingAmount" nillable="false" type="xsd:double"/>
<xsd:element name="billingDate" type="xsd:dateTime"/>
<xsd:element name="billingPeriod" type="types:BillingPeriodEnum"/>
<xsd:element name="productId" nillable="false" type="xsd:int"/>
<xsd:element name="productName" nillable="true" type="xsd:string"/>
<xsd:element name="services" nillable="true" type="types:ArrayOfServiceCall"/>
<xsd:element name="setupCharge" nillable="false" type="xsd:double"/>
<xsd:element name="setupChargeActive" nillable="false" type="xsd:boolean"/>
<xsd:element name="status" type="types:SubscriptionStatusEnum"/>
<xsd:element name="subscribedDate" type="xsd:dateTime"/>
<xsd:element name="vatEnabled" nillable="false" type="xsd:boolean"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="BillingPeriodEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="WEEKLY"/>
<xsd:enumeration value="MONTHLY"/>
<xsd:enumeration value="QUARTERLY"/>
<xsd:enumeration value="ANNUALLY"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ServiceCall">
<xsd:sequence>
<xsd:element name="maxUsages" nillable="false" type="xsd:int"/>
<xsd:element name="period" type="xsd:long"/>
<xsd:element name="periodExpiry" type="xsd:dateTime"/>
<xsd:element name="serviceType" type="types:ServiceEnum"/>
<xsd:element name="usageCount" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="ServiceEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="ADD_PAYMENT_CARD"/>
<xsd:enumeration value="DELETE_PAYMENT_CARD"/>
<xsd:enumeration value="GET_PAYMENT_CARD"/>
<xsd:enumeration value="UPDATE_PAYMENT_CARD"/>
<xsd:enumeration value="LOGIN"/>
<xsd:enumeration value="GET_BET"/>
<xsd:enumeration value="PLACE_BETS"/>
<xsd:enumeration value="WITHDRAW_TO_PAYMENT_CARD"/>
<xsd:enumeration value="EDIT_BETS"/>
<xsd:enumeration value="DEPOSIT_FROM_PAYMENT_CARD"/>
<xsd:enumeration value="CANCEL_BETS"/>
<xsd:enumeration value="DO_KEEP_ALIVE"/>
<xsd:enumeration value="GET_ACCOUNT_STATEMENT"/>
<xsd:enumeration value="LOAD_MARKET_PROFIT_LOSS"/>
<xsd:enumeration value="GET_CURRENT_BETS"/>
<xsd:enumeration value="LOAD_ACCOUNT_FUNDS"/>
<xsd:enumeration value="LOAD_BET_HISTORY"/>
<xsd:enumeration value="LOAD_DETAILED_AVAIL_MKT_DEPTH"/>
<xsd:enumeration value="GET_MARKET_TRADED_VOLUME"/>
<xsd:enumeration value="LOAD_EVENTS"/>
<xsd:enumeration value="LOAD_EVENT_TYPES"/>
<xsd:enumeration value="LOAD_MARKET"/>
<xsd:enumeration value="LOAD_MARKET_PRICES"/>
<xsd:enumeration value="LOAD_MARKET_PRICES_COMPRESSED"/>
<xsd:enumeration value="LOAD_SERVICE_ANNOUNCEMENTS"/>
<xsd:enumeration value="LOAD_SUBSCRIPTION_INFO"/>
<xsd:enumeration value="CREATE_ACCOUNT"/>
<xsd:enumeration value="CONVERT_CURRENCY"/>
<xsd:enumeration value="GET_CURRENCIES"/>
<xsd:enumeration value="FORGOT_PASSWORD"/>
<xsd:enumeration value="MODIFY_PASSWORD"/>
<xsd:enumeration value="VIEW_PROFILE"/>
<xsd:enumeration value="MODIFY_PROFILE"/>
<xsd:enumeration value="LOGOUT"/>
<xsd:enumeration value="RETRIEVE_LIMB_MESSAGE"/>
<xsd:enumeration value="SUBMIT_LIMB_MESSAGE"/>
<xsd:enumeration value="GET_MARGIN_MARKET_PRICES"/>
<xsd:enumeration value="GET_MARGIN_MARKET_PRICES_COMPRESSED"/>
<xsd:enumeration value="GENERATE_REGISTERED_MARGIN_PRICES"/>
<xsd:enumeration value="MARGINLOGIN"/>
<xsd:enumeration value="TRANSFER_FUNDS"/>
<xsd:enumeration value="ADD_VENDORSUBSCRIPTION"/>
<xsd:enumeration value="UPDATE_VENDORSUBSCRIPTION"/>
<xsd:enumeration value="CANCEL_VENDORSUBSCRIPTION"/>
<xsd:enumeration value="GET_VENDOR_USERS"/>
<xsd:enumeration value="GET_VENDORSUBSCRIPTION_INFO"/>
<xsd:enumeration value="GET_VENDOR_INFO"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfServiceCall">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="ServiceCall" nillable="true" type="types:ServiceCall"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="SubscriptionStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="ACTIVE"/>
<xsd:enumeration value="INACTIVE"/>
<xsd:enumeration value="SUSPENDED"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfSubscription">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="Subscription" nillable="true" type="types:Subscription"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetSubscriptionInfoReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="DepositFromPaymentCardResp">
<xsd:annotation>
<xsd:documentation>
Result of a DepositFromPaymentCardReq. If errorCode is set to CARD_AMOUNT_OUTSIDE_LIMIT then minAmount and maxAmount
will be set. If errorCode is set to DEPOSIT_LIMIT_EXCEEDED then maxAmount will be set.
</xsd:documentation>
</xsd:annotation>
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:PaymentsErrorEnum"/>
<xsd:element name="fee" nillable="false" type="xsd:double"/>
<xsd:element name="maxAmount" nillable="false" type="xsd:double"/>
<xsd:element name="minAmount" nillable="false" type="xsd:double"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="netAmount" nillable="false" type="xsd:double"/>
<xsd:element name="transactionId" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="PaymentsErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="ACCOUNT_SUSPENDED"/>
<xsd:enumeration value="API_ERROR"/>
<xsd:enumeration value="CARD_AMOUNT_OUTSIDE_LIMIT"/>
<xsd:enumeration value="CARD_EXPIRED"/>
<xsd:enumeration value="CARD_LOCKED"/>
<xsd:enumeration value="CARD_NOT_FOUND"/>
<xsd:enumeration value="DEPOSIT_DECLINED"/>
<xsd:enumeration value="DEPOSIT_LIMIT_EXCEEDED"/>
<xsd:enumeration value="EXCEEDS_BALANCE"/>
<xsd:enumeration value="CARD_NOT_VALIDATED"/>
<xsd:enumeration value="INVALID_AMOUNT"/>
<xsd:enumeration value="INVALID_CARD_CV2"/>
<xsd:enumeration value="INVALID_CARD_DETAILS"/>
<xsd:enumeration value="INVALID_EXPIRY_DATE"/>
<xsd:enumeration value="INVALID_MASTERCARD"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="CFT_MAX_WITHDRAWAL_LIMIT"/>
<xsd:enumeration value="NEGATIVE_NET_DEPOSITS"/>
<xsd:enumeration value="NON_STERLING_TO_UK_MASTERCARD"/>
<xsd:enumeration value="NON_ZERO_NON_NEG_NET_DEPOSITS"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="VISA_WITHDRAWAL_NOT_POSSIBLE"/>
<xsd:enumeration value="DUPLICATE_WITHDRAWAL"/>
<!-- The following four values added for new withdrawByBankTransfer operation -->
<!-- Do not use unless for other operations unless they are also new. There -->
<!-- will be some clients that are not aware of these new enum values. -->
<xsd:enumeration value="DEPOSITS_NOT_CLEARED"/>
<xsd:enumeration value="INVALID_BANK_ACCOUNT_DETAILS_FIELD"/>
<xsd:enumeration value="EXPRESS_TRANSFER_NOT_AVAILABLE"/>
<xsd:enumeration value="UNSUPPORTED_COUNTRY_FOR_BANK_TRANSFER"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="DepositFromPaymentCardReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="amount" nillable="false" type="xsd:double"/>
<xsd:element name="cardIdentifier" nillable="true" type="xsd:string"/>
<xsd:element name="cv2" nillable="true" type="xsd:string"/>
<xsd:element name="password" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="AddPaymentCardReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="cardNumber" type="xsd:string"/>
<xsd:element name="cardType" type="types:CardTypeEnum"/>
<xsd:element name="startDate" nillable="true" type="xsd:string"/>
<xsd:element name="expiryDate" type="xsd:string"/>
<xsd:element name="issueNumber" nillable="true" type="xsd:string"/>
<xsd:element name="billingName" type="xsd:string"/>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="password" type="xsd:string"/>
<xsd:element name="address1" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="address4" nillable="true" type="xsd:string"/>
<xsd:element name="town" nillable="true" type="xsd:string"/>
<xsd:element name="county" nillable="true" type="xsd:string"/>
<xsd:element name="zipCode" nillable="true" type="xsd:string"/>
<xsd:element name="country" nillable="true" type="xsd:string"/>
<xsd:element name="cardStatus" type="types:PaymentCardStatusEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="DeletePaymentCardReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="password" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetPaymentCardReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="UpdatePaymentCardReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="password" type="xsd:string"/>
<xsd:element name="expiryDate" nillable="true" type="xsd:string"/>
<xsd:element name="startDate" nillable="true" type="xsd:string"/>
<xsd:element name="issueNumber" nillable="true" type="xsd:string"/>
<xsd:element name="address1" nillable="true" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="address4" nillable="true" type="xsd:string"/>
<xsd:element name="town" nillable="true" type="xsd:string"/>
<xsd:element name="county" nillable="true" type="xsd:string"/>
<xsd:element name="zipCode" nillable="true" type="xsd:string"/>
<xsd:element name="country" nillable="true" type="xsd:string"/>
<xsd:element name="cardStatus" type="types:PaymentCardStatusEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="CardTypeEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="VISA"/>
<xsd:enumeration value="MASTERCARD"/>
<xsd:enumeration value="VISADELTA"/>
<xsd:enumeration value="SWITCH"/>
<xsd:enumeration value="SOLO"/>
<xsd:enumeration value="ELECTRON"/>
<xsd:enumeration value="LASER"/>
<xsd:enumeration value="MAESTRO"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="AddPaymentCardResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:AddPaymentCardErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="paymentCard" type="types:PaymentCard"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="AddPaymentCardErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_CARD_DETAILS"/>
<xsd:enumeration value="INVALID_CARD_CV2"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="ACCOUNT_INACTIVE"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="INVALID_EXPIRY_DATE"/>
<xsd:enumeration value="INVALID_START_DATE"/>
<xsd:enumeration value="INVALID_CARD_NUMBER"/>
<xsd:enumeration value="INVALID_ZIP_CODE"/>
<xsd:enumeration value="INVALID_COUNTRY_CODE"/>
<xsd:enumeration value="INVALID_BILLING_NAME"/>
<xsd:enumeration value="INVALID_CARD_ADDRESS"/>
<xsd:enumeration value="CARD_ALREADY_EXISTS"/>
<xsd:enumeration value="AGE_VERIFICATION_REQUIRED"/>
<xsd:enumeration value="NOT_FUNDED_WITH_FIRST_CARD"/>
<xsd:enumeration value="CARD_NOT_VALID_FOR_ACCOUNT_CURRENCY"/>
<xsd:enumeration value="INVALID_CARD_TYPE"/>
<xsd:enumeration value="MAXIMUM_NUMBER_OF_CARDS_REACHED"/>
<xsd:enumeration value="INVALID_ISSUE_NUMBER"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="DeletePaymentCardErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_CARD_DETAILS"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="ACCOUNT_INACTIVE"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="CARD_NOT_DELETED"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="DeletePaymentCardResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:DeletePaymentCardErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="billingName" type="xsd:string"/>
<xsd:element name="cardShortNumber" type="xsd:string"/>
<xsd:element name="cardType" type="types:CardTypeEnum"/>
<xsd:element name="issuingCountry" nillable="true" type="xsd:string"/>
<xsd:element name="expiryDate" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="UpdatePaymentCardResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:UpdatePaymentCardErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="billingName" type="xsd:string"/>
<xsd:element name="cardType" type="types:CardTypeEnum"/>
<xsd:element name="expiryDate" type="xsd:string"/>
<xsd:element name="startDate" nillable="true" type="xsd:string"/>
<xsd:element name="address1" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="address4" nillable="true" type="xsd:string"/>
<xsd:element name="zipCode" nillable="true" type="xsd:string"/>
<xsd:element name="country" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="UpdatePaymentCardErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_CARD_DETAILS"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="ACCOUNT_INACTIVE"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="INVALID_COUNTRY_CODE"/>
<xsd:enumeration value="INVALID_CARD_ADDRESS"/>
<xsd:enumeration value="INVALID_EXPIRY_DATE"/>
<xsd:enumeration value="INVALID_START_DATE"/>
<xsd:enumeration value="INVALID_ZIP_CODE"/>
<xsd:enumeration value="INVALID_ISSUE_NUMBER"/>
<xsd:enumeration value="API_ERROR"/>
<xsd:enumeration value="CARD_NOT_FOUND"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="GetPaymentCardResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:GetPaymentCardErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="paymentCardItems" nillable="true" type="types:ArrayOfPaymentCard"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="GetPaymentCardErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="ACCOUNT_INACTIVE"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="PaymentCard">
<xsd:sequence>
<xsd:element name="nickName" type="xsd:string"/>
<xsd:element name="cardShortNumber" type="xsd:string"/>
<xsd:element name="expiryDate" type="xsd:string"/>
<xsd:element name="startDate" nillable="true" type="xsd:string"/>
<xsd:element name="issueNumber" nillable="true" type="xsd:string"/>
<xsd:element name="cardType" type="types:CardTypeEnum"/>
<xsd:element name="issuingCountryIso3" nillable="true" type="xsd:string"/>
<xsd:element name="totalDeposits" nillable="true" type="xsd:double"/>
<xsd:element name="totalWithdrawals" nillable="true" type="xsd:double"/>
<xsd:element name="netDeposits" nillable="true" type="xsd:double"/>
<xsd:element name="validationStatus" nillable="true" type="xsd:string"/>
<xsd:element name="billingName" type="xsd:string"/>
<xsd:element name="billingAddress1" nillable="true" type="xsd:string"/>
<xsd:element name="billingAddress2" nillable="true" type="xsd:string"/>
<xsd:element name="billingAddress3" nillable="true" type="xsd:string"/>
<xsd:element name="billingAddress4" nillable="true" type="xsd:string"/>
<xsd:element name="town" nillable="true" type="xsd:string"/>
<xsd:element name="county" nillable="true" type="xsd:string"/>
<xsd:element name="postcode" nillable="true" type="xsd:string"/>
<xsd:element name="billingCountryIso3" nillable="true" type="xsd:string"/>
<xsd:element name="cardStatus" type="types:PaymentCardStatusEnum"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="PaymentCardStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="LOCKED"/>
<xsd:enumeration value="UNLOCKED"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfPaymentCard">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="PaymentCard" nillable="true" type="types:PaymentCard"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="WithdrawToPaymentCardResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="amountWithdrawn" nillable="false" type="xsd:double"/>
<xsd:element name="errorCode" type="types:PaymentsErrorEnum"/>
<xsd:element name="maxAmount" nillable="false" type="xsd:double"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="WithdrawToPaymentCardReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="amount" nillable="false" type="xsd:double"/>
<xsd:element name="cardIdentifier" nillable="true" type="xsd:string"/>
<xsd:element name="password" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="WithdrawByBankTransferReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="mode" nillable="false" type="types:WithdrawByBankTransferModeEnum"/>
<xsd:element name="amount" nillable="false" type="xsd:double"/>
<xsd:element name="bankAccountDetails" nillable="false"
type="types:BankAccountDetails"/>
<xsd:element name="expressTransfer" nillable="false" type="xsd:boolean"/>
<xsd:element name="password" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="WithdrawByBankTransferResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" nillable="false" type="types:PaymentsErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="amountWithdrawn" nillable="false" type="xsd:double"/>
<xsd:element name="minAmount" nillable="false" type="xsd:double"/>
<xsd:element name="maxAmount" nillable="false" type="xsd:double"/>
<xsd:element name="amountAvailable" nillable="true" type="xsd:double"/>
<xsd:element name="transferFee" nillable="true" type="xsd:double"/>
<xsd:element name="expressTransferFee" nillable="true" type="xsd:double"/>
<xsd:element name="expressTransferAvailable" nillable="true" type="xsd:boolean"/>
<xsd:element name="lastBankAccountDetails" nillable="true"
type="types:BankAccountDetails"/>
<xsd:element name="requiredBankAccountDetailsFields" nillable="true"
type="types:ArrayOfBankAccountDetailsField"/>
<xsd:element name="transactionId" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="WithdrawByBankTransferModeEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="VALIDATE"/>
<xsd:enumeration value="EXECUTE"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfBankAccountDetailsField">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" name="BankAccountDetailsField"
nillable="true" type="types:BankAccountDetailsField"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BankAccountDetailsField">
<xsd:complexContent>
<xsd:extension base="types:AbstractField">
<xsd:sequence>
<xsd:element name="type" nillable="false" type="types:BankAccountDetailsFieldEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="BankAccountDetailsFieldEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="PAYEE"/>
<xsd:enumeration value="BANK_LOCATION_ISO3"/>
<xsd:enumeration value="BANK_NAME"/>
<xsd:enumeration value="ACCOUNT_HOLDING_BRANCH"/>
<xsd:enumeration value="ACCOUNT_NUMBER"/>
<xsd:enumeration value="ACCOUNT_TYPE"/>
<xsd:enumeration value="BANK_CODE"/>
<xsd:enumeration value="SORT_CODE"/>
<xsd:enumeration value="BANK_KEY"/>
<xsd:enumeration value="BRANCH_CODE"/>
<xsd:enumeration value="ROUTING"/>
<xsd:enumeration value="BANK_BSB"/>
<xsd:enumeration value="BLZ_CODE"/>
<xsd:enumeration value="ABI_CAB"/>
<xsd:enumeration value="BANK_GIRO_CREDIT_NUMBER"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="AbstractField">
<xsd:sequence>
<xsd:element name="required" nillable="false" type="xsd:boolean"/>
<xsd:element name="readOnly" nillable="false" type="xsd:boolean"/>
<xsd:element name="size" nillable="false" type="xsd:int"/>
<xsd:element name="minLength" nillable="false" type="xsd:int"/>
<xsd:element name="maxLength" nillable="false" type="xsd:int"/>
<xsd:element name="regExp" nillable="false" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BasicBankAccountDetails">
<xsd:sequence>
<xsd:element name="bankName" nillable="true" type="xsd:string"/>
<xsd:element name="accountHoldingBranch" nillable="true" type="xsd:string"/>
<xsd:element name="bankGiroCreditNumber" nillable="true" type="xsd:string"/>
<xsd:element name="accountNumber" nillable="true" type="xsd:string"/>
<xsd:element name="sortCode" nillable="true" type="xsd:string"/>
<xsd:element name="bankCode" nillable="true" type="xsd:string"/>
<xsd:element name="blzCode" nillable="true" type="xsd:string"/>
<xsd:element name="bankBsb" nillable="true" type="xsd:string"/>
<xsd:element name="branchCode" nillable="true" type="xsd:string"/>
<xsd:element name="bankLocationIso3" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="BankAccountDetails">
<xsd:complexContent>
<xsd:extension base="types:BasicBankAccountDetails">
<xsd:sequence>
<xsd:element name="payee" nillable="true" type="xsd:string"/>
<xsd:element name="accountType" nillable="false" type="types:BankAccountTypeEnum"/>
<xsd:element name="bankKey" nillable="true" type="xsd:string"/>
<xsd:element name="routing" nillable="true" type="xsd:string"/>
<xsd:element name="abiCab" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="BankAccountTypeEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="NotSpecified"/>
<xsd:enumeration value="CH"/>
<xsd:enumeration value="SA"/>
<xsd:enumeration value="TR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="TransferFundsReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="sourceWalletId" nillable="false" type="xsd:int" />
<xsd:element name="targetWalletId" nillable="false" type="xsd:int" />
<xsd:element name="amount" nillable="false" type="xsd:double" />
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="TransferFundsResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" nillable="false" type="types:TransferFundsErrorEnum" />
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string" />
<xsd:element name="monthlyDepositTotal" nillable="true" type="xsd:double" />
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="TransferFundsErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK" />
<xsd:enumeration value="INVALID_AMOUNT" />
<xsd:enumeration value="TRANSFER_FAILED"/>
<xsd:enumeration value="OVER_BALANCE"/>
<xsd:enumeration value="WALLETS_MUST_BE_DIFFERENT"/>
<xsd:enumeration value="SOURCE_WALLET_UNKNOWN" />
<xsd:enumeration value="SOURCE_WALLET_SUSPENDED" />
<xsd:enumeration value="SOURCE_WALLET_SUSPENDED_KYC" />
<xsd:enumeration value="SOURCE_WALLET_KYC_WITHDRAWAL" />
<xsd:enumeration value="SOURCE_WALLET_KYC_DEPOSIT_TOTAL" />
<xsd:enumeration value="SOURCE_WALLET_KYC_DEPOSIT_MONTH" />
<xsd:enumeration value="TARGET_WALLET_UNKNOWN" />
<xsd:enumeration value="TARGET_WALLET_SUSPENDED" />
<xsd:enumeration value="TARGET_WALLET_SUSPENDED_KYC" />
<xsd:enumeration value="TARGET_WALLET_KYC_WITHDRAWAL" />
<xsd:enumeration value="TARGET_WALLET_KYC_DEPOSIT_TOTAL" />
<xsd:enumeration value="TARGET_WALLET_KYC_DEPOSIT_MONTH" />
<xsd:enumeration value="API_ERROR" />
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="SelfExcludeReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="selfExclude" nillable="false" type="xsd:boolean"/>
<xsd:element name="password" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="SelfExcludeResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string" />
<xsd:element name="errorCode" type="types:SelfExcludeErrorEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="SelfExcludeErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="ACCOUNT_CLOSED"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="INVALID_SELF_EXCLUDE_VALUE"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ConvertCurrencyResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="convertedAmount" nillable="false" type="xsd:double"/>
<xsd:element name="errorCode" type="types:ConvertCurrencyErrorEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="ConvertCurrencyErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_AMOUNT"/>
<xsd:enumeration value="INVALID_FROM_CURRENCY"/>
<xsd:enumeration value="INVALID_TO_CURRENCY"/>
<xsd:enumeration value="CANNOT_CONVERT"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ConvertCurrencyReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="amount" nillable="false" type="xsd:double"/>
<xsd:element name="fromCurrency" nillable="true" type="xsd:string"/>
<xsd:element name="toCurrency" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetCurrenciesResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="currencyItems" nillable="true" type="types:ArrayOfCurrency"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="Currency">
<xsd:sequence>
<xsd:element name="currencyCode" nillable="true" type="xsd:string"/>
<xsd:element name="rateGBP" nillable="false" type="xsd:double"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="ArrayOfCurrency">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="Currency" nillable="true" type="types:Currency"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetCurrenciesReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetCurrenciesV2Resp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="currencyItems" nillable="true" type="types:ArrayOfCurrencyV2"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="CurrencyV2">
<xsd:complexContent>
<xsd:extension base="types:Currency">
<xsd:sequence>
<!-- Version 2 fields -->
<xsd:element name="minimumStake" nillable="true" type="xsd:double"/>
<xsd:element name="minimumRangeStake" nillable="true" type="xsd:double"/>
<xsd:element name="minimumBSPLayLiability" nillable="true" type="xsd:double"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ArrayOfCurrencyV2">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="CurrencyV2" nillable="true" type="types:CurrencyV2"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetCurrenciesV2Req">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ViewReferAndEarnReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ViewReferAndEarnResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="errorCode" type="types:ViewReferAndEarnErrorEnum"/>
<xsd:element name="referAndEarnCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="ViewReferAndEarnErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="NO_RESULTS"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ViewProfileReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ViewProfileResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="errorCode" type="types:ViewProfileErrorEnum"/>
<xsd:element name="title" type="types:TitleEnum"/>
<xsd:element name="firstName" nillable="true" type="xsd:string"/>
<xsd:element name="surname" nillable="true" type="xsd:string"/>
<xsd:element name="userName" nillable="true" type="xsd:string"/>
<xsd:element name="forumName" nillable="true" type="xsd:string"/>
<xsd:element name="address1" nillable="true" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="townCity" nillable="true" type="xsd:string"/>
<xsd:element name="countyState" nillable="true" type="xsd:string"/>
<xsd:element name="postCode" nillable="true" type="xsd:string"/>
<xsd:element name="countryOfResidence" nillable="true" type="xsd:string"/>
<xsd:element name="homeTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="workTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="mobileTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="emailAddress" nillable="true" type="xsd:string"/>
<xsd:element name="timeZone" nillable="true" type="xsd:string"/>
<xsd:element name="currency" nillable="true" type="xsd:string"/>
<xsd:element name="gamcareLimit" nillable="true" type="xsd:int"/>
<xsd:element name="gamcareFrequency" type="types:GamcareLimitFreqEnum"/>
<xsd:element name="gamcareLossLimit" nillable="true" type="xsd:int"/>
<xsd:element name="gamcareLossLimitFrequency" type="types:GamcareLimitFreqEnum"/>
<xsd:element name="gamcareUpdateDate" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="ViewProfileV2ReqVersionEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="V1"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ViewProfileV2Req">
<xsd:complexContent>
<xsd:extension base='types:APIRequest'>
<xsd:sequence>
<xsd:element name='requestVersion' nillable='true' type='types:ViewProfileV2ReqVersionEnum'/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ViewProfileV2Resp">
<xsd:complexContent>
<xsd:extension base="types:ViewProfileResp">
<xsd:sequence>
<!-- Version 2 Fields -->
<xsd:element name="tAN" nillable="true" type="xsd:string"/>
<xsd:element name="referAndEarnCode" nillable="true" type="xsd:string"/>
<xsd:element name="earthportID" nillable="true" type="xsd:string"/>
<xsd:element name="kYCStatus" type="types:KYCStatusEnum"/>
<xsd:element name='nationalIdentifier' minOccurs='0' type='xsd:string'/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="ViewProfileErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ModifyProfileReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="password" nillable="false" type="xsd:string"/>
<xsd:element name="address1" nillable="true" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="townCity" nillable="true" type="xsd:string"/>
<xsd:element name="countyState" nillable="true" type="xsd:string"/>
<xsd:element name="postCode" nillable="true" type="xsd:string"/>
<xsd:element name="countryOfResidence" nillable="true" type="xsd:string"/>
<xsd:element name="homeTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="workTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="mobileTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="emailAddress" nillable="true" type="xsd:string"/>
<xsd:element name="timeZone" nillable="true" type="xsd:string"/>
<xsd:element name="depositLimit" nillable="true" type="xsd:int"/>
<xsd:element name="depositLimitFrequency" nillable='true' type="types:GamcareLimitFreqEnum"/>
<xsd:element name="lossLimit" nillable="true" type="xsd:int"/>
<xsd:element name="lossLimitFrequency" nillable='true' type="types:GamcareLimitFreqEnum"/>
<xsd:element name='nationalIdentifier' nillable='true' type='xsd:string'/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ModifyProfileResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:ModifyProfileErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="validationErrors"
nillable="true" type="types:ArrayOfValidationErrorsEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="ModifyProfileErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="VALIDATION_ERRORS"/>
<xsd:enumeration value="PROFILE_MODIFICATION_ERROR"/>
<xsd:enumeration value="UNAUTHORIZED"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="ACCOUNT_INACTIVE"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="CreateAccountResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="accountId" nillable="false" type="xsd:int"/>
<xsd:element name="accountStatus" type="types:AccountStatusEnum"/>
<xsd:element name="errorCode" type="types:CreateAccountErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="tan" nillable="true" type="xsd:string"/>
<xsd:element name="userId" nillable="false" type="xsd:int"/>
<xsd:element name="validationErrors"
nillable="true" type="types:ArrayOfValidationErrorsEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="AccountStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="A"/>
<xsd:enumeration value="C"/>
<xsd:enumeration value="D"/>
<xsd:enumeration value="L"/>
<xsd:enumeration value="P"/>
<xsd:enumeration value="S"/>
<xsd:enumeration value="T"/>
<xsd:enumeration value="X"/>
<xsd:enumeration value="Z"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="CreateAccountErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="VALIDATION_ERRORS"/>
<xsd:enumeration value="ACCOUNT_CREATION_ERROR"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="ValidationErrorsEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="DUPLICATE_USERNAME"/>
<xsd:enumeration value="FUNDS_TRANSFER_CANCEL"/>
<xsd:enumeration value="FUNDS_TRANSFER_CURRENCY_MISMATCH"/>
<xsd:enumeration value="INCOMPLETE_DETAILS"/>
<xsd:enumeration value="INSUFFICIENT_FUNDS"/>
<xsd:enumeration value="INVALID_ACCOUNT_TYPE"/>
<xsd:enumeration value="INVALID_ADDRESS_LINE1"/>
<xsd:enumeration value="INVALID_ADDRESS_LINE2"/>
<xsd:enumeration value="INVALID_ADDRESS_LINE3"/>
<xsd:enumeration value="INVALID_ANSWER1"/>
<xsd:enumeration value="INVALID_ANSWER2"/>
<xsd:enumeration value="INVALID_BROWSER"/>
<xsd:enumeration value="INVALID_CITY"/>
<xsd:enumeration value="INVALID_COUNTRY_OF_RESIDENCE"/>
<xsd:enumeration value="INVALID_COUNTY_STATE"/>
<xsd:enumeration value="INVALID_CURRENCY"/>
<xsd:enumeration value="INVALID_DEPOSIT_LIMIT"/>
<xsd:enumeration value="INVALID_DEPOSIT_LIMIT_FREQUENCY"/>
<xsd:enumeration value="INVALID_DETAILS"/>
<xsd:enumeration value="INVALID_DOB"/>
<xsd:enumeration value="INVALID_EMAIL"/>
<xsd:enumeration value="INVALID_FIRSTNAME"/>
<xsd:enumeration value="INVALID_GENDER"/>
<xsd:enumeration value="INVALID_HOME_PHONE"/>
<xsd:enumeration value="INVALID_IP_ADDRESS"/>
<xsd:enumeration value="INVALID_LANGUAGE"/>
<xsd:enumeration value="INVALID_LOCALE"/>
<xsd:enumeration value="INVALID_LOSS_LIMIT"/>
<xsd:enumeration value="INVALID_LOSS_LIMIT_FREQUENCY"/>
<xsd:enumeration value="INVALID_MASTER_ID"/>
<xsd:enumeration value="INVALID_MOBILE_PHONE"/>
<xsd:enumeration value="INVALID_PARTNERID"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="INVALID_POSTCODE"/>
<xsd:enumeration value="INVALID_PRIVICY_VERSION"/>
<xsd:enumeration value="INVALID_PRODUCT_ID"/>
<xsd:enumeration value="INVALID_REFERRER_CODE"/>
<xsd:enumeration value="INVALID_REGION"/>
<xsd:enumeration value="INVALID_SECURITY_QUESTION1"/>
<xsd:enumeration value="INVALID_SECURITY_QUESTION2"/>
<xsd:enumeration value="INVALID_SUBPARTNERID"/>
<xsd:enumeration value="INVALID_SUPERPARTNERID"/>
<xsd:enumeration value="INVALID_SURNAME"/>
<xsd:enumeration value="INVALID_TC_VERSION"/>
<xsd:enumeration value="INVALID_TIMEZONE"/>
<xsd:enumeration value="INVALID_TITLE"/>
<xsd:enumeration value="INVALID_USERNAME"/>
<xsd:enumeration value="INVALID_WORK_PHONE"/>
<xsd:enumeration value="RESERVED_PASSWORD"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ArrayOfValidationErrorsEnum">
<xsd:sequence>
<xsd:element form="qualified" maxOccurs="unbounded" minOccurs="0"
name="ValidationErrorsEnum" nillable="true" type="types:ValidationErrorsEnum"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="CreateAccountReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="acceptedPrivicyPolicyVersion" nillable="false" type="xsd:int"/>
<xsd:element name="acceptedTermsAndConditionsVersion" nillable="false" type="xsd:int"/>
<xsd:element name="accountType" nillable="false" type="types:AccountTypeEnum"/>
<xsd:element name="address1" nillable="false" type="xsd:string"/>
<xsd:element name="address2" nillable="true" type="xsd:string"/>
<xsd:element name="address3" nillable="true" type="xsd:string"/>
<xsd:element name="answer1" nillable="false" type="xsd:string"/>
<xsd:element name="answer2" nillable="false" type="xsd:string"/>
<xsd:element name="browser" nillable="true" type="xsd:string"/>
<xsd:element name="countryOfResidence" nillable="true" type="xsd:string"/>
<xsd:element name="countyState" nillable="true" type="xsd:string"/>
<xsd:element name="currency" nillable="true" type="xsd:string"/>
<xsd:element name="dateOfBirth" nillable="false" type="xsd:dateTime"/>
<xsd:element name="depositLimit" nillable="false" type="xsd:double"/>
<xsd:element name="depositLimitFrequency" nillable="false" type="types:GamcareLimitFreqEnum"/>
<xsd:element name="emailAddress" nillable="false" type="xsd:string"/>
<xsd:element name="firstName" nillable="false" type="xsd:string"/>
<xsd:element name="gender" nillable="false" type="types:GenderEnum"/>
<xsd:element name="homeTelephone" nillable="false" type="xsd:string"/>
<xsd:element name="informProductsServices" nillable="false" type="xsd:boolean"/>
<xsd:element name="informSpecialOffers" nillable="false" type="xsd:boolean"/>
<xsd:element name="ipAddress" nillable="false" type="xsd:string"/>
<xsd:element name="locale" nillable="true" type="xsd:string"/>
<xsd:element name="lossLimit" nillable="false" type="xsd:double"/>
<xsd:element name="lossLimitFrequency" nillable="false" type="types:GamcareLimitFreqEnum"/>
<xsd:element name="manualAddress" nillable="false" type="xsd:boolean"/>
<xsd:element name="mobileTelephone" nillable="false" type="xsd:string"/>
<xsd:element name="partnerId" nillable="false" type="xsd:int"/>
<xsd:element name="password" nillable="true" type="xsd:string"/>
<xsd:element name="postCode" nillable="true" type="xsd:string"/>
<xsd:element name="preferredName" nillable="true" type="xsd:string"/>
<xsd:element name="productId" nillable="false" type="xsd:int"/>
<xsd:element name="question1" nillable="false" type="types:SecurityQuestion1Enum"/>
<xsd:element name="question2" nillable="false" type="types:SecurityQuestion2Enum"/>
<xsd:element name="referrerCode" nillable="true" type="xsd:string"/>
<xsd:element name="region" type="types:RegionEnum"/>
<xsd:element name="subPartnerId" nillable="false" type="xsd:int"/>
<xsd:element name="superPartnerId" nillable="false" type="xsd:int"/>
<xsd:element name="surname" nillable="false" type="xsd:string"/>
<xsd:element name="timeZone" nillable="true" type="xsd:string"/>
<xsd:element name="title" nillable="false" type="types:TitleEnum"/>
<xsd:element name="townCity" nillable="false" type="xsd:string"/>
<xsd:element name="username" nillable="true" type="xsd:string"/>
<xsd:element name="workTelephone" nillable="true" type="xsd:string"/>
<xsd:element name="nationalIdentifier" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="AccountTypeEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="STANDARD"/>
<xsd:enumeration value="MARGIN"/>
<xsd:enumeration value="TRADING"/>
<xsd:enumeration value="AGENT_CLIENT"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="GamcareLimitFreqEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="DAILY"/>
<xsd:enumeration value="WEEKLY"/>
<xsd:enumeration value="MONTHLY"/>
<xsd:enumeration value="YEARLY"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="GenderEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="M"/>
<xsd:enumeration value="F"/>
<xsd:enumeration value="UNKNOWN"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="SecurityQuestion1Enum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="SQ1A"/>
<xsd:enumeration value="SQ1B"/>
<xsd:enumeration value="SQ1C"/>
<xsd:enumeration value="SQ1D"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="SecurityQuestion2Enum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="SQ2A"/>
<xsd:enumeration value="SQ2B"/>
<xsd:enumeration value="SQ2C"/>
<xsd:enumeration value="SQ2D"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="RegionEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="ZAF"/>
<xsd:enumeration value="NA"/>
<xsd:enumeration value="NORD"/>
<xsd:enumeration value="GBR"/>
<xsd:enumeration value="IRL"/>
<xsd:enumeration value="AUS_NZL"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="TitleEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="Dr"/>
<xsd:enumeration value="Mr"/>
<xsd:enumeration value="Miss"/>
<xsd:enumeration value="Mrs"/>
<xsd:enumeration value="Ms"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="KYCStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="DEFAULT"/>
<xsd:enumeration value="AGE_VERIFIED"/>
<xsd:enumeration value="KYC"/>
<xsd:enumeration value="KYC_NON_AUS"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="ForgotPasswordErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_USERNAME"/>
<xsd:enumeration value="INVALID_COUNTRY_OF_RESIDENCE"/>
<xsd:enumeration value="INVALID_EMAIL"/>
<xsd:enumeration value="INVALID_ANSWER"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="TOO_MANY_ATTEMPTS_ACCOUNT_SUSPENDED"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="ModifyPasswordErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="INVALID_NEW_PASSWORD"/>
<xsd:enumeration value="PASSWORDS_DONT_MATCH"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="SetChatNameErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_PASSWORD"/>
<xsd:enumeration value="ACCOUNT_SUSPENDED"/>
<xsd:enumeration value="ACCOUNT_NOT_FUNDED"/>
<xsd:enumeration value="CHAT_NAME_UNAVAILABLE"/>
<xsd:enumeration value="CANNOT_CHANGE_CHAT_NAME"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="ForgotPasswordReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="username" type="xsd:string" nillable="false"/>
<xsd:element name="emailAddress" type="xsd:string" nillable="false"/>
<xsd:element name="countryOfResidence" type="xsd:string" nillable="false"/>
<xsd:element name="forgottenPasswordAnswer1" type="xsd:string" nillable="true"/>
<xsd:element name="forgottenPasswordAnswer2" type="xsd:string" nillable="true"/>
<xsd:element name="newPassword" type="xsd:string" nillable="true"/>
<xsd:element name="newPasswordRepeat" type="xsd:string" nillable="true"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ForgotPasswordResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:ForgotPasswordErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="question1" type="xsd:string" nillable="true"/>
<xsd:element name="question2" type="xsd:string" nillable="true"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ModifyPasswordReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="password" type="xsd:string" nillable="false"/>
<xsd:element name="newPassword" type="xsd:string" nillable="false"/>
<xsd:element name="newPasswordRepeat" type="xsd:string" nillable="false"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ModifyPasswordResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:ModifyPasswordErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="SetChatNameReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="password" type="xsd:string" nillable="false"/>
<xsd:element name="chatName" type="xsd:string" nillable="false"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="SetChatNameResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:SetChatNameErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
</xsd:schema>
<xsd:schema elementFormDefault="qualified" targetNamespace="http://www.betfair.com/publicapi/v3/BFGlobalService/">
<xsd:import namespace="http://www.betfair.com/publicapi/types/global/v3/"/>
<xsd:element name="login">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:LoginReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="loginResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:LoginResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="retrieveLIMBMessage">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:RetrieveLIMBMessageReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="retrieveLIMBMessageResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:RetrieveLIMBMessageResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="submitLIMBMessage">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:SubmitLIMBMessageReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="submitLIMBMessageResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:SubmitLIMBMessageResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="logout">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:LogoutReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="logoutResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="false" type="types:LogoutResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="keepAlive">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:KeepAliveReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="keepAliveResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:KeepAliveResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getEvents">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetEventsReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getEventsResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetEventsResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getActiveEventTypes">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetEventTypesReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getActiveEventTypesResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetEventTypesResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getAllEventTypes">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetEventTypesReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getAllEventTypesResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetEventTypesResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getSubscriptionInfo">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetSubscriptionInfoReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getSubscriptionInfoResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetSubscriptionInfoResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="depositFromPaymentCard">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:DepositFromPaymentCardReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="depositFromPaymentCardResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:DepositFromPaymentCardResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="addPaymentCard">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:AddPaymentCardReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="addPaymentCardResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:AddPaymentCardResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="deletePaymentCard">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:DeletePaymentCardReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="deletePaymentCardResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:DeletePaymentCardResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="updatePaymentCard">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:UpdatePaymentCardReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="updatePaymentCardResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:UpdatePaymentCardResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getPaymentCard">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetPaymentCardReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getPaymentCardResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetPaymentCardResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="withdrawToPaymentCard">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:WithdrawToPaymentCardReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="withdrawToPaymentCardResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:WithdrawToPaymentCardResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="selfExclude">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:SelfExcludeReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="selfExcludeResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:SelfExcludeResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="convertCurrency">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ConvertCurrencyReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="convertCurrencyResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ConvertCurrencyResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getAllCurrencies">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetCurrenciesReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getAllCurrenciesResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetCurrenciesResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getAllCurrenciesV2">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetCurrenciesV2Req"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getAllCurrenciesV2Response">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetCurrenciesV2Resp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="viewReferAndEarn">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ViewReferAndEarnReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="viewReferAndEarnResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ViewReferAndEarnResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="viewProfile">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ViewProfileReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="viewProfileResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ViewProfileResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="withdrawByBankTransfer">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:WithdrawByBankTransferReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="withdrawByBankTransferResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:WithdrawByBankTransferResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="viewProfileV2">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ViewProfileV2Req"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="viewProfileV2Response">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ViewProfileV2Resp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="modifyProfile">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ModifyProfileReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="modifyProfileResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ModifyProfileResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="createAccount">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:CreateAccountReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="createAccountResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:CreateAccountResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="forgotPassword">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ForgotPasswordReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="forgotPasswordResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ForgotPasswordResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="modifyPassword">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:ModifyPasswordReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="modifyPasswordResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:ModifyPasswordResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="setChatName">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:SetChatNameReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="setChatNameResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:SetChatNameResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="transferFunds">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:TransferFundsReq" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="transferFundsResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:TransferFundsResp" />
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:schema>
</wsdl:types>
<wsdl:message name="loginIn">
<wsdl:part element="tns:login" name="parameters"/>
</wsdl:message>
<wsdl:message name="loginOut">
<wsdl:part element="tns:loginResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="retrieveLIMBMessageIn">
<wsdl:part element="tns:retrieveLIMBMessage" name="parameters"/>
</wsdl:message>
<wsdl:message name="retrieveLIMBMessageOut">
<wsdl:part element="tns:retrieveLIMBMessageResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="submitLIMBMessageIn">
<wsdl:part element="tns:submitLIMBMessage" name="parameters"/>
</wsdl:message>
<wsdl:message name="submitLIMBMessageOut">
<wsdl:part element="tns:submitLIMBMessageResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="logoutIn">
<wsdl:part element="tns:logout" name="parameters"/>
</wsdl:message>
<wsdl:message name="logoutOut">
<wsdl:part element="tns:logoutResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="keepAliveIn">
<wsdl:part element="tns:keepAlive" name="parameters"/>
</wsdl:message>
<wsdl:message name="keepAliveOut">
<wsdl:part element="tns:keepAliveResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getEventsIn">
<wsdl:part element="tns:getEvents" name="parameters"/>
</wsdl:message>
<wsdl:message name="getEventsOut">
<wsdl:part element="tns:getEventsResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getActiveEventTypesIn">
<wsdl:part element="tns:getActiveEventTypes" name="parameters"/>
</wsdl:message>
<wsdl:message name="getActiveEventTypesOut">
<wsdl:part element="tns:getActiveEventTypesResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getAllEventTypesIn">
<wsdl:part element="tns:getAllEventTypes" name="parameters"/>
</wsdl:message>
<wsdl:message name="getAllEventTypesOut">
<wsdl:part element="tns:getAllEventTypesResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getSubscriptionInfoIn">
<wsdl:part element="tns:getSubscriptionInfo" name="parameters"/>
</wsdl:message>
<wsdl:message name="getSubscriptionInfoOut">
<wsdl:part element="tns:getSubscriptionInfoResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="depositFromPaymentCardIn">
<wsdl:part element="tns:depositFromPaymentCard" name="parameters"/>
</wsdl:message>
<wsdl:message name="depositFromPaymentCardOut">
<wsdl:part element="tns:depositFromPaymentCardResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="addPaymentCardIn">
<wsdl:part element="tns:addPaymentCard" name="parameters"/>
</wsdl:message>
<wsdl:message name="addPaymentCardOut">
<wsdl:part element="tns:addPaymentCardResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="deletePaymentCardIn">
<wsdl:part element="tns:deletePaymentCard" name="parameters"/>
</wsdl:message>
<wsdl:message name="deletePaymentCardOut">
<wsdl:part element="tns:deletePaymentCardResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="updatePaymentCardIn">
<wsdl:part element="tns:updatePaymentCard" name="parameters"/>
</wsdl:message>
<wsdl:message name="updatePaymentCardOut">
<wsdl:part element="tns:updatePaymentCardResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getPaymentCardIn">
<wsdl:part element="tns:getPaymentCard" name="parameters"/>
</wsdl:message>
<wsdl:message name="getPaymentCardOut">
<wsdl:part element="tns:getPaymentCardResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="withdrawToPaymentCardIn">
<wsdl:part element="tns:withdrawToPaymentCard" name="parameters"/>
</wsdl:message>
<wsdl:message name="withdrawToPaymentCardOut">
<wsdl:part element="tns:withdrawToPaymentCardResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="selfExcludeIn">
<wsdl:part element="tns:selfExclude" name="parameters"/>
</wsdl:message>
<wsdl:message name="selfExcludeOut">
<wsdl:part element="tns:selfExcludeResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="convertCurrencyIn">
<wsdl:part element="tns:convertCurrency" name="parameters"/>
</wsdl:message>
<wsdl:message name="convertCurrencyOut">
<wsdl:part element="tns:convertCurrencyResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getAllCurrenciesIn">
<wsdl:part element="tns:getAllCurrencies" name="parameters"/>
</wsdl:message>
<wsdl:message name="getAllCurrenciesOut">
<wsdl:part element="tns:getAllCurrenciesResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getAllCurrenciesV2In">
<wsdl:part element="tns:getAllCurrenciesV2" name="parameters"/>
</wsdl:message>
<wsdl:message name="getAllCurrenciesV2Out">
<wsdl:part element="tns:getAllCurrenciesV2Response" name="parameters"/>
</wsdl:message>
<wsdl:message name="viewReferAndEarnIn">
<wsdl:part element="tns:viewReferAndEarn" name="parameters"/>
</wsdl:message>
<wsdl:message name="viewReferAndEarnOut">
<wsdl:part element="tns:viewReferAndEarnResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="viewProfileIn">
<wsdl:part element="tns:viewProfile" name="parameters"/>
</wsdl:message>
<wsdl:message name="viewProfileOut">
<wsdl:part element="tns:viewProfileResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="viewProfileV2In">
<wsdl:part element="tns:viewProfileV2" name="parameters"/>
</wsdl:message>
<wsdl:message name="viewProfileV2Out">
<wsdl:part element="tns:viewProfileV2Response" name="parameters"/>
</wsdl:message>
<wsdl:message name="modifyProfileIn">
<wsdl:part element="tns:modifyProfile" name="parameters"/>
</wsdl:message>
<wsdl:message name="modifyProfileOut">
<wsdl:part element="tns:modifyProfileResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="createAccountIn">
<wsdl:part element="tns:createAccount" name="parameters"/>
</wsdl:message>
<wsdl:message name="createAccountOut">
<wsdl:part element="tns:createAccountResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="forgotPasswordIn">
<wsdl:part name="parameters" element="tns:forgotPassword"/>
</wsdl:message>
<wsdl:message name="forgotPasswordOut">
<wsdl:part name="parameters" element="tns:forgotPasswordResponse"/>
</wsdl:message>
<wsdl:message name="modifyPasswordIn">
<wsdl:part name="parameters" element="tns:modifyPassword"/>
</wsdl:message>
<wsdl:message name="modifyPasswordOut">
<wsdl:part name="parameters" element="tns:modifyPasswordResponse"/>
</wsdl:message>
<wsdl:message name="withdrawByBankTransferIn">
<wsdl:part name="parameters" element="tns:withdrawByBankTransfer"/>
</wsdl:message>
<wsdl:message name="withdrawByBankTransferOut">
<wsdl:part name="parameters" element="tns:withdrawByBankTransferResponse"/>
</wsdl:message>
<wsdl:message name="setChatNameIn">
<wsdl:part name="parameters" element="tns:setChatName"/>
</wsdl:message>
<wsdl:message name="setChatNameOut">
<wsdl:part name="parameters" element="tns:setChatNameResponse"/>
</wsdl:message>
<wsdl:message name="transferFundsIn">
<wsdl:part name="parameters" element="tns:transferFunds"/>
</wsdl:message>
<wsdl:message name="transferFundsOut">
<wsdl:part name="parameters" element="tns:transferFundsResponse"/>
</wsdl:message>
<wsdl:portType name="BFGlobalService">
<wsdl:operation name="login">
<wsdl:input message="tns:loginIn" name="loginIn"/>
<wsdl:output message="tns:loginOut" name="loginOut"/>
</wsdl:operation>
<wsdl:operation name="retrieveLIMBMessage">
<wsdl:input message="tns:retrieveLIMBMessageIn" name="retrieveLIMBMessageIn"/>
<wsdl:output message="tns:retrieveLIMBMessageOut" name="retrieveLIMBMessageOut"/>
</wsdl:operation>
<wsdl:operation name="submitLIMBMessage">
<wsdl:input message="tns:submitLIMBMessageIn" name="submitLIMBMessageIn"/>
<wsdl:output message="tns:submitLIMBMessageOut" name="submitLIMBMessageOut"/>
</wsdl:operation>
<wsdl:operation name="logout">
<wsdl:input message="tns:logoutIn" name="logoutIn"/>
<wsdl:output message="tns:logoutOut" name="logoutOut"/>
</wsdl:operation>
<wsdl:operation name="keepAlive">
<wsdl:input message="tns:keepAliveIn" name="keepAliveIn"/>
<wsdl:output message="tns:keepAliveOut" name="keepAliveOut"/>
</wsdl:operation>
<wsdl:operation name="getEvents">
<wsdl:input message="tns:getEventsIn" name="getEventsIn"/>
<wsdl:output message="tns:getEventsOut" name="getEventsOut"/>
</wsdl:operation>
<wsdl:operation name="getActiveEventTypes">
<wsdl:input message="tns:getActiveEventTypesIn" name="getActiveEventTypesIn"/>
<wsdl:output message="tns:getActiveEventTypesOut" name="getActiveEventTypesOut"/>
</wsdl:operation>
<wsdl:operation name="getAllEventTypes">
<wsdl:input message="tns:getAllEventTypesIn" name="getAllEventTypesIn"/>
<wsdl:output message="tns:getAllEventTypesOut" name="getAllEventTypesOut"/>
</wsdl:operation>
<wsdl:operation name="getSubscriptionInfo">
<wsdl:input message="tns:getSubscriptionInfoIn" name="getSubscriptionInfoIn"/>
<wsdl:output message="tns:getSubscriptionInfoOut" name="getSubscriptionInfoOut"/>
</wsdl:operation>
<wsdl:operation name="depositFromPaymentCard">
<wsdl:input message="tns:depositFromPaymentCardIn" name="depositFromPaymentCardIn"/>
<wsdl:output message="tns:depositFromPaymentCardOut" name="depositFromPaymentCardOut"/>
</wsdl:operation>
<wsdl:operation name="addPaymentCard">
<wsdl:input message="tns:addPaymentCardIn" name="addPaymentCardIn"/>
<wsdl:output message="tns:addPaymentCardOut" name="addPaymentCardOut"/>
</wsdl:operation>
<wsdl:operation name="deletePaymentCard">
<wsdl:input message="tns:deletePaymentCardIn" name="deletePaymentCardIn"/>
<wsdl:output message="tns:deletePaymentCardOut" name="deletePaymentCardOut"/>
</wsdl:operation>
<wsdl:operation name="updatePaymentCard">
<wsdl:input message="tns:updatePaymentCardIn" name="updatePaymentCardIn"/>
<wsdl:output message="tns:updatePaymentCardOut" name="updatePaymentCardOut"/>
</wsdl:operation>
<wsdl:operation name="getPaymentCard">
<wsdl:input message="tns:getPaymentCardIn" name="getPaymentCardIn"/>
<wsdl:output message="tns:getPaymentCardOut" name="getPaymentCardOut"/>
</wsdl:operation>
<wsdl:operation name="withdrawToPaymentCard">
<wsdl:input message="tns:withdrawToPaymentCardIn" name="withdrawToPaymentCardIn"/>
<wsdl:output message="tns:withdrawToPaymentCardOut" name="withdrawToPaymentCardOut"/>
</wsdl:operation>
<wsdl:operation name="selfExclude">
<wsdl:input message="tns:selfExcludeIn" name="selfExcludeIn"/>
<wsdl:output message="tns:selfExcludeOut" name="selfExcludeOut"/>
</wsdl:operation>
<wsdl:operation name="convertCurrency">
<wsdl:input message="tns:convertCurrencyIn" name="convertCurrencyIn"/>
<wsdl:output message="tns:convertCurrencyOut" name="convertCurrencyOut"/>
</wsdl:operation>
<wsdl:operation name="getAllCurrencies">
<wsdl:input message="tns:getAllCurrenciesIn" name="getAllCurrenciesIn"/>
<wsdl:output message="tns:getAllCurrenciesOut" name="getAllCurrenciesOut"/>
</wsdl:operation>
<wsdl:operation name="getAllCurrenciesV2">
<wsdl:input message="tns:getAllCurrenciesV2In" name="getAllCurrenciesV2In"/>
<wsdl:output message="tns:getAllCurrenciesV2Out" name="getAllCurrenciesV2Out"/>
</wsdl:operation>
<wsdl:operation name="viewReferAndEarn">
<wsdl:input message="tns:viewReferAndEarnIn" name="viewReferAndEarnIn"/>
<wsdl:output message="tns:viewReferAndEarnOut" name="viewReferAndEarnOut"/>
</wsdl:operation>
<wsdl:operation name="viewProfile">
<wsdl:input message="tns:viewProfileIn" name="viewProfileIn"/>
<wsdl:output message="tns:viewProfileOut" name="viewProfileOut"/>
</wsdl:operation>
<wsdl:operation name="viewProfileV2">
<wsdl:input message="tns:viewProfileV2In" name="viewProfileV2In"/>
<wsdl:output message="tns:viewProfileV2Out" name="viewProfileV2Out"/>
</wsdl:operation>
<wsdl:operation name="modifyProfile">
<wsdl:input message="tns:modifyProfileIn" name="modifyProfileIn"/>
<wsdl:output message="tns:modifyProfileOut" name="modifyProfileOut"/>
</wsdl:operation>
<wsdl:operation name="createAccount">
<wsdl:input message="tns:createAccountIn" name="createAccountIn"/>
<wsdl:output message="tns:createAccountOut" name="createAccountOut"/>
</wsdl:operation>
<wsdl:operation name="forgotPassword">
<wsdl:input name="forgotPasswordIn" message="tns:forgotPasswordIn"/>
<wsdl:output name="forgotPasswordOut" message="tns:forgotPasswordOut"/>
</wsdl:operation>
<wsdl:operation name="modifyPassword">
<wsdl:input name="modifyPasswordIn" message="tns:modifyPasswordIn"/>
<wsdl:output name="modifyPasswordOut" message="tns:modifyPasswordOut"/>
</wsdl:operation>
<wsdl:operation name="withdrawByBankTransfer">
<wsdl:input name="withdrawByBankTransferIn" message="tns:withdrawByBankTransferIn"/>
<wsdl:output name="withdrawByBankTransferOut" message="tns:withdrawByBankTransferOut"/>
</wsdl:operation>
<wsdl:operation name="setChatName">
<wsdl:input name="setChatNameIn" message="tns:setChatNameIn"/>
<wsdl:output name="setChatNameOut" message="tns:setChatNameOut"/>
</wsdl:operation>
<wsdl:operation name="transferFunds">
<wsdl:input name="transferFundsIn" message="tns:transferFundsIn" />
<wsdl:output name="transferFundsOut" message="tns:transferFundsOut" />
</wsdl:operation>
</wsdl:portType>
<wsdl:binding name="BFGlobalService" type="tns:BFGlobalService">
<soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/>
<wsdl:operation name="login">
<soap:operation soapAction="login" style="document"/>
<wsdl:input name="loginIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="loginOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="retrieveLIMBMessage">
<soap:operation soapAction="retrieveLIMBMessage" style="document"/>
<wsdl:input name="retrieveLIMBMessageIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="retrieveLIMBMessageOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="submitLIMBMessage">
<soap:operation soapAction="submitLIMBMessage" style="document"/>
<wsdl:input name="submitLIMBMessageIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="submitLIMBMessageOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="logout">
<soap:operation soapAction="logout" style="document"/>
<wsdl:input name="logoutIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="logoutOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="keepAlive">
<soap:operation soapAction="keepAlive" style="document"/>
<wsdl:input name="keepAliveIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="keepAliveOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getEvents">
<soap:operation soapAction="getEvents" style="document"/>
<wsdl:input name="getEventsIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getEventsOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getActiveEventTypes">
<soap:operation soapAction="getActiveEventTypes" style="document"/>
<wsdl:input name="getActiveEventTypesIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getActiveEventTypesOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getAllEventTypes">
<soap:operation soapAction="getAllEventTypes" style="document"/>
<wsdl:input name="getAllEventTypesIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getAllEventTypesOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getSubscriptionInfo">
<soap:operation soapAction="getSubscriptionInfo" style="document"/>
<wsdl:input name="getSubscriptionInfoIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getSubscriptionInfoOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="depositFromPaymentCard">
<soap:operation soapAction="depositFromPaymentCard" style="document"/>
<wsdl:input name="depositFromPaymentCardIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="depositFromPaymentCardOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="addPaymentCard">
<soap:operation soapAction="addPaymentCard" style="document"/>
<wsdl:input name="addPaymentCardIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="addPaymentCardOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="deletePaymentCard">
<soap:operation soapAction="deletePaymentCard" style="document"/>
<wsdl:input name="deletePaymentCardIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="deletePaymentCardOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="updatePaymentCard">
<soap:operation soapAction="updatePaymentCard" style="document"/>
<wsdl:input name="updatePaymentCardIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="updatePaymentCardOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getPaymentCard">
<soap:operation soapAction="getPaymentCard" style="document"/>
<wsdl:input name="getPaymentCardIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getPaymentCardOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="withdrawToPaymentCard">
<soap:operation soapAction="withdrawToPaymentCard" style="document"/>
<wsdl:input name="withdrawToPaymentCardIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="withdrawToPaymentCardOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="selfExclude">
<soap:operation soapAction="selfExclude" style="document"/>
<wsdl:input name="selfExcludeIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="selfExcludeOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="convertCurrency">
<soap:operation soapAction="convertCurrency" style="document"/>
<wsdl:input name="convertCurrencyIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="convertCurrencyOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getAllCurrencies">
<soap:operation soapAction="getAllCurrencies" style="document"/>
<wsdl:input name="getAllCurrenciesIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getAllCurrenciesOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getAllCurrenciesV2">
<soap:operation soapAction="getAllCurrenciesV2" style="document"/>
<wsdl:input name="getAllCurrenciesV2In">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getAllCurrenciesV2Out">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="viewReferAndEarn">
<soap:operation soapAction="viewReferAndEarn" style="document"/>
<wsdl:input name="viewReferAndEarnIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="viewReferAndEarnOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="viewProfile">
<soap:operation soapAction="viewProfile" style="document"/>
<wsdl:input name="viewProfileIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="viewProfileOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="viewProfileV2">
<soap:operation soapAction="viewProfileV2" style="document"/>
<wsdl:input name="viewProfileV2In">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="viewProfileV2Out">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="modifyProfile">
<soap:operation soapAction="modifyProfile" style="document"/>
<wsdl:input name="modifyProfileIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="modifyProfileOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="createAccount">
<soap:operation soapAction="createAccount" style="document"/>
<wsdl:input name="createAccountIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="createAccountOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="forgotPassword">
<soap:operation soapAction="forgotPassword" style="document"/>
<wsdl:input name="forgotPasswordIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="forgotPasswordOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="modifyPassword">
<soap:operation soapAction="modifyPassword" style="document"/>
<wsdl:input name="modifyPasswordIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="modifyPasswordOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="withdrawByBankTransfer">
<soap:operation soapAction="withdrawByBankTransfer" style="document"/>
<wsdl:input name="withdrawByBankTransferIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="withdrawByBankTransferOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="setChatName">
<soap:operation soapAction="setChatName" style="document"/>
<wsdl:input name="setChatNameIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="setChatNameOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="transferFunds">
<soap:operation soapAction="transferFunds" style="document" />
<wsdl:input name="transferFundsIn">
<soap:body use="literal" />
</wsdl:input>
<wsdl:output name="transferFundsOut">
<soap:body use="literal" />
</wsdl:output>
</wsdl:operation>
</wsdl:binding>
<wsdl:service name="BFGlobalService">
<wsdl:port binding="tns:BFGlobalService" name="BFGlobalService">
<soap:address location="https://api.betfair.com/global/v3/BFGlobalService"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>
'''
| mit | 5,771,604,429,762,916,000 | 44.386821 | 459 | 0.635334 | false | 3.808348 | false | false | false |
MartinSoto/Seamless | src/machine/cmds.py | 1 | 4144 | # Seamless DVD Player
# Copyright (C) 2004-2005 Martin Soto <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"""Command objects to control the playback pipeline.
Any virtual machine implementation must return intances of the classes
in this module."""
class PipelineCmd(object):
"""A generic command object.
Objects of this class, when invoked with a pipeline object as
parameter, call the method named by attribute `methodName` passing
it the paremeters received by the object constructor."""
__slots__ = ('args',
'keywords')
def __init__(self, *args, **keywords):
self.args = args
self.keywords = keywords
methodName = None
def __call__(self, pipeline):
getattr(pipeline, self.methodName)(*self.args, **self.keywords)
class DoNothing(PipelineCmd):
"""A do-nothing command object."""
__slots__ = ()
def __call__(self, pipeline):
pass
class PlayVobu(PipelineCmd):
"""When constructed with parameter list `(domain, titleNr,
sectorNr)`, play the VOBU corresponding to domain `domain`, title
number `titleNr`, and sector number `sectorNr`."""
__slots__ = ()
methodName = 'playVobu'
class CancelVobu(PipelineCmd):
"""When constructed without parameters, cancel the effect of the
last `PlayVobu` operation. A new `PlayVobu` must be sent
afterwards in order for the pipeline to be able to resume
playback."""
__slots__ = ()
methodName = 'cancelVobu'
# Since accepting the playback of a VOBU is the default, `acceptVobu`
# is equivalent to doing nothing.
class AcceptVobu(DoNothing):
pass
ASPECT_RATIO_4_3 = 10
ASPECT_RATIO_16_9 = 11
class SetAspectRatio(PipelineCmd):
"""When constructed with parameter list `(aspectRatio)`, set the
aspect ratio to the one specified. `aspectRatio` must be one of
the `ASPECT_RATIO` constants in this module."""
__slots__ = ()
methodName = 'setAspectRatio'
class SetAudio(PipelineCmd):
"""When constructed with parameter list `(phys)`, set the physical
audio stream to 'phys'."""
__slots__ = ()
methodName = 'setAudio'
class SetSubpicture(PipelineCmd):
"""When constructed with parameter list `(phys, hide)`, set the
physical subpicture stream to `phys` and hide it if `hide` is
`True`."""
__slots__ = ()
methodName = 'setSubpicture'
class SetSubpictureClut(PipelineCmd):
"""When constructed with parameter list `(clut)`, set the
subpicture color lookup table to 'clut''clut' is a 16-position
array."""
__slots__ = ()
methodName = 'setSubpictureClut'
class Highlight(PipelineCmd):
"""When constructed with parameter list `(area, button, palette)`,
highlight the specified area, corresponding to the specified
button number and using the specified palette."""
__slots__ = ()
methodName = 'highlight'
class ResetHighlight(PipelineCmd):
"""When constructed without parameters, clear (reset) the highlighted
area."""
__slots__ = ()
methodName = 'resetHighlight'
class StillFrame(PipelineCmd):
"""When constructed without parameter list `(seconds)', tell the
pipeline that a still frame was sent and should be displayed for
the specified number of seconds. If `seconds` is None the still
frame should remain displayed until an external event, like user
interaction, cancels it."""
__slots__ = ()
methodName = 'stillFrame'
| gpl-2.0 | 2,746,791,735,604,076,000 | 30.876923 | 73 | 0.694015 | false | 4.13986 | false | false | false |
drjod/tUNIX | qb/qbItem.py | 1 | 3929 | #!/usr/bin/python
########
#
# qb class item by JOD
# level 3
#
class item: # level3
def __init__(self, local_ndx):
self._local_ndx = local_ndx
# file stream
def write(self, file):
for i in range (0, len(self._nodesNumber)):
file.write(" " + str(self._nodesNumber[i]))
class line(item):
_nodesNumber = [-1,-1]
def __init__(self, local_ndx):
nodesNumber = [-1,-1]
self._nodesNumber = nodesNumber
item.__init__(self, local_ndx)
# content
def fill(self, cubeNodesNumber):
for i in range (0, len(self._nodesNumber)):
self._nodesNumber[i] = int(cubeNodesNumber[i])
class tri(item):
_nodesNumber = [-1,-1,-1]
def __init__(self, local_ndx):
nodesNumber = [-1,-1,-1]
self._nodesNumber = nodesNumber
item.__init__(self, local_ndx)
# content
def fill(self, cubeNodesNumber):
if(self._local_ndx == 0):
self._nodesNumber[0] = int(cubeNodesNumber[0])
self._nodesNumber[1] = int(cubeNodesNumber[1])
self._nodesNumber[2] = int(cubeNodesNumber[3])
else:
self._nodesNumber[0] = int(cubeNodesNumber[1])
self._nodesNumber[1] = int(cubeNodesNumber[2])
self._nodesNumber[2] = int(cubeNodesNumber[3])
class quad(item):
_nodesNumber = [-1,-1,-1,-1]
def __init__(self, local_ndx):
nodesNumber = [-1,-1,-1,-1]
self._nodesNumber = nodesNumber
item.__init__(self, local_ndx)
# content
def fill(self, cubeNodesNumber):
for i in range (0, len(self._nodesNumber)):
self._nodesNumber[i] = int(cubeNodesNumber[i])
class pris(item):
_nodesNumber = [-1,-1,-1,-1,-1,-1]
def __init__(self, local_ndx):
nodesNumber = [-1,-1,-1,-1,-1,-1]
self._nodesNumber = nodesNumber
item.__init__(self, local_ndx)
# content
def fill(self, cubeNodesNumber):
if(self._local_ndx == 0):
self._nodesNumber[0] = int(cubeNodesNumber[0])
self._nodesNumber[1] = int(cubeNodesNumber[1])
self._nodesNumber[2] = int(cubeNodesNumber[3])
self._nodesNumber[3] = int(cubeNodesNumber[4])
self._nodesNumber[4] = int(cubeNodesNumber[5])
self._nodesNumber[5] = int(cubeNodesNumber[7])
else:
self._nodesNumber[0] = int(cubeNodesNumber[1])
self._nodesNumber[1] = int(cubeNodesNumber[2])
self._nodesNumber[2] = int(cubeNodesNumber[3])
self._nodesNumber[3] = int(cubeNodesNumber[4])
self._nodesNumber[4] = int(cubeNodesNumber[5])
self._nodesNumber[5] = int(cubeNodesNumber[6])
class tet(item):
_nodesNumber = [-1,-1,-1,-1]
def __init__(self, local_ndx):
nodesNumber = [-1,-1,-1,-1]
self._nodesNumber = nodesNumber
item.__init__(self, local_ndx)
# content
def fill(self, cubeNodesNumber):
pass
class hexa(item):
_nodesNumber = [-1,-1,-1,-1,-1,-1,-1,-1]
def __init__(self, local_ndx):
nodesNumber = [-1,-1,-1,-1,-1,-1,-1,-1]
self._nodesNumber = nodesNumber
item.__init__(self, local_ndx)
# content
def fill(self, cubeNodesNumber):
for i in range (0, len(self._nodesNumber)):
self._nodesNumber[i] = int(cubeNodesNumber[i])
| gpl-3.0 | 4,477,364,799,453,593,000 | 35.719626 | 71 | 0.480784 | false | 3.70311 | false | false | false |
blancltd/blanc-contentfiles | tests/test_storage.py | 1 | 3514 | import datetime
from unittest import mock
from urllib import parse
from django.test import TestCase, override_settings
from contentfiles.storage import MediaStorage, RemotePrivateStorage
class TestMediaStorage(TestCase):
def test_url(self):
storage = MediaStorage()
url = storage.url("test.txt")
self.assertEqual(url, "https://demo.contentfiles.net/media/test.txt")
def test_unicode_url(self):
storage = MediaStorage()
url = storage.url("Paris+&+Orléans.jpg")
self.assertEqual(url, "https://demo.contentfiles.net/media/Paris%2B%26%2BOrl%C3%A9ans.jpg")
@override_settings(CONTENTFILES_SSL=False)
def test_http_url(self):
storage = MediaStorage()
url = storage.url("test.txt")
self.assertEqual(url, "http://demo.contentfiles.net/media/test.txt")
@override_settings(CONTENTFILES_HOSTNAME="media.example.org")
def test_custom_hostname(self):
storage = MediaStorage()
url = storage.url("test.txt")
self.assertEqual(url, "https://media.example.org/media/test.txt")
@mock.patch("botocore.auth.HmacV1QueryAuth._get_date")
def test_private_storage(self, mock_get_date):
mock_get_date.return_value = "1234567890"
storage = RemotePrivateStorage()
storage.access_key = "AKIA1234567890ABCDEF"
storage.secret_key = "1234567890123456789012345678901234567890"
storage.bucket_name = "demo-bucket"
url = storage.url("test.txt")
parsed_url = parse.urlparse(url)
url_querystring = parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "https")
self.assertEqual(parsed_url.netloc, "demo-bucket.s3.amazonaws.com")
self.assertEqual(parsed_url.path, "/demo/test.txt")
self.assertDictEqual(
url_querystring,
{
"AWSAccessKeyId": ["AKIA1234567890ABCDEF"],
"Signature": ["nolnfqXilquat3YAccmhEyNk/IU="],
"Expires": ["1234567890"],
},
)
@override_settings(
CONTENTFILES_S3_REGION="eu-west-2",
CONTENTFILES_S3_ENDPOINT_URL="https://s3.dualstack.eu-west-2.amazonaws.com",
)
@mock.patch("botocore.auth.datetime")
def test_private_storage_aws4(self, mock_datetime):
mock_datetime.datetime.utcnow.return_value = datetime.datetime(2020, 1, 1, 12, 34, 56, 0)
storage = RemotePrivateStorage()
storage.access_key = "AKIA1234567890ABCDEF"
storage.secret_key = "1234567890123456789012345678901234567890"
storage.bucket_name = "demo-bucket"
url = storage.url("test.txt")
parsed_url = parse.urlparse(url)
url_querystring = parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, "https")
self.assertEqual(parsed_url.netloc, "demo-bucket.s3.dualstack.eu-west-2.amazonaws.com")
self.assertEqual(parsed_url.path, "/demo/test.txt")
self.assertDictEqual(
url_querystring,
{
"X-Amz-Algorithm": ["AWS4-HMAC-SHA256"],
"X-Amz-Credential": ["AKIA1234567890ABCDEF/20200101/eu-west-2/s3/aws4_request"],
"X-Amz-Date": ["20200101T123456Z"],
"X-Amz-Expires": ["300"],
"X-Amz-Signature": [
"be39d90daf58c495bde25a607e20dbf2f75f4d01358a5bc93911a2733bd3da21"
],
"X-Amz-SignedHeaders": ["host"],
},
)
| bsd-3-clause | -8,740,605,519,531,256,000 | 34.13 | 99 | 0.628807 | false | 3.58104 | true | false | false |
RosemaryAntimony/detweeter | detweet.py | 1 | 3040 | """Make a detweet module."""
import imaginations as imags
import markovify
import tweepy
import tweet_reader as tr
def detweet(tweeter):
"""Detweet at someone."""
keys = tr.key_access()
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
api = tweepy.API(auth)
print(" authorization: complete")
profile = api.get_user(tweeter)
pic_add = profile.profile_image_url
pic = imags.get_twitpic(pic_add.replace("_normal", ""), tweeter)
pix = pic.load()
path = "./twits/imgs/{}/".format(tweeter)
print(" image: acquired")
data = tr.twit_reader(tweeter)
print(" tweets: mcgotten")
# print(" {} data".format(len(data)))
with open("./twits/corpses/{}_corpus.txt".format(tweeter), "w") as fp:
print(" corpse: opened")
for ii in xrange(len(data)):
for jj in xrange(len(data[ii]) if len(data[ii]) < 5000 else 5000):
# print("{}".format(data[ii][jj]["text"]))
twit = data[ii][jj]["text"] + "\n"
fp.write(twit.encode("utf-8"))
# if ii > len(data):
# break
print(" corpse: ready")
with open("./twits/corpses/{}_corpus.txt".format(tweeter), 'r') as cf:
corpus = cf.read()
model = markovify.Text(corpus, state_size=2)
print(" model: super")
tweet_len = 138 - len(tweeter)
sentence = ''
temp_sentence = model.make_short_sentence(tweet_len)
try:
sentence += '{} '.format(temp_sentence)
tweet_len -= len(temp_sentence)
except Exception:
sentence = model.make_sentence()
while len(sentence) < 100:
temp_sentence = model.make_short_sentence(tweet_len)
if temp_sentence is not None:
sentence += temp_sentence + ' '
tweet_len -= len(temp_sentence) + 1
print(" sentence: commuted")
sentence += "#{}".format(tweeter)
pic_name = "{}_dtm.jpg".format(tweeter)
xx = pic.size[0]
yy = pic.size[1]
temp_sentence = model.make_short_sentence(72)
while temp_sentence is None:
temp_sentence = model.make_short_sentence(72)
while len(temp_sentence) < 46:
temp_temp = model.make_short_sentence(60)
if temp_temp is not None:
temp_sentence += " " + temp_temp
l1 = temp_sentence[0:16]
l2 = temp_sentence[16:32]
l3 = temp_sentence[32:48]
imags.xorror(pix, xx, yy, sentence)
imags.shapes(pix, xx, yy, sentence,
(ord(sentence[66]) / 8) % len(sentence) + 1)
imags.wordler(pic, l1, l2, l3)
imags.scoots(pix, xx, yy, sentence)
imags.xorror(pix, xx, yy, sentence)
pic.save(path + pic_name)
print(" glitch: art")
sentence = sentence.replace("@", "#")
sentence = sentence.replace("&", "&")
sentence = sentence.encode('utf-8')
print (" " + sentence)
try:
api.update_with_media(path + pic_name, sentence)
except Exception:
"you're banned, idiot."
| gpl-3.0 | 6,931,932,395,870,149,000 | 35.190476 | 78 | 0.590789 | false | 3.258307 | false | false | false |
julython/julython.org | july/people/badges.py | 1 | 8355 | """
---------------
Julython Badges
---------------
This is where all the logic that drives the awarding of badges.
Badges consist of a counter, metric and a badge meta info. The
badge meta data defines the look of the badge the color, text,
icon and popup text to display.
The counters and badge awards are stored in a large json blob for
each user. When either a new commit for the user is added or the
user profile is displayed the counters are updated. After the counters
are updated the badges are iterated over to see if a new one was
added or if the user completed another badge.
Counters
---------
* Game(year) commit count, the count of the current game.
* Total commit count, the overall number of commits
* Game(year) language set, set of languages in the current game.
* Total language set, set of languages over all games.
Badge Example
-------------
Here is a sample badge::
class HundredCommits(Badge):
counter = 'total_commits'
target = 100
badge_class = 'fa-trophy expert'
badge_text = '100+ Commits'
badge_popup = 'One hundredth Commit'
show_progress = True
Example badge json blob::
{
'total_commits': 1232,
'total_projects': 34,
'game_commits': 120,
'game_days': 20,
'_current_comment': "current badges are calculated everytime",
'badges': [
{
'title': 'Committed',
'subtitle': '100+ Commits',
'count': 200,
'total': 100,
'awarded': true,
'icon': "fa-trophy",
'level': "novice"
}
],
'_archived_comment': "This is the list of previous game awards",
'archived_badges': [
{
'title': 'Committed',
'badge_popup': '100+ Commits in Julython 2012'
'count': 200,
'total': 100,
'awarded': true,
'icon': "fa-trophy",
'level': "novice"
}
]
}
Badge Levels
------------
There are currently 5 levels which are differnent colored icons.
* novice
* journeyman
* expert
* rockstar
"""
import re
from django.core.cache import cache
from july.game.models import Game
from july.people.models import UserBadge, Commit
# TODO(rmyers): copied from django 1.7 remove after we update to it
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def camel_case_to_dashes(value):
return re_camel_case.sub(r' \1', value).strip().lower().replace(' ', '_')
class Badge(object):
"""Base badge class"""
counter = None
total = 0
icon = None
title = ""
subtitle = ""
level = ""
def __init__(self, user_data):
self.user_data = user_data
self.count = self.user_data.get(self.counter)
@property
def awarded(self):
return self.count >= self.total
def to_dict(self):
return {
'title': self.title,
'subtitle': self.subtitle,
'icon': self.icon,
'total': self.total,
'count': self.count,
'level': self.level,
'awarded': self.awarded,
}
class Counter(object):
"""Base Counter Class"""
query = None
metric = None
def __init__(self, user, game=None):
self.user = user
self.game = game
@property
def name(self):
return camel_case_to_dashes(self.__class__.__name__)
@property
def cache_key(self):
return '%s-%s' % (self.name, self.user.pk)
def update(self, user_data):
"Update the user json with the count from the query"
cached = cache.get(self.cache_key)
if cached:
count_dict = cached
else:
count_dict = self.run_query()
cache.set(self.cache_key, count_dict, timeout=300)
user_data.update(count_dict)
def run_query(self):
"""Return the count for this query."""
q = getattr(self.user, self.query)
return {self.name: q.count()}
class GameCounter(Counter):
"""Counter for Game Related Counts
This provides a number of counters for a single game.
* game_commits (total number of commits in the game)
* game_days (number of days in the game the user committed)
"""
metric = 'game'
def run_query(self):
if self.game is None:
self.game = Game.active_or_latest()
# Commit.calender returns a list of objects for each day a user has
# commited along with the count during the day. So we can use this
# query to get the total and the number of days.
resp = Commit.calendar(self.game, user=self.user)
objects = resp['objects']
total = 0
for obj in objects:
total += obj.get('commit_count', 0)
return {
'game_commits': total,
'game_days': len(objects)
}
class TotalCommits(Counter):
query = 'commit_set'
metric = 'commits'
class TotalProjects(Counter):
query = 'projects'
metric = 'projects'
class FirstCommit(Badge):
counter = 'total_commits'
title = 'Welcome Aboard'
subtitle = 'Thanks for Joining'
total = 1
icon = "fa-heart"
level = "novice"
class TenCommits(Badge):
counter = 'game_commits'
title = 'A Healthy Start'
subtitle = '10+ Commits'
total = 10
icon = "fa-plus-circle"
level = "novice"
class ThirtyCommits(Badge):
counter = 'game_commits'
title = '1-a-Day Average'
subtitle = '31+ Commits'
total = 31
icon = "fa-plus-circle"
level = "journeyman"
class HundredCommits(Badge):
counter = 'game_commits'
title = 'Outstanding Commitment'
subtitle = '100+ Commits'
total = 100
icon = "fa-plus-circle"
level = "expert"
class ThousandCommits(Badge):
counter = 'game_commits'
title = 'Do You Sleep at All?'
subtitle = '1000+ Commits'
total = 1000
icon = "fa-plus-circle"
level = "rockstar"
class FiveProjects(Badge):
counter = 'total_projects'
title = 'Thanks for Sharing'
subtitle = '5+ Projects'
total = 5
icon = "fa-folder-o"
level = "novice"
class TenProjects(Badge):
counter = 'total_projects'
title = 'Nice Project List'
subtitle = '10+ Projects'
total = 10
icon = "fa-folder-o"
level = "journeyman"
class FiftyProjects(Badge):
counter = 'total_projects'
title = 'You Love Sharing'
subtitle = '50+ Projects'
total = 50
icon = "fa-folder-o"
level = "expert"
class HundredProjects(Badge):
counter = 'total_projects'
title = 'Wow just wow'
subtitle = '100+ Projects'
total = 100
icon = "fa-folder-o"
level = "rockstar"
class PlayedTheGame(Badge):
counter = 'game_commits'
title = 'Played in 2014'
subtitle = 'Everyone deserves a trophy!'
total = 1
icon = "fa-trophy"
level = "novice"
class OneWeekStreak(Badge):
counter = 'game_days'
title = 'Good Start'
subtitle = '7+ days'
total = 7
icon = 'fa-trophy'
level = "journeyman"
class TwoWeekStreak(Badge):
counter = 'game_days'
title = 'Keep it going'
subtitle = '14+ days'
total = 14
icon = 'fa-trophy'
level = "expert"
class EveryDay(Badge):
counter = 'game_days'
title = 'Excellent Commitment'
subtitle = 'Committing Everyday'
total = 31
icon = 'fa-trophy'
level = "rockstar"
BADGES = [
FirstCommit,
TenCommits,
ThirtyCommits,
HundredCommits,
ThousandCommits,
FiveProjects,
TenProjects,
FiftyProjects,
HundredProjects,
PlayedTheGame,
OneWeekStreak,
TwoWeekStreak,
EveryDay,
]
COUNTERS = [
GameCounter,
TotalCommits,
TotalProjects,
]
def update_user(user, game=None):
user_badge, created = UserBadge.objects.get_or_create(user=user)
user_data = user_badge.badges or {}
# Update all the counts in user_dict
for counter in COUNTERS:
c = counter(user, game=None)
c.update(user_data)
user_badges = []
for badge in BADGES:
b = badge(user_data)
user_badges.append(b.to_dict())
user_data['badges'] = user_badges
user_badge.badges = user_data
user_badge.save()
return user_data
| mit | -798,118,022,166,656,500 | 22.535211 | 77 | 0.589707 | false | 3.516414 | false | false | false |
spamwax/goimports-sublime-text-3 | GoImports.py | 1 | 3071 | """
Sublime Text 3 plugin to update list of packages imported
in a Go (golang) source file (scope: source.go) using 'goimports'
(http://github.com/bradfitz/goimports)
Author: Hamid Ghadyani
URL: https://github.com/spamwax/goimports-sublime-text-3
"""
import sublime
import sublime_plugin
import os
import subprocess
import codecs
import tempfile
PLUGIN_FOLDER = os.path.dirname(os.path.realpath(__file__))
SETTINGS_FILE = "GoImports.sublime-settings"
SETTINGS_FILE = "GoImports.sublime-settings"
def plugin_loaded():
global s
s = sublime.load_settings(SETTINGS_FILE)
class GoImportsException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class GoimportsrunCommand(sublime_plugin.TextCommand):
def run(self, edit):
global s
# check the scope and run only if view is a source.go
scope = self.view.scope_name(0).split(' ')
go_scope = False
for _v in scope:
if "source.go" in _v:
go_scope = True
break
if not go_scope:
return
# Get the path to goimports binary.
# you can install using:
# $ go get -u golang.org/x/tools/cmd/goimports
goimports_cmd = s.get("goimports_bin")
# Save current text into a buffer that we can pass as stdin to goimports
buf = buffer_text(self.view)
try:
# Run the 'goimports' command
cur_dir = os.path.dirname(self.view.file_name())
r = subprocess.Popen(goimports_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True,
cwd=cur_dir, stderr=subprocess.PIPE).communicate(input=buf)
if len(r[1]) != 0:
raise GoImportsException(r[1])
newtext = r[0].decode("utf-8")
if self.view.settings().get("ensure_newline_at_eof_on_save"):
if not newtext.endswith("\n"):
newtext += "\n"
# replace the content of the whole file
selection = sublime.Region(0, self.view.size())
self.view.replace(edit, selection, newtext)
except Exception:
import sys
exc = sys.exc_info()[1]
sublime.status_message(str(exc))
class OpenGoimportsSublimeSettings(sublime_plugin.TextCommand):
"""docstring for OpenGoimportsSublimeSettings"""
def run(self, edit):
open_goimports_sublime_settings(self.view.window())
class Goimportsrun(sublime_plugin.EventListener):
"""Will be executed just before saving"""
def on_pre_save(self, view):
if s.get("goimports_enabled",
view.settings().get("goimports_enabled", True)):
view.run_command("goimportsrun")
def buffer_text(view):
file_text = sublime.Region(0, view.size())
return view.substr(file_text).encode('utf-8')
def open_goimports_sublime_settings(window):
fn = os.path.join(PLUGIN_FOLDER, SETTINGS_FILE)
window.open_file(fn)
| mit | 3,636,342,935,638,052,000 | 30.020202 | 106 | 0.617714 | false | 3.686675 | false | false | false |
kawadia/estrangement | EstrangementDemo.py | 1 | 4689 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script demonstrating the use of the estrangement library to detect and
visualize temporal communities.
"""
__author__ = """\n""".join(['Vikas Kawadia ([email protected])',
'Sameet Sreenivasan <[email protected]>',
'Stephen Dabideen <[email protected]>'])
# Copyright (C) 2012 by
# Vikas Kawadia <[email protected]>
# Sameet Sreenivasan <[email protected]>
# Stephen Dabideen <[email protected]>
# All rights reserved.
import sys
import os
from Estrangement import estrangement
from Estrangement import plots
from Estrangement import options_parser
import multiprocessing
def detect_and_plot_temporal_communities():
""" Function to run simulations, based on a specified dataset, and created
tiled plots of the temporal communities.
Parameters can be specified at the command line, when calling this script.
Alternatively, a config file specifed at the command line can be used to set
the parameter. At the very minimum, a path to the data set must be specified.
Each experiment requires a name, which is used to create a folder to store the
results of the simulation. If the results already exist in the folder specified
by the experiment name, plots are created using these existing results and the
simulation is not run on subsequent calls to EstrangementDemo.py.
To run the simulation again, delete the experiment folder before running this script,
or use a different experiment name.
Examples
--------
>>> # To see all configuarable parameters use the -h option
>>> EstrangementDemo.py -h
>>> # Configurable parameters can be specified at the command line
>>> EstrangementDemo.py --dataset_dir ./data --display_on True --exp_name my_experiment
>>> # A config file can be used, but it must be preceeded by an '@'
>>> # Three config files are provided as examples, check that that path to the dataset is valid.
>>> EstrangementDemo.py @senate.conf
>>> EstrangementDemo.py @markovian.conf
>>> EstrangementDemo.py @realitymining.conf
"""
# use argparse to parse command-line arguments using optionsadder.py
opt = options_parser.parse_args()
# A dir is created, specified by the --exp_name argument in
# the current working directory to place all output from the experiment
if(not os.path.exists(opt.exp_name)):
os.mkdir(opt.exp_name)
expdir = os.path.abspath(opt.exp_name)
# set the values of delta to find communities for
deltas = opt.delta
datadir = os.path.abspath(opt.dataset_dir)
# we use the multiprocessing module to run computations for the different
# deltas in parallel.
process_dict = {}
for d in deltas:
output_dir = os.path.join(expdir, "task_delta_" + str(d))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
results_filename = os.path.join(output_dir, "matched_labels.log")
if not os.path.exists(results_filename):
print("Detecting temporal communities for delta=%s"%d)
kwargs={'dataset_dir' : datadir,
'delta' : d,
'results_filename' : results_filename,
'minrepeats' : opt.minrepeats,
'increpeats' : opt.increpeats,
'write_stats': True,
}
os.chdir(output_dir)
process_dict[d] = multiprocessing.Process(target = estrangement.ECA, kwargs = kwargs)
process_dict[d].start()
else:
print("Seems like communities have already been computed for delta=%f; to recompute del dir %s"
%(d, output_dir))
for k in process_dict.keys():
process_dict[k].join()
print("\nDone computing all temporal communities, now producing some visualizations")
# dictionary to pass the output to the plot function
matched_labels_dict = {}
for d in deltas:
results_filename = os.path.join(expdir, "task_delta_" + str(d), "matched_labels.log")
with open(results_filename, 'r') as fr:
result = eval(fr.read())
matched_labels_dict[d] = result
os.chdir(expdir)
# plot the temporal communities
plots.plot_temporal_communities(matched_labels_dict)
os.chdir('..')
# to plot other parameters, set write_stats=True in estrangement.ECA()
# and use plots.plot_function(). For example,
# estrangement.plots.plot_function(['Estrangement'])
if __name__ == "__main__":
detect_and_plot_temporal_communities()
| bsd-3-clause | -4,231,162,085,789,204,000 | 39.076923 | 108 | 0.648113 | false | 3.846596 | false | false | false |
devlights/try-python | trypython/extlib/forwindows/pyautogui01.py | 1 | 1229 | # coding: utf-8
"""
pyautogui モジュールのサンプルです。
マウスの移動について
"""
import pyautogui as autogui
from trypython.common.commoncls import SampleBase
class Sample(SampleBase):
def exec(self):
# ---------------------------------------------------------------------
# pyautogui モジュールは、GUIオートメーションをpythonで行うためのモジュール
# ---------------------------------------------------------------------
# http://pyautogui.readthedocs.io/en/latest/cheatsheet.html
# ---------------------------------------------------------------------
# size() で メインモニタのスクリーンサイズを、moveTo() で指定位置にマウスカーソル
# の移動が行える。
# ---------------------------------------------------------------------
screen_width, screen_height = autogui.size()
autogui.moveTo(100, 100, duration=2)
autogui.moveTo(screen_width - 100, 100, duration=1)
autogui.moveTo(screen_width - 100, screen_height - 100, duration=1)
autogui.moveTo(100, screen_height - 100, duration=2)
def go():
obj = Sample()
obj.exec()
| mit | 8,430,466,980,223,815,000 | 32.903226 | 79 | 0.469077 | false | 3.028818 | false | false | false |
airtonix/django-server-status | server_status/contrib/filesystem/status_report.py | 1 | 2437 | from time import sleep
from datetime import datetime, timedelta
from django.utils.translation import ugettext_lazy as _
from django.contrib.webdesign import lorem_ipsum
from server_status.conf import settings
from server_status.base import BaseServerStatusPlugin
from server_status.registry import plugins
from server_status import exceptions
class BaseFileSystemTest(BaseServerStatusPlugin):
_name = "Filesystems"
_group = "Storage"
storage = None
filename_pattern = 'health_check_storage_test/test-{}-{}.txt'
def get_storage(self):
if isinstance(self.storage, basestring):
return get_storage_class(self.storage)()
else:
return self.storage
def get_file_name(self):
return self.filename_pattern.format(datetime.datetime.now(),
random.randint(10000, 99999))
def get_file_content(self):
# select 64 random lorem lipsum words.
return lorem_lipsum.words(64)
def check_status(self):
try:
# write the file to the storage backend
storage = self.get_storage()
file_name = self.get_file_name()
file_content = self.get_file_content()
# save the file
file_name = storage.save(
file_name, ContentFile(content=file_content))
# read the file and compare
f = storage.open(file_name)
if not storage.exists(file_name):
raise exceptions.ServiceUnavailable(
code="error",
description=_("Filesystem is currently in a readonly state."))
if not f.read() == file_content:
raise exceptions.ServiceUnavailable(
code="error",
message=_("Filesystem is content doesn't match"))
# delete the file and make sure it is gone
storage.delete(file_name)
if storage.exists(file_name):
raise exceptions.ServiceUnavailable(
code="error",
message=_("File was not deleted"))
return True
except Exception:
raise exceptions.ServiceUnstable(
code="error",
message="unknown problems")
@plugins.register
class DefaultFileStorageHealthCheck(BaseFileSystemTest):
storage = settings.DEFAULT_FILE_STORAGE
| mit | 3,975,088,700,019,809,300 | 32.383562 | 82 | 0.601559 | false | 4.741245 | false | false | false |
CristianCantoro/thes2loc | thes2lcsh.py | 1 | 1836 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Autore: Cristian Consonni <[email protected]>
# Inspired by this gist by atomotic:
# https://gist.github.com/atomotic/7229203
#
# The code is released with an MIT license
# please see the LICENSE file for details.
import sys
import csv
import requests
import urlparse
import StringIO
import pickle
from produce_enwiki_titles import PICKLE_FILE
FIELDNAMES_WIKIMAP = ('LC_head', 'relation', 'enwiki')
FIELDNAMES_THES2LSCH = ('thes_id', 'relation', 'lc_head_id', 'wikidata')
LOCH_BASEURL = 'http://id.loc.gov/authorities/label/'
OUTFILE = 'thes2lcsh.map'
with open(PICKLE_FILE, 'r') as infile:
enwiki_titles = pickle.load(infile)
f = StringIO.StringIO(sys.argv[1])
csvin = csv.DictReader(
filter(lambda row: row[0]!='#', f),
FIELDNAMES_WIKIMAP,
delimiter='|'
)
wikimap = [line for line in csvin]
if len(wikimap) == 1:
line = wikimap[0]
print "Process line: ", line
elif len(wikimap) == 0:
print "Discard comments or empty lines", wikimap
exit(0)
else:
print "Error! Line too long: ", wikimap
exit(-1)
finalout = open(OUTFILE, 'a+')
writer = csv.DictWriter(finalout, FIELDNAMES_THES2LSCH)
enwiki = line['enwiki']
if enwiki in enwiki_titles:
resolv = enwiki_titles[enwiki]
req = requests.get(LOCH_BASEURL+line['LC_head'])
if req.ok:
urlpath = urlparse.urlparse(req.url).path.split('/')[-1]
lc_head_no = urlpath.replace('.html', '')
fields = (resolv['thes_id'].strip().strip('"'),
line['relation'],
lc_head_no,
resolv['wikidata']
)
diz =dict(zip(FIELDNAMES_THES2LSCH, fields))
print "Writing: ", diz
writer.writerow(diz)
else:
print "Error with request: ", line
finalout.close()
| mit | -3,876,432,197,401,447,400 | 23.157895 | 72 | 0.635076 | false | 3.024712 | false | false | false |
uwescience/myria | python/MyriaPythonWorker.py | 1 | 6820 | from __future__ import print_function
import os
import sys
import socket
import traceback
import struct
import cPickle
import base64
class SpecialLengths(object):
PYTHON_EXCEPTION_THROWN = -3
END_OF_STREAM = -4
NULL = -5
class DataType(object):
INT = 1
LONG = 2
FLOAT = 3
DOUBLE = 4
BLOB = 5
EXCEPTION = 6
class Serializer(object):
@staticmethod
def read_long(stream):
obj = stream.read(8)
if not obj:
raise EOFError
return struct.unpack("!q", obj)[0]
@staticmethod
def read_float(stream):
obj = stream.read(4)
if not obj:
raise EOFError
return struct.unpack("!f", obj)[0]
@staticmethod
def read_double(stream):
obj = stream.read(8)
if not obj:
raise EOFError
return struct.unpack("!d", obj)[0]
@staticmethod
def read_int(stream):
obj = stream.read(4)
if not obj:
raise EOFError
return struct.unpack("!i", obj)[0]
@staticmethod
def write_int(value, stream):
stream.write(struct.pack("!i", value))
@staticmethod
def write_float(value, stream):
stream.write(struct.pack("!f", value))
@staticmethod
def write_double(value, stream):
stream.write(struct.pack("!d", value))
@staticmethod
def write_long(value, stream):
stream.write(struct.pack("!q", value))
class PickleSerializer(Serializer):
@classmethod
def read_item(cls, stream, item_type, length):
obj = None
if item_type == DataType.INT:
obj = cls.read_int(stream)
elif item_type == DataType.LONG:
obj = cls.read_long(stream)
elif item_type == DataType.FLOAT:
obj = cls.read_float(stream)
elif item_type == DataType.DOUBLE:
obj = cls.read_double(stream)
elif item_type == DataType.BLOB:
obj = cls.loads(stream.read(length))
return obj
@classmethod
def read_tuple(cls, stream, tuplesize):
datalist = []
for _ in range(tuplesize):
# first element read type
element_type = cls.read_int(stream)
# Second read the length
length = cls.read_int(stream)
if length == SpecialLengths.NULL or length == 0:
datalist.append(0)
# length is > 0, read the item now
elif length > 0:
obj = cls.read_item(stream, element_type, length)
datalist.append(obj)
else:
raise ValueError("Invalid length for item.")
return datalist
@classmethod
def write_with_length(cls, obj, stream, output_type):
if output_type == DataType.INT:
cls.write_int(DataType.INT, stream)
cls.write_int(obj, stream)
elif output_type == DataType.LONG:
cls.write_int(DataType.LONG, stream)
cls.write_long(obj, stream)
elif output_type == DataType.FLOAT:
cls.write_int(DataType.FLOAT, stream)
cls.write_float(obj, stream)
elif output_type == DataType.DOUBLE:
cls.write_int(DataType.DOUBLE, stream)
cls.write_double(obj, stream)
elif output_type == DataType.BLOB:
cls.write_int(DataType.BLOB, stream)
cls.pickle_and_write(obj, stream)
elif output_type == DataType.EXCEPTION:
assert type(obj) is str
cls.write_int(len(obj), stream)
stream.write(obj)
@classmethod
def read_command(cls, stream):
length = cls.read_int(stream)
if length < 0:
raise ValueError("Command length cannot be less than zero.")
s = stream.read(length)
if len(s) < length:
raise EOFError
unenc = base64.urlsafe_b64decode(s)
return cls.loads(unenc)
@staticmethod
def dumps(obj):
protocol = 2
return cPickle.dumps(obj, protocol)
@staticmethod
def loads(obj):
return cPickle.loads(obj)
@classmethod
def pickle_and_write(cls, obj, stream):
serialized = cls.dumps(obj)
if serialized is None:
raise ValueError("Serialized value should not be None.")
elif len(serialized) > (1 << 31):
raise ValueError("Cannot serialize object larger than 2G.")
cls.write_int(len(serialized), stream)
stream.write(serialized)
def main(in_file, out_file):
pickle_serializer = PickleSerializer()
try:
func = pickle_serializer.read_command(in_file)
tuple_size = pickle_serializer.read_int(in_file)
output_type = pickle_serializer.read_int(in_file)
is_flatmap = pickle_serializer.read_int(in_file)
if tuple_size < 1:
raise ValueError("Size of tuple should not be less than 1.")
while True:
num_tuples = pickle_serializer.read_int(in_file)
if num_tuples == SpecialLengths.END_OF_STREAM:
break
tuple_list = []
for _ in range(num_tuples):
tuple_list.append(
pickle_serializer.read_tuple(in_file, tuple_size))
retval = func(tuple_list)
if is_flatmap:
count = len(retval)
pickle_serializer.write_int(count, out_file)
for i in range(count):
pickle_serializer.write_with_length(
retval[i], out_file, output_type)
else:
pickle_serializer.write_with_length(
retval, out_file, output_type)
out_file.flush()
except Exception:
try:
pickle_serializer.write_int(
SpecialLengths.PYTHON_EXCEPTION_THROWN, out_file)
pickle_serializer.write_with_length(traceback.format_exc().encode("utf-8"),
out_file, DataType.EXCEPTION)
print(traceback.format_exc(), file=sys.stderr)
except IOError:
# JVM closed the socket
print("IOError:\n{}".
format(traceback.format_exc()), file=sys.stderr)
except Exception:
print("Python worker process failed with exception:\n{}".
format(traceback.format_exc()), file=sys.stderr)
sys.exit(-1)
if __name__ == '__main__':
# Read a local port to connect to from stdin
port_number = int(sys.stdin.readline())
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", port_number))
with os.fdopen(os.dup(sock.fileno()), "rb", 65536) as infile,\
os.fdopen(os.dup(sock.fileno()), "wb", 65536) as outfile:
main(infile, outfile)
| bsd-3-clause | 5,339,379,725,559,334,000 | 29.311111 | 87 | 0.569208 | false | 4.059524 | false | false | false |
GAIA-GMU/PAR | actions/Wag.py | 1 | 1196 | #wag.v.01
#Body_movement
#This frame contains words for motions or actions an Agent p
#erforms using some part of his/her body. A number of word
#s in this frame occur as blends with Communication, in whic
#h the action has an Addressee. For example, 'Pat nodded
#at Kim.' These examples differ from Communication.Gesture
#in that no specific message need be expressed, as in 'She
#nodded to him to sit down.' Since this frame involves a pa
#rticular type of motion, it contains the frame elements Sou
#rce, Path, Goal and Area, which originate in the motion fra
#me. All of these frame elements are generally expressed in
# PP Complements. 'The boy swung his legs from under the ta
#ble.'
def applicability_condition(self,agent,Addressee=-1,Place=-1):
if not checkCapability(agent,self.id):
return FAILURE
return SUCCESS
def preparatory_spec(self,agent,Addressee=-1,Place=-1):
return SUCCESS
def execution_steps(self,agent,Addressee=-1,Place=-1):
return {'PRIMITIVE':('jiggle',{'agents':agent,'objects':(Addressee,Place)})}
def culmination_condition(self,agent,Addressee=-1,Place=-1):
if finishedAction(self.id):
return SUCCESS
return INCOMPLETE
| apache-2.0 | 8,410,052,050,833,781,000 | 37.580645 | 77 | 0.743311 | false | 3.232432 | false | false | false |
H7DE/PervasiveComputingCW1 | grid_analytics.py | 1 | 1360 | from test import *
import matplotlib.pyplot as plt
resultsTable = [] #Table of number of nodes and packet transmitted(%)
#Generate a frequency table of (number of node in simulation, percentage of packets transmitted)
with sqlite3.connect("sample_db/grid.db") as conn:
cursor = conn.cursor()
exprRootName="grid_topo_node_"
for i in range(2, 9):
numNodes = i*i
amount_pkt = "select count(*) from readings where\
readings.experiment_id = '{exprName}{exprNo}'".format(exprName=exprRootName, exprNo=numNodes)
cursor.execute(amount_pkt)
noPkts = cursor.fetchall()[0][0];
maxExpect_pkts = "select experiments.expected_no_transmission_per_node \
* (experiments.no_nodes - 1) from experiments\
where experiments.experiment_id = '{exprName}{exprNo}'".format(exprName=exprRootName, exprNo=numNodes)
cursor.execute(maxExpect_pkts)
maxPkts = cursor.fetchall()[0][0];
resultsTable.append((numNodes, float(noPkts)/float(maxPkts)*100))
#Create plot
plt.plot(*zip(*resultsTable))
plt.title("Comparing WSN network size with %success rate\n of pkt transmission for grid topology")
plt.xlabel('Number of node in simulation')
plt.ylabel('% of Pkts successfully transmitted')
plt.grid(True)
plt.xlim(0, 80)
plt.xticks([x*x for x in range(2, 10)])
plt.show()
| mit | -1,322,628,626,651,396,000 | 37.857143 | 110 | 0.690441 | false | 3.514212 | false | false | false |
dwhswenson/contact_map | contact_map/version.py | 4 | 4347 | # This file vendored from Autorelease
import os
import subprocess
try:
from configparser import ConfigParser, NoSectionError, NoOptionError
except ImportError:
# py2
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
try:
from ._installed_version import _installed_version
from ._installed_version import _installed_git_hash
from ._installed_version import _version_setup_depth
except ImportError:
_installed_version = "Unknown"
_installed_git_hash = "Unknown"
_version_setup_depth = -1
def get_git_version():
"""
Return the git hash as a string.
Apparently someone got this from numpy's setup.py. It has since been
modified a few times.
"""
# Return the git revision as a string
# copied from numpy setup.py
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
with open(os.devnull, 'w') as err_out:
out = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=err_out, # maybe debug later?
env=env).communicate()[0]
return out
try:
git_dir = os.path.dirname(os.path.realpath(__file__))
out = _minimal_ext_cmd(['git', '-C', git_dir, 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = 'Unknown'
return GIT_REVISION
def _seek_parent_dirs_for_file(filename):
rel_directory = None
my_dir = os.path.dirname(os.path.abspath(__file__))
rel_directory_arr = []
while not rel_directory:
expected_dir = os.path.join(*rel_directory_arr) \
if rel_directory_arr else '.'
expected = os.path.join(expected_dir, filename)
if os.path.isfile(os.path.normpath(expected)):
rel_directory = expected_dir
else:
rel_directory_arr.append('..')
if len(rel_directory_arr) > len(my_dir.split(os.sep)):
rel_directory_arr = []
break
return rel_directory
def _find_rel_path_for_file(depth, filename):
rel_directory = None
if depth == 0:
rel_directory = '.'
elif depth >= 1:
rel_directory = os.sep.join(['..'] * depth)
else:
rel_directory = _seek_parent_dirs_for_file(filename)
if rel_directory:
return os.path.normpath(os.path.join(rel_directory, filename))
else:
return None
def get_setup_cfg(directory, filename="setup.cfg"):
"""Load the setup.cfg as a dict-of-dict.
Parameters
----------
directory : str
directory for setup.cfg, relative to cwd; default '.'
filename : str
filename for setup.cfg; default 'setup.cfg'
"""
if isinstance(directory, int):
rel_path = _find_rel_path_for_file(directory, filename)
start_dir = os.path.abspath(os.path.dirname(__file__))
setup_cfg = os.path.normpath(os.path.join(start_dir, rel_path))
else:
setup_cfg = os.path.join(directory, filename)
conf = None
if os.path.exists(setup_cfg):
conf = ConfigParser()
conf.read(setup_cfg)
return conf
def get_setup_version(default_version, directory, filename="setup.cfg"):
version = default_version
conf = get_setup_cfg(directory, filename)
try:
version = conf.get('metadata', 'version')
except (NoSectionError, NoOptionError):
pass # version (or metadata) not defined in setup.cfg
except AttributeError:
pass # no setup.cfg found (conf is None)
return version
short_version = get_setup_version(_installed_version,
directory=_version_setup_depth)
_git_version = get_git_version()
_is_repo = (_git_version != '' and _git_version != "Unknown")
if _is_repo:
git_hash = _git_version
full_version = short_version + "+g" + _git_version[:7]
version = full_version
else:
git_hash = "Unknown"
full_version = short_version + "+g" + _installed_git_hash[:7] + '.install'
version = short_version
| lgpl-2.1 | -7,850,998,979,511,318,000 | 29.829787 | 78 | 0.599954 | false | 3.78 | false | false | false |
sehoonha/optskills | optskills/problems/gp_bow.py | 1 | 3696 | import numpy as np
from numpy.linalg import norm
from sim_problem import SimProblem, PDController, STR
class GPBow(SimProblem):
def __init__(self):
super(GPBow, self).__init__('urdf/BioloidGP/BioloidGP.URDF')
self.__init__simulation__()
desc = []
desc.append([('l_thigh', 1.0), ('r_thigh', 1.0), ])
desc.append([('l_shin', 1.0), ('r_shin', 1.0), ])
desc.append([('l_heel', 1.0), ('r_heel', 1.0), ])
self.desc = desc
self.dim = len(self.desc)
self.eval_counter = 0 # Well, increasing when simulated
self.params = None
def __init__simulation__(self):
self.init_state = self.skel().x
self.init_state[0] = -0.50 * 3.14
self.init_state[4] = 0.230
self.init_state[5] = 0.230
self.reset()
self.controller = PDController(self.skel(), 60, 3.0, 0.3)
self.controller.target = self.skel().q
def simulate(self, sample):
self.eval_counter += 1
self.reset()
self.set_params(sample)
while not self.terminated():
self.step()
# print 'result:', self.params, self.collect_result()
return self.collect_result()
def evaluate(self, result, task):
# Calculate the validity of COM
C = result['C']
lo = np.array([0.0, 0.10, 0.0])
hi = np.array([0.0, 0.15, 0.0])
w = task
C_hat = lo * (1 - w) + hi * w
weight = np.array([1.0, 1.0, 1.0]) * 2.0
obj = norm((C - C_hat) * weight) ** 2
# Calculate parameter penalty
params = result['params']
penalty = 0.0
if params is not None:
for i in range(self.dim):
v = params[i]
penalty += max(0.0, v - 1.0) ** 2
penalty += min(0.0, v - (-1.0)) ** 2
return obj + penalty
def set_random_params(self):
# self.set_params(0.45 + 0.1 * np.random.rand(self.dim))
# self.set_params(2.0 * (np.random.rand(self.dim) - 0.5))
# self.set_params([0.5, -1.0, 0.7])
self.set_params([0.5, -0.5, 0.1])
def set_params(self, x):
self.params = x
ndofs = self.skel().ndofs
q = np.array(self.init_state[:ndofs])
lo = np.array([-2.0] * ndofs)
hi = -lo
for i, dofs in enumerate(self.desc):
v = (x[i] - (-1.0)) / 2.0 # Change to 0 - 1 scale
for (d, w) in dofs:
index = d if isinstance(d, int) else self.skel().dof_index(d)
vv = v if w > 0.0 else 1.0 - v
q[index] = lo[index] + (hi[index] - lo[index]) * vv
self.controller.target = q
def collect_result(self):
res = {}
res['C'] = self.skel().C
res['params'] = self.params
return res
def terminated(self):
return (self.world.t > 0.5)
def __str__(self):
res = self.collect_result()
status = ""
status += '[GPBow at %.4f' % self.world.t
# if self.params is not None:
# status += ' params = %s ' % self.params
for key, value in self.collect_result().iteritems():
if key == 'C':
status += ' %s : %s' % (key, STR(value, 3))
elif key == 'params':
status += ' %s : %s' % (key, STR(value, 4))
else:
status += ' %s : %s' % (key, value)
status += ' value = {'
tasks = np.linspace(0.0, 1.0, 6)
values = [self.evaluate(res, t) for t in tasks]
status += ' '.join(['%.4f' % v for v in values])
status += '}]'
return status
def __repr__(self):
return 'problems.GPBow()'
| mit | -1,646,535,759,857,623,600 | 32.6 | 77 | 0.491613 | false | 3.205551 | false | false | false |
markpasc/makerbase | makerbase/forms.py | 1 | 2776 | # coding=utf-8
from datetime import datetime
from itertools import chain
from wtforms import Form, DateTimeField, FieldList, FormField, HiddenField, TextField, validators
class MonthField(DateTimeField):
def __init__(self, label=None, validators=None, format='%Y-%m', **kwargs):
super(MonthField, self).__init__(label, validators, format, **kwargs)
def process_formdata(self, valueslist):
if not valueslist:
return
date_str = u' '.join(valueslist).strip().lower()
dt = None
formats = (self.format, '%Y/%m', '%m-%Y', '%m/%Y', '%b %Y', '%Y %b', '%B %Y', '%Y %B')
formats = chain(formats, (format.replace('%Y', '%y') for format in formats))
for format in formats:
try:
dt = datetime.strptime(date_str, format)
except ValueError:
pass
else:
break
if dt is None:
self.data = None
raise ValueError(u"Field should be in YYYY-MM format (such as “2012-01”).")
self.data = dt.date()
class WikiForm(Form):
reason = TextField(u'Notes (optional)', [validators.Length(max=140), validators.Optional()])
class MakerForm(WikiForm):
name = TextField(u'Name', [validators.Required(), validators.Length(max=100)])
avatar_url = TextField(u'Avatar URL', [validators.URL(require_tld=True), validators.Optional()],
description=u'Avatar images should display at 150×150 and 75×75 pixel sizes.')
html_url = TextField(u'Web URL', [validators.URL(require_tld=True), validators.Required()],
description=u"Web URLs should be the address of the person's main personal web site.")
class ParticipationForm(WikiForm):
role = TextField(u'Role', [validators.Required(), validators.Length(max=140)])
start_date = MonthField(u'Start month')
end_date = MonthField(u'End month', [validators.Optional()],
description=u'Enter months like “2012-01”. Leave the end month blank for current ongoing projects.')
class ProjectForm(WikiForm):
name = TextField(u'Name', [validators.Length(min=1, max=50), validators.Required()])
html_url = TextField(u'Web URL', [validators.URL(require_tld=True), validators.Required()],
description=u'Web URLs should be the address of a hosted web app or the official web site for a project of some other kind.')
description = TextField(u'Description', [validators.Length(max=140)])
avatar_url = TextField(u'Avatar URL', [validators.URL(require_tld=True), validators.Optional()],
description=u'Avatar images should display at 150×150 and 75×75 pixel sizes.')
class ProjectAddParticipationForm(ParticipationForm):
maker = TextField(u'Maker ID', [validators.Required()])
| mit | 4,444,932,073,110,923,300 | 38.485714 | 133 | 0.658828 | false | 3.765668 | false | false | false |
adrienbrault/home-assistant | tests/components/androidtv/test_media_player.py | 5 | 43441 | """The tests for the androidtv platform."""
import base64
import copy
import logging
from unittest.mock import patch
from androidtv.constants import APPS as ANDROIDTV_APPS
from androidtv.exceptions import LockNotAcquiredException
import pytest
from homeassistant.components.androidtv.media_player import (
ANDROIDTV_DOMAIN,
ATTR_COMMAND,
ATTR_DEVICE_PATH,
ATTR_LOCAL_PATH,
CONF_ADB_SERVER_IP,
CONF_ADBKEY,
CONF_APPS,
CONF_EXCLUDE_UNNAMED_APPS,
CONF_TURN_OFF_COMMAND,
CONF_TURN_ON_COMMAND,
KEYS,
SERVICE_ADB_COMMAND,
SERVICE_DOWNLOAD,
SERVICE_LEARN_SENDEVENT,
SERVICE_UPLOAD,
)
from homeassistant.components.media_player import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_STOP,
SERVICE_SELECT_SOURCE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
)
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_CLASS,
CONF_HOST,
CONF_NAME,
CONF_PLATFORM,
EVENT_HOMEASSISTANT_STOP,
STATE_OFF,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from tests.components.androidtv import patchers
SHELL_RESPONSE_OFF = ""
SHELL_RESPONSE_STANDBY = "1"
# Android TV device with Python ADB implementation
CONFIG_ANDROIDTV_PYTHON_ADB = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Android TV",
CONF_DEVICE_CLASS: "androidtv",
}
}
# Android TV device with ADB server
CONFIG_ANDROIDTV_ADB_SERVER = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Android TV",
CONF_DEVICE_CLASS: "androidtv",
CONF_ADB_SERVER_IP: "127.0.0.1",
}
}
# Fire TV device with Python ADB implementation
CONFIG_FIRETV_PYTHON_ADB = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Fire TV",
CONF_DEVICE_CLASS: "firetv",
}
}
# Fire TV device with ADB server
CONFIG_FIRETV_ADB_SERVER = {
DOMAIN: {
CONF_PLATFORM: ANDROIDTV_DOMAIN,
CONF_HOST: "127.0.0.1",
CONF_NAME: "Fire TV",
CONF_DEVICE_CLASS: "firetv",
CONF_ADB_SERVER_IP: "127.0.0.1",
}
}
def _setup(config):
"""Perform common setup tasks for the tests."""
if CONF_ADB_SERVER_IP not in config[DOMAIN]:
patch_key = "python"
else:
patch_key = "server"
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
entity_id = "media_player.android_tv"
else:
entity_id = "media_player.fire_tv"
return patch_key, entity_id
async def _test_reconnect(hass, caplog, config):
"""Test that the error and reconnection attempts are logged correctly.
"Handles device/service unavailable. Log a warning once when
unavailable, log once when reconnected."
https://developers.home-assistant.io/docs/en/integration_quality_scale_index.html
"""
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
caplog.clear()
caplog.set_level(logging.WARNING)
with patchers.patch_connect(False)[patch_key], patchers.patch_shell(error=True)[
patch_key
], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
for _ in range(5):
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
assert len(caplog.record_tuples) == 2
assert caplog.record_tuples[0][1] == logging.ERROR
assert caplog.record_tuples[1][1] == logging.WARNING
caplog.set_level(logging.DEBUG)
with patchers.patch_connect(True)[patch_key], patchers.patch_shell(
SHELL_RESPONSE_STANDBY
)[patch_key], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_STANDBY
if patch_key == "python":
assert (
"ADB connection to 127.0.0.1:5555 successfully established"
in caplog.record_tuples[2]
)
else:
assert (
"ADB connection to 127.0.0.1:5555 via ADB server 127.0.0.1:5037 successfully established"
in caplog.record_tuples[2]
)
return True
async def _test_adb_shell_returns_none(hass, config):
"""Test the case that the ADB shell command returns `None`.
The state should be `None` and the device should be unavailable.
"""
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state != STATE_UNAVAILABLE
with patchers.patch_shell(None)[patch_key], patchers.patch_shell(error=True)[
patch_key
], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
return True
async def test_reconnect_androidtv_python_adb(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Android TV
* ADB connection method: Python ADB implementation
"""
assert await _test_reconnect(hass, caplog, CONFIG_ANDROIDTV_PYTHON_ADB)
async def test_adb_shell_returns_none_androidtv_python_adb(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Android TV
* ADB connection method: Python ADB implementation
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_ANDROIDTV_PYTHON_ADB)
async def test_reconnect_firetv_python_adb(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Fire TV
* ADB connection method: Python ADB implementation
"""
assert await _test_reconnect(hass, caplog, CONFIG_FIRETV_PYTHON_ADB)
async def test_adb_shell_returns_none_firetv_python_adb(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Fire TV
* ADB connection method: Python ADB implementation
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_FIRETV_PYTHON_ADB)
async def test_reconnect_androidtv_adb_server(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Android TV
* ADB connection method: ADB server
"""
assert await _test_reconnect(hass, caplog, CONFIG_ANDROIDTV_ADB_SERVER)
async def test_adb_shell_returns_none_androidtv_adb_server(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Android TV
* ADB connection method: ADB server
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_ANDROIDTV_ADB_SERVER)
async def test_reconnect_firetv_adb_server(hass, caplog):
"""Test that the error and reconnection attempts are logged correctly.
* Device type: Fire TV
* ADB connection method: ADB server
"""
assert await _test_reconnect(hass, caplog, CONFIG_FIRETV_ADB_SERVER)
async def test_adb_shell_returns_none_firetv_adb_server(hass):
"""Test the case that the ADB shell command returns `None`.
* Device type: Fire TV
* ADB connection method: ADB server
"""
assert await _test_adb_shell_returns_none(hass, CONFIG_FIRETV_ADB_SERVER)
async def test_setup_with_adbkey(hass):
"""Test that setup succeeds when using an ADB key."""
config = copy.deepcopy(CONFIG_ANDROIDTV_PYTHON_ADB)
config[DOMAIN][CONF_ADBKEY] = hass.config.path("user_provided_adbkey")
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER, patchers.PATCH_ISFILE, patchers.PATCH_ACCESS:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
async def _test_sources(hass, config0):
"""Test that sources (i.e., apps) are handled correctly for Android TV and Fire TV devices."""
config = copy.deepcopy(config0)
config[DOMAIN][CONF_APPS] = {
"com.app.test1": "TEST 1",
"com.app.test3": None,
"com.app.test4": SHELL_RESPONSE_OFF,
}
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
patch_update = patchers.patch_androidtv_update(
"playing",
"com.app.test1",
["com.app.test1", "com.app.test2", "com.app.test3", "com.app.test4"],
"hdmi",
False,
1,
"HW5",
)
else:
patch_update = patchers.patch_firetv_update(
"playing",
"com.app.test1",
["com.app.test1", "com.app.test2", "com.app.test3", "com.app.test4"],
"HW5",
)
with patch_update:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_PLAYING
assert state.attributes["source"] == "TEST 1"
assert sorted(state.attributes["source_list"]) == ["TEST 1", "com.app.test2"]
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
patch_update = patchers.patch_androidtv_update(
"playing",
"com.app.test2",
["com.app.test2", "com.app.test1", "com.app.test3", "com.app.test4"],
"hdmi",
True,
0,
"HW5",
)
else:
patch_update = patchers.patch_firetv_update(
"playing",
"com.app.test2",
["com.app.test2", "com.app.test1", "com.app.test3", "com.app.test4"],
"HW5",
)
with patch_update:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_PLAYING
assert state.attributes["source"] == "com.app.test2"
assert sorted(state.attributes["source_list"]) == ["TEST 1", "com.app.test2"]
return True
async def test_androidtv_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Android TV devices."""
assert await _test_sources(hass, CONFIG_ANDROIDTV_ADB_SERVER)
async def test_firetv_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Fire TV devices."""
assert await _test_sources(hass, CONFIG_FIRETV_ADB_SERVER)
async def _test_exclude_sources(hass, config0, expected_sources):
"""Test that sources (i.e., apps) are handled correctly when the `exclude_unnamed_apps` config parameter is provided."""
config = copy.deepcopy(config0)
config[DOMAIN][CONF_APPS] = {
"com.app.test1": "TEST 1",
"com.app.test3": None,
"com.app.test4": SHELL_RESPONSE_OFF,
}
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
if config[DOMAIN].get(CONF_DEVICE_CLASS) != "firetv":
patch_update = patchers.patch_androidtv_update(
"playing",
"com.app.test1",
[
"com.app.test1",
"com.app.test2",
"com.app.test3",
"com.app.test4",
"com.app.test5",
],
"hdmi",
False,
1,
"HW5",
)
else:
patch_update = patchers.patch_firetv_update(
"playing",
"com.app.test1",
[
"com.app.test1",
"com.app.test2",
"com.app.test3",
"com.app.test4",
"com.app.test5",
],
"HW5",
)
with patch_update:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_PLAYING
assert state.attributes["source"] == "TEST 1"
assert sorted(state.attributes["source_list"]) == expected_sources
return True
async def test_androidtv_exclude_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Android TV devices when the `exclude_unnamed_apps` config parameter is provided as true."""
config = copy.deepcopy(CONFIG_ANDROIDTV_ADB_SERVER)
config[DOMAIN][CONF_EXCLUDE_UNNAMED_APPS] = True
assert await _test_exclude_sources(hass, config, ["TEST 1"])
async def test_firetv_exclude_sources(hass):
"""Test that sources (i.e., apps) are handled correctly for Fire TV devices when the `exclude_unnamed_apps` config parameter is provided as true."""
config = copy.deepcopy(CONFIG_FIRETV_ADB_SERVER)
config[DOMAIN][CONF_EXCLUDE_UNNAMED_APPS] = True
assert await _test_exclude_sources(hass, config, ["TEST 1"])
async def _test_select_source(hass, config0, source, expected_arg, method_patch):
"""Test that the methods for launching and stopping apps are called correctly when selecting a source."""
config = copy.deepcopy(config0)
config[DOMAIN][CONF_APPS] = {
"com.app.test1": "TEST 1",
"com.app.test3": None,
"com.youtube.test": "YouTube",
}
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
with method_patch as method_patch_:
await hass.services.async_call(
DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: entity_id, ATTR_INPUT_SOURCE: source},
blocking=True,
)
method_patch_.assert_called_with(expected_arg)
return True
async def test_androidtv_select_source_launch_app_id(hass):
"""Test that an app can be launched using its app ID."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"com.app.test1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_launch_app_name(hass):
"""Test that an app can be launched using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"TEST 1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_launch_app_id_no_name(hass):
"""Test that an app can be launched using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"com.app.test2",
"com.app.test2",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_launch_app_hidden(hass):
"""Test that an app can be launched using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"com.app.test3",
"com.app.test3",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_overridden_app_name(hass):
"""Test that when an app name is overridden via the `apps` configuration parameter, the app is launched correctly."""
# Evidence that the default YouTube app ID will be overridden
assert "YouTube" in ANDROIDTV_APPS.values()
assert "com.youtube.test" not in ANDROIDTV_APPS
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"YouTube",
"com.youtube.test",
patchers.PATCH_LAUNCH_APP,
)
async def test_androidtv_select_source_stop_app_id(hass):
"""Test that an app can be stopped using its app ID."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!com.app.test1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_androidtv_select_source_stop_app_name(hass):
"""Test that an app can be stopped using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!TEST 1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_androidtv_select_source_stop_app_id_no_name(hass):
"""Test that an app can be stopped using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!com.app.test2",
"com.app.test2",
patchers.PATCH_STOP_APP,
)
async def test_androidtv_select_source_stop_app_hidden(hass):
"""Test that an app can be stopped using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_ANDROIDTV_ADB_SERVER,
"!com.app.test3",
"com.app.test3",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_launch_app_id(hass):
"""Test that an app can be launched using its app ID."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"com.app.test1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_launch_app_name(hass):
"""Test that an app can be launched using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"TEST 1",
"com.app.test1",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_launch_app_id_no_name(hass):
"""Test that an app can be launched using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"com.app.test2",
"com.app.test2",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_launch_app_hidden(hass):
"""Test that an app can be launched using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"com.app.test3",
"com.app.test3",
patchers.PATCH_LAUNCH_APP,
)
async def test_firetv_select_source_stop_app_id(hass):
"""Test that an app can be stopped using its app ID."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!com.app.test1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_stop_app_name(hass):
"""Test that an app can be stopped using its friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!TEST 1",
"com.app.test1",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_stop_app_id_no_name(hass):
"""Test that an app can be stopped using its app ID when it has no friendly name."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!com.app.test2",
"com.app.test2",
patchers.PATCH_STOP_APP,
)
async def test_firetv_select_source_stop_hidden(hass):
"""Test that an app can be stopped using its app ID when it is hidden from the sources list."""
assert await _test_select_source(
hass,
CONFIG_FIRETV_ADB_SERVER,
"!com.app.test3",
"com.app.test3",
patchers.PATCH_STOP_APP,
)
async def _test_setup_fail(hass, config):
"""Test that the entity is not created when the ADB connection is not established."""
patch_key, entity_id = _setup(config)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(False)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is None
return True
async def test_setup_fail_androidtv(hass):
"""Test that the Android TV entity is not created when the ADB connection is not established."""
assert await _test_setup_fail(hass, CONFIG_ANDROIDTV_PYTHON_ADB)
async def test_setup_fail_firetv(hass):
"""Test that the Fire TV entity is not created when the ADB connection is not established."""
assert await _test_setup_fail(hass, CONFIG_FIRETV_PYTHON_ADB)
async def test_setup_two_devices(hass):
"""Test that two devices can be set up."""
config = {
DOMAIN: [
CONFIG_ANDROIDTV_ADB_SERVER[DOMAIN],
copy.deepcopy(CONFIG_FIRETV_ADB_SERVER[DOMAIN]),
]
}
config[DOMAIN][1][CONF_HOST] = "127.0.0.2"
patch_key = "server"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
for entity_id in ["media_player.android_tv", "media_player.fire_tv"]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
async def test_setup_same_device_twice(hass):
"""Test that setup succeeds with a duplicated config entry."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert hass.services.has_service(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
async def test_adb_command(hass):
"""Test sending a command via the `androidtv.adb_command` service."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
command = "test command"
response = "test response"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_shell", return_value=response
) as patch_shell:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
patch_shell.assert_called_with(command)
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] == response
async def test_adb_command_unicode_decode_error(hass):
"""Test sending a command via the `androidtv.adb_command` service that raises a UnicodeDecodeError exception."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
command = "test command"
response = b"test response"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_shell",
side_effect=UnicodeDecodeError("utf-8", response, 0, len(response), "TEST"),
):
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
# patch_shell.assert_called_with(command)
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] is None
async def test_adb_command_key(hass):
"""Test sending a key command via the `androidtv.adb_command` service."""
patch_key = "server"
entity_id = "media_player.android_tv"
command = "HOME"
response = None
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_shell", return_value=response
) as patch_shell:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
patch_shell.assert_called_with(f"input keyevent {KEYS[command]}")
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] is None
async def test_adb_command_get_properties(hass):
"""Test sending the "GET_PROPERTIES" command via the `androidtv.adb_command` service."""
patch_key = "server"
entity_id = "media_player.android_tv"
command = "GET_PROPERTIES"
response = {"test key": "test value"}
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties_dict",
return_value=response,
) as patch_get_props:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_ADB_COMMAND,
{ATTR_ENTITY_ID: entity_id, ATTR_COMMAND: command},
blocking=True,
)
patch_get_props.assert_called()
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] == str(response)
async def test_learn_sendevent(hass):
"""Test the `androidtv.learn_sendevent` service."""
patch_key = "server"
entity_id = "media_player.android_tv"
response = "sendevent 1 2 3 4"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.learn_sendevent",
return_value=response,
) as patch_learn_sendevent:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_LEARN_SENDEVENT,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
patch_learn_sendevent.assert_called()
state = hass.states.get(entity_id)
assert state is not None
assert state.attributes["adb_response"] == response
async def test_update_lock_not_acquired(hass):
"""Test that the state does not get updated when a `LockNotAcquiredException` is raised."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
with patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
side_effect=LockNotAcquiredException,
), patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_STANDBY
async def test_download(hass):
"""Test the `androidtv.download` service."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
device_path = "device/path"
local_path = "local/path"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
# Failed download because path is not whitelisted
with patch("androidtv.basetv.basetv_async.BaseTVAsync.adb_pull") as patch_pull:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_DOWNLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_pull.assert_not_called()
# Successful download
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_pull"
) as patch_pull, patch.object(hass.config, "is_allowed_path", return_value=True):
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_DOWNLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_pull.assert_called_with(local_path, device_path)
async def test_upload(hass):
"""Test the `androidtv.upload` service."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
device_path = "device/path"
local_path = "local/path"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
# Failed upload because path is not whitelisted
with patch("androidtv.basetv.basetv_async.BaseTVAsync.adb_push") as patch_push:
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_UPLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_push.assert_not_called()
# Successful upload
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_push"
) as patch_push, patch.object(hass.config, "is_allowed_path", return_value=True):
await hass.services.async_call(
ANDROIDTV_DOMAIN,
SERVICE_UPLOAD,
{
ATTR_ENTITY_ID: entity_id,
ATTR_DEVICE_PATH: device_path,
ATTR_LOCAL_PATH: local_path,
},
blocking=True,
)
patch_push.assert_called_with(local_path, device_path)
async def test_androidtv_volume_set(hass):
"""Test setting the volume for an Android TV device."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.set_volume_level", return_value=0.5
) as patch_set_volume_level:
await hass.services.async_call(
DOMAIN,
SERVICE_VOLUME_SET,
{ATTR_ENTITY_ID: entity_id, ATTR_MEDIA_VOLUME_LEVEL: 0.5},
blocking=True,
)
patch_set_volume_level.assert_called_with(0.5)
async def test_get_image(hass, hass_ws_client):
"""Test taking a screen capture.
This is based on `test_get_image` in tests/components/media_player/test_init.py.
"""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patchers.patch_shell("11")[patch_key]:
await hass.helpers.entity_component.async_update_entity(entity_id)
client = await hass_ws_client(hass)
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_screencap", return_value=b"image"
):
await client.send_json(
{"id": 5, "type": "media_player_thumbnail", "entity_id": entity_id}
)
msg = await client.receive_json()
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]["content_type"] == "image/png"
assert msg["result"]["content"] == base64.b64encode(b"image").decode("utf-8")
with patch(
"androidtv.basetv.basetv_async.BaseTVAsync.adb_screencap",
side_effect=RuntimeError,
):
await client.send_json(
{"id": 6, "type": "media_player_thumbnail", "entity_id": entity_id}
)
msg = await client.receive_json()
# The device is unavailable, but getting the media image did not cause an exception
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
async def _test_service(
hass,
entity_id,
ha_service_name,
androidtv_method,
additional_service_data=None,
return_value=None,
):
"""Test generic Android TV media player entity service."""
service_data = {ATTR_ENTITY_ID: entity_id}
if additional_service_data:
service_data.update(additional_service_data)
androidtv_patch = (
"androidtv.androidtv_async.AndroidTVAsync"
if "android" in entity_id
else "firetv.firetv_async.FireTVAsync"
)
with patch(
f"androidtv.{androidtv_patch}.{androidtv_method}", return_value=return_value
) as service_call:
await hass.services.async_call(
DOMAIN,
ha_service_name,
service_data=service_data,
blocking=True,
)
assert service_call.called
async def test_services_androidtv(hass):
"""Test media player services for an Android TV device."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[patch_key]:
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(
hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER
)
await hass.async_block_till_done()
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await _test_service(
hass, entity_id, SERVICE_MEDIA_NEXT_TRACK, "media_next_track"
)
await _test_service(hass, entity_id, SERVICE_MEDIA_PAUSE, "media_pause")
await _test_service(hass, entity_id, SERVICE_MEDIA_PLAY, "media_play")
await _test_service(
hass, entity_id, SERVICE_MEDIA_PLAY_PAUSE, "media_play_pause"
)
await _test_service(
hass, entity_id, SERVICE_MEDIA_PREVIOUS_TRACK, "media_previous_track"
)
await _test_service(hass, entity_id, SERVICE_MEDIA_STOP, "media_stop")
await _test_service(hass, entity_id, SERVICE_TURN_OFF, "turn_off")
await _test_service(hass, entity_id, SERVICE_TURN_ON, "turn_on")
await _test_service(
hass, entity_id, SERVICE_VOLUME_DOWN, "volume_down", return_value=0.1
)
await _test_service(
hass,
entity_id,
SERVICE_VOLUME_MUTE,
"mute_volume",
{ATTR_MEDIA_VOLUME_MUTED: False},
)
await _test_service(
hass,
entity_id,
SERVICE_VOLUME_SET,
"set_volume_level",
{ATTR_MEDIA_VOLUME_LEVEL: 0.5},
0.5,
)
await _test_service(
hass, entity_id, SERVICE_VOLUME_UP, "volume_up", return_value=0.2
)
async def test_services_firetv(hass):
"""Test media player services for a Fire TV device."""
patch_key, entity_id = _setup(CONFIG_FIRETV_ADB_SERVER)
config = copy.deepcopy(CONFIG_FIRETV_ADB_SERVER)
config[DOMAIN][CONF_TURN_OFF_COMMAND] = "test off"
config[DOMAIN][CONF_TURN_ON_COMMAND] = "test on"
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[patch_key]:
with patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
with patchers.patch_shell(SHELL_RESPONSE_STANDBY)[patch_key]:
await _test_service(hass, entity_id, SERVICE_MEDIA_STOP, "back")
await _test_service(hass, entity_id, SERVICE_TURN_OFF, "adb_shell")
await _test_service(hass, entity_id, SERVICE_TURN_ON, "adb_shell")
async def test_connection_closed_on_ha_stop(hass):
"""Test that the ADB socket connection is closed when HA stops."""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_ADB_SERVER)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[patch_key]:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_ADB_SERVER)
await hass.async_block_till_done()
with patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.adb_close"
) as adb_close:
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert adb_close.called
async def test_exception(hass):
"""Test that the ADB connection gets closed when there is an unforeseen exception.
HA will attempt to reconnect on the next update.
"""
patch_key, entity_id = _setup(CONFIG_ANDROIDTV_PYTHON_ADB)
with patchers.PATCH_ADB_DEVICE_TCP, patchers.patch_connect(True)[
patch_key
], patchers.patch_shell(SHELL_RESPONSE_OFF)[
patch_key
], patchers.PATCH_KEYGEN, patchers.PATCH_ANDROIDTV_OPEN, patchers.PATCH_SIGNER:
assert await async_setup_component(hass, DOMAIN, CONFIG_ANDROIDTV_PYTHON_ADB)
await hass.async_block_till_done()
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
# When an unforessen exception occurs, we close the ADB connection and raise the exception
with patchers.PATCH_ANDROIDTV_UPDATE_EXCEPTION, pytest.raises(Exception):
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_UNAVAILABLE
# On the next update, HA will reconnect to the device
await hass.helpers.entity_component.async_update_entity(entity_id)
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OFF
| mit | -7,712,628,284,357,007,000 | 33.669593 | 155 | 0.638982 | false | 3.58365 | true | false | false |
joristork/milovision | admin_modules/argparse.py | 1 | 2145 | #
# Milovision: A camera pose estimation programme
#
# Copyright (C) 2013 Joris Stork
# See LICENSE.txt
#
# argparse.py
"""
:synopsis: Parses command line arguments using the optparse library.
Note that we use the now deprecated optparse library to maintain
compatibility with the pydc1394 library. This application and the
pydc1394 library it uses should eventually be refactored to the
newer argparse library.
.. moduleauthor:: Joris Stork <[email protected]>
"""
__author__ = "Joris Stork"
from optparse import OptionParser
from pydc1394.cmdline import add_common_options
def run():
""" parses command line args; adds to options defined in pydc/cmdline.py """
usage = "usage: %prog [options] file"
parser = OptionParser(usage)
add_common_options(parser)
parser.add_option("-v", "--verbosity", dest="verbosity",
help="set stdout verbosity (0: critical, 1: error, 2: warning, 3: info, 4: debug)",
type="int")
parser.add_option("-n", "--modules", dest="nr_modules", default=1,
help="set number of pipeline stages to run (1: edge detection; 2: ellipse fitting; 3: pose-1; 4: identify markers; 5: pose-2; 6: register data), default is all",
type="int")
parser.add_option("-s", "--simulate", dest="simulate",
help="set simulation mode (-2: linear generated markers; -1: random generated markers; 0<:preset marker configurations by index nr)",
type="int")
parser.add_option("-w", "--windows", dest="windows",
help="set image display (0: off; 1: on [default])",
type="int")
parser.add_option("-d", "--disk", dest="disk",
help="load marker poses from disk (0: off [default]; 1: on)",
type="int")
parser.add_option("-t", "--simtime", dest="simtime",
help="number of seconds to run simulation (default: 60)",
type="int")
(options, args) = parser.parse_args()
if not options.verbosity:
options.verbosity = 2
if not options.simulate:
options.simulate = 0
return options, args
| mit | 8,910,487,035,085,457,000 | 35.982759 | 173 | 0.629837 | false | 3.9 | false | false | false |
bsmedberg/socorro | socorro/cron/jobs/laglog.py | 2 | 2297 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""an app to monitor and report on replication lag in PG databases"""
from crontabber.base import BaseCronApp
from socorro.external.postgresql.dbapi2_util import (
execute_no_results,
execute_query_fetchall,
)
from crontabber.mixins import with_postgres_transactions
#==============================================================================
@with_postgres_transactions()
class LagLog(BaseCronApp):
app_name = 'LagLog'
app_version = '0.1'
app_description = __doc__
#--------------------------------------------------------------------------
insert_sql = (
"INSERT INTO lag_log (replica_name, moment, lag, master) "
"VALUES (%s, %s, %s, %s)"
)
each_server_sql = (
"SELECT NOW(), client_addr, sent_location, replay_location "
"FROM pg_stat_replication"
)
#--------------------------------------------------------------------------
@staticmethod
def xlog_transform(xlog):
logid, offset = xlog.split('/')
return int('ffffffff', 16) * int(logid, 16) + int(offset, 16)
#--------------------------------------------------------------------------
def run(self):
each_server = self.database_transaction_executor(
execute_query_fetchall,
self.each_server_sql
)
self.config.logger.debug(
'replication database servers: %s',
each_server
)
for now, client_addr, sent_location, replay_location in each_server:
sent_location = self.xlog_transform(sent_location)
replay_location = self.xlog_transform(replay_location)
lag = sent_location - replay_location
self.config.logger.debug(
'%s %s %s %s',
client_addr,
now,
lag,
self.config.database.database_name
)
self.database_transaction_executor(
execute_no_results,
self.insert_sql,
(client_addr, now, lag, self.config.database.database_name)
)
| mpl-2.0 | 204,953,890,628,145,500 | 34.338462 | 79 | 0.511537 | false | 4.375238 | false | false | false |
dufferzafar/critiquebrainz | critiquebrainz/frontend/artist/views_test.py | 1 | 1298 | from critiquebrainz.frontend.testing import FrontendTestCase
class ArtistViewsTestCase(FrontendTestCase):
def test_artist_page(self):
# Basic artist page should be available.
response = self.client.get("/artist/aef06569-098f-4218-a577-b413944d9493")
self.assert200(response)
self.assertIn("HAIM", response.data)
# Album tab
response = self.client.get("/artist/aef06569-098f-4218-a577-b413944d9493?release_type=album")
self.assert200(response)
self.assertIn("Days Are Gone", response.data)
# Singles tab
response = self.client.get("/artist/aef06569-098f-4218-a577-b413944d9493?release_type=single")
self.assert200(response)
self.assertIn("The Wire", response.data)
# EPs tab
response = self.client.get("/artist/aef06569-098f-4218-a577-b413944d9493?release_type=ep")
self.assert200(response)
self.assertIn("Forever", response.data)
# Broadcasts tab
response = self.client.get("/artist/aef06569-098f-4218-a577-b413944d9493?release_type=broadcast")
self.assert200(response)
# Other releases tab
response = self.client.get("/artist/aef06569-098f-4218-a577-b413944d9493?release_type=other")
self.assert200(response)
| gpl-2.0 | -5,407,329,594,796,540,000 | 38.333333 | 105 | 0.684129 | false | 3.253133 | false | false | false |
app-registry/appr-cli | cnrclient/commands/show.py | 1 | 1334 | from cnrclient.display import print_package_info
from cnrclient.commands.command_base import CommandBase
class ShowCmd(CommandBase):
name = 'show'
help_message = "print the package manifest"
def __init__(self, options):
super(ShowCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.version = options.version
self.verbose = options.wide
self.media_type = options.media_type
self.result = None
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
cls._add_mediatype_option(parser, default=None, required=False)
parser.add_argument("-w", "--wide", help="Extend display informations",
action="store_true", default=False)
def _call(self):
client = self.RegistryClient(self.registry_host)
self.result = client.show_package(self.package, version=self.version,
media_type=self.media_type)
def _render_dict(self):
return self.result
def _render_console(self):
return "Info: %s\n\n" % self.package + print_package_info(self.result, self.verbose)
| apache-2.0 | 4,840,114,590,102,224,000 | 36.055556 | 92 | 0.636432 | false | 3.970238 | false | false | false |
harshays/southwest | southwest/multiple_check_ins.py | 1 | 1642 | import sched
import csv
from southwest import SouthwestCheckIn
import time
class MultipleSouthwestCheckIns(object):
"""
parses csv file and
schedules multiple check-ins
@params fname - user information csv filename
@info - CSV File Headers:
first name, last name, code, mm/dd/yyyy, hh:mm (24 hr)
"""
def __init__(self, filename):
self.filename = filename
self.users_csv = None
self.users = []
self._assert()
self.scheduler = sched.scheduler(time.time, time.sleep)
self._parse_file()
def _parse_file(self):
try:
with open(self.filename, 'r+') as f:
self.users_csv = list(csv.reader(f, skipinitialspace = True))
self.users = map(lambda user: SouthwestCheckIn(*user), self.users_csv)
except IOError:
print ("IO Error. Check file and filename parameter")
def _schedule(self):
for i, user in enumerate(self.users):
seconds = user._get_seconds()
print ("{0} is scheduled to check-in in {1:.1f} seconds"
.format(user.name, seconds))
self.scheduler.enter(seconds, 1, user.check_in, ())
def _assert(self):
try:
f = open(self.filename, 'r')
except IOError as e:
print e
exit(0)
try:
csv_reader = csv.reader(f, skipinitialspace = True)
except csv.Error as e:
print e
exit(0)
finally:
f.close()
def run(self):
self._schedule()
self.scheduler.run()
| mit | -3,050,364,477,025,062,000 | 23.507463 | 82 | 0.550548 | false | 4.074442 | false | false | false |
PinkInk/upylib | rfb/rfb/clientmsgs.py | 1 | 3839 | # TODO: consider using u/struct
from rfb.utils import bytes_to_int
def dispatch_msgs(self, msg):
# handle multiple messages
ptr = 0
while ptr < len(msg):
# ClientSetPixelFormat(self, bpp, depth, big, true, masks, shifts)
if msg[ptr] == 0:
# if ClientSetPixelFormat is received, post init
# over-rules ServerSetPixelFormat sent, during init
self.bpp = msg[ptr+4]
self.depth = msg[ptr+5]
self.big = msg[ptr+6] == 1
self.true = msg[ptr+7] == 1
self.masks = (
bytes_to_int( msg[ptr+8:ptr+10] ),
bytes_to_int( msg[ptr+10:ptr+12] ),
bytes_to_int( msg[ptr+12:ptr+14] ),
)
self.shifts = (
msg[ptr+14],
msg[ptr+15],
msg[ptr+16]
)
if hasattr(self, 'ClientSetPixelFormat'):
self.ClientSetPixelFormat(
self.bpp,
self.depth,
self.big,
self.true,
self.masks,
self.shifts
)
# Colourmap (not currently implemented):
# If this msg is recv'd from client svr colourmap sent during
# init is cleared, therefore svr must send again before sending
# any framebuffer updates
ptr += 20 # includes trailing padding
# ClientSetEncodings(self, encodings)
elif msg[ptr] == 2:
count = bytes_to_int( msg[ptr+2:ptr+4] )
encodings = [
bytes_to_int( msg[ptr+4+i : ptr+8+i] )
for i in range(0, count*4, 4)
]
# session encodings are sent/set by client post init
self.encodings = encodings
if hasattr(self, 'ClientSetEncodings'):
self.ClientSetEncodings(encodings)
ptr += 4 + (count*4)
# ClientFrameBufferUpdateRequest(self, incr, x, y, w, h)
elif msg[ptr] == 3:
if hasattr(self, 'ClientFrameBufferUpdateRequest'):
self.ClientFrameBufferUpdateRequest(
msg[ptr+1] == 1,
bytes_to_int( msg[ptr+2:ptr+4] ),
bytes_to_int( msg[ptr+4:ptr+6] ),
bytes_to_int( msg[ptr+6:ptr+8] ),
bytes_to_int( msg[ptr+8:ptr+10] )
)
ptr += 10
# ClientKeyEvent(self, down, key)
elif msg[ptr] == 4:
if hasattr(self, 'ClientKeyEvent'):
self.ClientKeyEvent(
msg[ptr+1] == 1,
bytes_to_int( msg[ptr+4:ptr+8] )
)
ptr += 8
# ClientPointerEvent(self, buttons, x, y)
elif msg[ptr] == 5:
if hasattr(self, 'ClientPointerEvent'):
self.ClientPointerEvent(
msg[ptr+1],
bytes_to_int( msg[ptr+2:ptr+4] ),
bytes_to_int( msg[ptr+4:ptr+6] )
)
ptr += 6
# ClientCutText(self, text)
elif msg[ptr] == 6:
l = bytes_to_int( msg[2:6] )
if hasattr(self, 'ClientCutText'):
self.ClientCutText(
msg[ptr+6 : ptr+l]
)
ptr += 6 + l
elif msg[ptr] > 6:
if hasattr(self, 'ClientOtherMsg'):
# ClientOtherMsg must return len of 1st msg
ptr += ClientOtherMsg(msg)
else:
# skip all messages
# ... no way to tell how long the msg is ...
ptr = len(msg)
| mit | -2,182,950,824,678,368,800 | 34.913462 | 77 | 0.44647 | false | 4.105882 | false | false | false |
essamjoubori/girder | girder/utility/gridfs_assetstore_adapter.py | 1 | 10578 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import bson
import cherrypy
import pymongo
import six
import uuid
from six import BytesIO
from girder import logger
from girder.models import getDbConnection
from girder.models.model_base import ValidationException
from hashlib import sha512
from . import hash_state
from .abstract_assetstore_adapter import AbstractAssetstoreAdapter
# 2MB chunks. Clients must not send any chunks that are smaller than this
# unless they are sending the final chunk.
CHUNK_SIZE = 2097152
class GridFsAssetstoreAdapter(AbstractAssetstoreAdapter):
"""
This assetstore type stores files within MongoDB using the GridFS data
model.
"""
@staticmethod
def validateInfo(doc):
"""
Validate the assetstore -- make sure we can connect to it and that the
necessary indexes are set up.
"""
if not doc.get('db', ''):
raise ValidationException('Database name must not be empty.', 'db')
if '.' in doc['db'] or ' ' in doc['db']:
raise ValidationException('Database name cannot contain spaces'
' or periods.', 'db')
chunkColl = getDbConnection(
doc.get('mongohost', None), doc.get('replicaset', None),
autoRetry=False, serverSelectionTimeoutMS=10000)[doc['db']].chunk
try:
chunkColl.create_index([
('uuid', pymongo.ASCENDING),
('n', pymongo.ASCENDING)
], unique=True)
except pymongo.errors.ServerSelectionTimeoutError as e:
raise ValidationException(
'Could not connect to the database: %s' % str(e))
return doc
@staticmethod
def fileIndexFields():
return ['sha512']
def __init__(self, assetstore):
"""
:param assetstore: The assetstore to act on.
"""
super(GridFsAssetstoreAdapter, self).__init__(assetstore)
try:
self.chunkColl = getDbConnection(
self.assetstore.get('mongohost', None),
self.assetstore.get('replicaset', None)
)[self.assetstore['db']].chunk
except pymongo.errors.ConnectionFailure:
logger.error('Failed to connect to GridFS assetstore %s',
self.assetstore['db'])
self.chunkColl = 'Failed to connect'
self.unavailable = True
return
except pymongo.errors.ConfigurationError:
logger.exception('Failed to configure GridFS assetstore %s',
self.assetstore['db'])
self.chunkColl = 'Failed to configure'
self.unavailable = True
return
def initUpload(self, upload):
"""
Creates a UUID that will be used to uniquely link each chunk to
"""
upload['chunkUuid'] = uuid.uuid4().hex
upload['sha512state'] = hash_state.serializeHex(sha512())
return upload
def uploadChunk(self, upload, chunk):
"""
Stores the uploaded chunk in fixed-sized pieces in the chunks
collection of this assetstore's database.
"""
# If we know the chunk size is too large or small, fail early.
self.checkUploadSize(upload, self.getChunkSize(chunk))
if isinstance(chunk, six.text_type):
chunk = chunk.encode('utf8')
if isinstance(chunk, six.binary_type):
chunk = BytesIO(chunk)
# Restore the internal state of the streaming SHA-512 checksum
checksum = hash_state.restoreHex(upload['sha512state'], 'sha512')
# This bit of code will only do anything if there is a discrepancy
# between the received count of the upload record and the length of
# the file stored as chunks in the database. This code simply updates
# the sha512 state with the difference before reading the bytes sent
# from the user.
if self.requestOffset(upload) > upload['received']:
cursor = self.chunkColl.find({
'uuid': upload['chunkUuid'],
'n': {'$gte': upload['received'] // CHUNK_SIZE}
}, projection=['data']).sort('n', pymongo.ASCENDING)
for result in cursor:
checksum.update(result['data'])
cursor = self.chunkColl.find({
'uuid': upload['chunkUuid']
}, projection=['n']).sort('n', pymongo.DESCENDING).limit(1)
if cursor.count(True) == 0:
n = 0
else:
n = cursor[0]['n'] + 1
size = 0
startingN = n
while not upload['received']+size > upload['size']:
data = chunk.read(CHUNK_SIZE)
if not data:
break
# If a timeout occurs while we are trying to load data, we might
# have succeeded, in which case we will get a DuplicateKeyError
# when it automatically retries. Therefore, log this error but
# don't stop.
try:
self.chunkColl.insert_one({
'n': n,
'uuid': upload['chunkUuid'],
'data': bson.binary.Binary(data)
})
except pymongo.errors.DuplicateKeyError:
logger.info('Received a DuplicateKeyError while uploading, '
'probably because we reconnected to the database '
'(chunk uuid %s part %d)', upload['chunkUuid'], n)
n += 1
size += len(data)
checksum.update(data)
chunk.close()
try:
self.checkUploadSize(upload, size)
except ValidationException:
# The user tried to upload too much or too little. Delete
# everything we added
self.chunkColl.delete_many({
'uuid': upload['chunkUuid'],
'n': {'$gte': startingN}
}, multi=True)
raise
# Persist the internal state of the checksum
upload['sha512state'] = hash_state.serializeHex(checksum)
upload['received'] += size
return upload
def requestOffset(self, upload):
"""
The offset will be the CHUNK_SIZE * total number of chunks in the
database for this file. We return the max of that and the received
count because in testing mode we are uploading chunks that are smaller
than the CHUNK_SIZE, which in practice will not work.
"""
cursor = self.chunkColl.find({
'uuid': upload['chunkUuid']
}, projection=['n']).sort('n', pymongo.DESCENDING).limit(1)
if cursor.count(True) == 0:
offset = 0
else:
offset = cursor[0]['n'] * CHUNK_SIZE
return max(offset, upload['received'])
def finalizeUpload(self, upload, file):
"""
Grab the final state of the checksum and set it on the file object,
and write the generated UUID into the file itself.
"""
hash = hash_state.restoreHex(upload['sha512state'],
'sha512').hexdigest()
file['sha512'] = hash
file['chunkUuid'] = upload['chunkUuid']
file['chunkSize'] = CHUNK_SIZE
return file
def downloadFile(self, file, offset=0, headers=True, endByte=None,
contentDisposition=None, **kwargs):
"""
Returns a generator function that will be used to stream the file from
the database to the response.
"""
if endByte is None or endByte > file['size']:
endByte = file['size']
if headers:
cherrypy.response.headers['Accept-Ranges'] = 'bytes'
self.setContentHeaders(file, offset, endByte, contentDisposition)
# If the file is empty, we stop here
if endByte - offset <= 0:
return lambda: ''
n = 0
chunkOffset = 0
# We must "seek" to the correct chunk index and local offset
if offset > 0:
n = offset // file['chunkSize']
chunkOffset = offset % file['chunkSize']
cursor = self.chunkColl.find({
'uuid': file['chunkUuid'],
'n': {'$gte': n}
}, projection=['data']).sort('n', pymongo.ASCENDING)
def stream():
co = chunkOffset # Can't assign to outer scope without "nonlocal"
position = offset
shouldBreak = False
for chunk in cursor:
chunkLen = len(chunk['data'])
if position + chunkLen > endByte:
chunkLen = endByte - position + co
shouldBreak = True
yield chunk['data'][co:chunkLen]
if shouldBreak:
break
position += chunkLen - co
if co > 0:
co = 0
return stream
def deleteFile(self, file):
"""
Delete all of the chunks in the collection that correspond to the
given file.
"""
q = {
'chunkUuid': file['chunkUuid'],
'assetstoreId': self.assetstore['_id']
}
matching = self.model('file').find(q, limit=2, projection=[])
if matching.count(True) == 1:
try:
self.chunkColl.delete_many({'uuid': file['chunkUuid']})
except pymongo.errors.AutoReconnect:
# we can't reach the database. Go ahead and return; a system
# check will be necessary to remove the abandoned file
pass
def cancelUpload(self, upload):
"""
Delete all of the chunks associated with a given upload.
"""
self.chunkColl.delete_many({'uuid': upload['chunkUuid']})
| apache-2.0 | 3,210,352,515,292,480,000 | 34.736486 | 79 | 0.563528 | false | 4.615183 | false | false | false |
rwightman/pytorch-image-models | timm/data/dataset.py | 1 | 4546 | """ Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.utils.data as data
import os
import torch
import logging
from PIL import Image
from .parsers import create_parser
_logger = logging.getLogger(__name__)
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
def __init__(
self,
root,
parser=None,
class_map='',
load_bytes=False,
transform=None,
):
if parser is None or isinstance(parser, str):
parser = create_parser(parser or '', root=root, class_map=class_map)
self.parser = parser
self.load_bytes = load_bytes
self.transform = transform
self._consecutive_errors = 0
def __getitem__(self, index):
img, target = self.parser[index]
try:
img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
except Exception as e:
_logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
self._consecutive_errors += 1
if self._consecutive_errors < _ERROR_RETRY:
return self.__getitem__((index + 1) % len(self.parser))
else:
raise e
self._consecutive_errors = 0
if self.transform is not None:
img = self.transform(img)
if target is None:
target = torch.tensor(-1, dtype=torch.long)
return img, target
def __len__(self):
return len(self.parser)
def filename(self, index, basename=False, absolute=False):
return self.parser.filename(index, basename, absolute)
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class IterableImageDataset(data.IterableDataset):
def __init__(
self,
root,
parser=None,
split='train',
is_training=False,
batch_size=None,
class_map='',
load_bytes=False,
repeats=0,
transform=None,
):
assert parser is not None
if isinstance(parser, str):
self.parser = create_parser(
parser, root=root, split=split, is_training=is_training, batch_size=batch_size, repeats=repeats)
else:
self.parser = parser
self.transform = transform
self._consecutive_errors = 0
def __iter__(self):
for img, target in self.parser:
if self.transform is not None:
img = self.transform(img)
if target is None:
target = torch.tensor(-1, dtype=torch.long)
yield img, target
def __len__(self):
if hasattr(self.parser, '__len__'):
return len(self.parser)
else:
return 0
def filename(self, index, basename=False, absolute=False):
assert False, 'Filename lookup by index not supported, use filenames().'
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
def __init__(self, dataset, num_splits=2):
self.augmentation = None
self.normalize = None
self.dataset = dataset
if self.dataset.transform is not None:
self._set_transforms(self.dataset.transform)
self.num_splits = num_splits
def _set_transforms(self, x):
assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
self.dataset.transform = x[0]
self.augmentation = x[1]
self.normalize = x[2]
@property
def transform(self):
return self.dataset.transform
@transform.setter
def transform(self, x):
self._set_transforms(x)
def _normalize(self, x):
return x if self.normalize is None else self.normalize(x)
def __getitem__(self, i):
x, y = self.dataset[i] # all splits share the same dataset base transform
x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split)
# run the full augmentation on the remaining splits
for _ in range(self.num_splits - 1):
x_list.append(self._normalize(self.augmentation(x)))
return tuple(x_list), y
def __len__(self):
return len(self.dataset)
| apache-2.0 | 6,783,638,191,954,104,000 | 30.136986 | 112 | 0.592609 | false | 4.110307 | false | false | false |
chainer/chainercv | chainercv/links/model/vgg/vgg16.py | 3 | 6078 | from __future__ import division
import numpy as np
import chainer
from chainer.functions import dropout
from chainer.functions import max_pooling_2d
from chainer.functions import relu
from chainer.functions import softmax
from chainer.initializers import constant
from chainer.initializers import normal
from chainer.links import Linear
from chainercv.links.connection.conv_2d_activ import Conv2DActiv
from chainercv.links.model.pickable_sequential_chain import \
PickableSequentialChain
from chainercv import utils
# RGB order
_imagenet_mean = np.array(
[123.68, 116.779, 103.939], dtype=np.float32)[:, np.newaxis, np.newaxis]
class VGG16(PickableSequentialChain):
"""VGG-16 Network.
This is a pickable sequential link.
The network can choose output layers from set of all
intermediate layers.
The attribute :obj:`pick` is the names of the layers that are going
to be picked by :meth:`forward`.
The attribute :obj:`layer_names` is the names of all layers
that can be picked.
Examples:
>>> model = VGG16()
# By default, forward returns a probability score (after Softmax).
>>> prob = model(imgs)
>>> model.pick = 'conv5_3'
# This is layer conv5_3 (after ReLU).
>>> conv5_3 = model(imgs)
>>> model.pick = ['conv5_3', 'fc6']
>>> # These are layers conv5_3 (after ReLU) and fc6 (before ReLU).
>>> conv5_3, fc6 = model(imgs)
.. seealso::
:class:`chainercv.links.model.PickableSequentialChain`
When :obj:`pretrained_model` is the path of a pre-trained chainer model
serialized as a :obj:`.npz` file in the constructor, this chain model
automatically initializes all the parameters with it.
When a string in the prespecified set is provided, a pretrained model is
loaded from weights distributed on the Internet.
The list of pretrained models supported are as follows:
* :obj:`imagenet`: Loads weights trained with ImageNet and distributed \
at `Model Zoo \
<https://github.com/BVLC/caffe/wiki/Model-Zoo>`_.
Args:
n_class (int): The number of classes. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the number of classes used to train the pretrained model
is used. Otherwise, the number of classes in ILSVRC 2012 dataset
is used.
pretrained_model (string): The destination of the pre-trained
chainer model serialized as a :obj:`.npz` file.
If this is one of the strings described
above, it automatically loads weights stored under a directory
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
where :obj:`$CHAINER_DATASET_ROOT` is set as
:obj:`$HOME/.chainer/dataset` unless you specify another value
by modifying the environment variable.
mean (numpy.ndarray): A mean value. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the mean value used to train the pretrained model is used.
Otherwise, the mean value calculated from ILSVRC 2012 dataset
is used.
initialW (callable): Initializer for the weights.
initial_bias (callable): Initializer for the biases.
"""
_models = {
'imagenet': {
'param': {'n_class': 1000, 'mean': _imagenet_mean},
'overwritable': ('mean',),
'url': 'https://chainercv-models.preferred.jp/'
'vgg16_imagenet_converted_2017_07_18.npz'
}
}
def __init__(self,
n_class=None, pretrained_model=None, mean=None,
initialW=None, initial_bias=None):
param, path = utils.prepare_pretrained_model(
{'n_class': n_class, 'mean': mean},
pretrained_model, self._models,
{'n_class': 1000, 'mean': _imagenet_mean})
self.mean = param['mean']
if initialW is None:
# Employ default initializers used in the original paper.
initialW = normal.Normal(0.01)
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
initialW = constant.Zero()
kwargs = {'initialW': initialW, 'initial_bias': initial_bias}
super(VGG16, self).__init__()
with self.init_scope():
self.conv1_1 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs)
self.conv1_2 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs)
self.pool1 = _max_pooling_2d
self.conv2_1 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs)
self.conv2_2 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs)
self.pool2 = _max_pooling_2d
self.conv3_1 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
self.conv3_2 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
self.conv3_3 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
self.pool3 = _max_pooling_2d
self.conv4_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv4_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv4_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.pool4 = _max_pooling_2d
self.conv5_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv5_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv5_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.pool5 = _max_pooling_2d
self.fc6 = Linear(None, 4096, **kwargs)
self.fc6_relu = relu
self.fc6_dropout = dropout
self.fc7 = Linear(None, 4096, **kwargs)
self.fc7_relu = relu
self.fc7_dropout = dropout
self.fc8 = Linear(None, param['n_class'], **kwargs)
self.prob = softmax
if path:
chainer.serializers.load_npz(path, self)
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=2)
| mit | 8,276,762,520,706,792,000 | 39.251656 | 76 | 0.611056 | false | 3.641702 | false | false | false |
tinloaf/home-assistant | homeassistant/components/switch/verisure.py | 5 | 2402 | """
Support for Verisure Smartplugs.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.verisure/
"""
import logging
from time import time
from homeassistant.components.verisure import HUB as hub
from homeassistant.components.verisure import CONF_SMARTPLUGS
from homeassistant.components.switch import SwitchDevice
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure switch platform."""
if not int(hub.config.get(CONF_SMARTPLUGS, 1)):
return False
hub.update_overview()
switches = []
switches.extend([
VerisureSmartplug(device_label)
for device_label in hub.get('$.smartPlugs[*].deviceLabel')])
add_entities(switches)
class VerisureSmartplug(SwitchDevice):
"""Representation of a Verisure smartplug."""
def __init__(self, device_id):
"""Initialize the Verisure device."""
self._device_label = device_id
self._change_timestamp = 0
self._state = False
@property
def name(self):
"""Return the name or location of the smartplug."""
return hub.get_first(
"$.smartPlugs[?(@.deviceLabel == '%s')].area",
self._device_label)
@property
def is_on(self):
"""Return true if on."""
if time() - self._change_timestamp < 10:
return self._state
self._state = hub.get_first(
"$.smartPlugs[?(@.deviceLabel == '%s')].currentState",
self._device_label) == "ON"
return self._state
@property
def available(self):
"""Return True if entity is available."""
return hub.get_first(
"$.smartPlugs[?(@.deviceLabel == '%s')]",
self._device_label) is not None
def turn_on(self, **kwargs):
"""Set smartplug status on."""
hub.session.set_smartplug_state(self._device_label, True)
self._state = True
self._change_timestamp = time()
def turn_off(self, **kwargs):
"""Set smartplug status off."""
hub.session.set_smartplug_state(self._device_label, False)
self._state = False
self._change_timestamp = time()
# pylint: disable=no-self-use
def update(self):
"""Get the latest date of the smartplug."""
hub.update_overview()
| apache-2.0 | 796,525,481,526,199,900 | 29.794872 | 74 | 0.622398 | false | 3.837061 | false | false | false |
rc0r/afl-utils | db_connectors/con_sqlite.py | 1 | 5158 | """
Copyright 2015-2016 @_rc0r <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sqlite3 as lite
from afl_utils.AflPrettyPrint import *
class sqliteConnector:
def __init__(self, database_path, verbose=True):
self.database_path = database_path
self.dbcon = lite.connect(database_path, isolation_level='Exclusive')
self.dbcur = self.dbcon.cursor()
self.dbcur.execute('PRAGMA synchronous = 0')
# self.dbcur.execute('PRAGMA journal_mode = OFF')
self.verbose = verbose
def init_database(self, table, table_spec):
"""
Prepares a sqlite3 database for data set storage. If the file specified in database_path doesn't exist a new
sqlite3 database with table 'Data' will be created. Otherwise the existing database is used to store additional
data sets.
DO NOT USE WITH USER SUPPLIED `table` AND `table_spec` PARAMS!
!!! THIS METHOD IS *NOT* SQLi SAFE !!!
:param table: Name of the table to create.
:param table_spec: String containing the SQL table specification
:return: None
"""
table_data_exists = False
if os.path.isfile(self.database_path):
try:
self.dbcur.execute("SELECT Count(*) FROM {}".format(table))
if self.verbose:
print_warn("Using existing database to store results, %s entries in this database so far." %
str(self.dbcur.fetchone()[0]))
table_data_exists = True
except lite.OperationalError:
if self.verbose:
print_warn("Table \'{}\' not found in existing database!".format(table))
if not table_data_exists: # If the database doesn't exist, we'll create it.
if self.verbose:
print_ok("Creating new table \'{}\' in database \'{}\' to store data!".format(table, self.database_path))
self.dbcur.execute("CREATE TABLE `{}` ({})".format(table, table_spec))
def dataset_exists(self, table, dataset, compare_fields):
"""
Check if dataset was already submitted into database.
DO NOT USE WITH USER SUPPLIED `table`, `dataset` or `compare_fields` PARAMS!
!!! THIS METHOD IS *NOT* SQLi SAFE !!!
:param table: Name of table to perform the check on.
:param dataset: A dataset dict consisting of sample filename, sample classification
and classification description.
:param compare_fields: List containing field names that will be checked using logical AND operation.
:return: True if the data set is already present in database, False otherwise.
"""
# The nice thing about using the SQL DB is that I can just have it make
# a query to make a duplicate check. This can likely be done better but
# it's "good enough" for now.
output = False
# check sample by its name (we could check by hash to avoid dupes in the db)
single_compares = []
for compare_field in compare_fields:
single_compares.append("({} IS '{}')".format(compare_field, dataset[compare_field]))
qstring = "SELECT * FROM {} WHERE {}".format(table, " AND ".join(single_compares))
self.dbcur.execute(qstring)
if self.dbcur.fetchone() is not None: # We should only have to pull one.
output = True
return output
def insert_dataset(self, table, dataset):
"""
Insert a dataset into the database.
DO NOT USE WITH USER SUPPLIED `table` AND `table_spec` PARAMS!
!!! THIS METHOD IS *NOT* SQLi SAFE !!!
:param table: Name of the table to insert data into.
:param dataset: A dataset dict consisting of sample filename, sample classification and classification
description.
:return: None
"""
# Just a simple function to write the results to the database.
if len(dataset) <= 0:
return
field_names_string = ", ".join(["`{}`".format(k) for k in dataset.keys()])
field_values_string = ", ".join(["'{}'".format(v) for v in dataset.values()])
qstring = "INSERT INTO {} ({}) VALUES({})".format(table, field_names_string, field_values_string)
self.dbcur.execute(qstring)
def commit_close(self):
"""
Write database changes to disk and close cursor and connection.
:return: None
"""
self.dbcon.commit()
self.dbcur.close()
self.dbcon.close()
| apache-2.0 | 3,754,788,818,714,356,700 | 41.278689 | 121 | 0.621171 | false | 4.245267 | false | false | false |
GoogleCloudPlatform/tensorflow-without-a-phd | tensorflow-planespotting/trainer/datagen/pickle32.py | 1 | 1074 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Convert from pickle format 3 to pickle format 2 for use with Python 2.7
# Run this under Python 3
import sys
import gzip
import pickle
def main(argv):
if len(argv)<2:
print("usage: python pickle32.py file")
return -1
filename = argv[1]
with gzip.open(filename, mode='rb') as f:
unpickled = pickle.load(f)
with gzip.open(filename + '2', mode='wb') as d:
pickle.dump(unpickled, d, protocol=2)
if __name__ == '__main__':
main(sys.argv) | apache-2.0 | 7,241,288,575,465,329,000 | 29.714286 | 74 | 0.693669 | false | 3.78169 | false | false | false |
blesscat/flux_line_bot | fluxclient/upnp/task.py | 1 | 7567 |
import logging
from fluxclient.utils.version import StrictVersion
from fluxclient.upnp.discover import UpnpDiscover
from .abstract_backend import UpnpError, UpnpException, NotSupportError
from .udp1_backend import UpnpUdp1Backend
from .ssl1_backend import UpnpSSL1Backend
__all__ = ["UpnpTask", "UpnpError", "UpnpException"]
BACKENDS = [
UpnpSSL1Backend,
UpnpUdp1Backend]
logger = logging.getLogger(__name__)
class UpnpTask(object):
"""UpnpTask provides some configuration methods for the device. When creating \
a UpnpTask instance, the argument **uuid** is required. If parameter \
**device_metadata** is not given, UpnpTask will use lookup_callback and \
lookup_timeout to create a UpnpDiscover instance and try to get metadata from \
network.
:param uuid.UUID uuid: Device uuid, set UUID(int=0) while trying to connect \
via ip address.
:param encrypt.KeyObject client_key: Client key to connect to device.
:param str ipaddr: IP Address of the machine.
:param dict device_metadata: This is an internal parameter, which is not \
recommended to provide because it may has different definitions in \
different versions.
:param dict backend_options: More configuration for UpnpTask.
:param callable lookup_callback: Invoke repeatedly while looking for device.
:param float lookup_timeout: Raise an error if the program can not find the device in a limited time.
:raises UpnpError: For protocol or operation error.
:raises socket.error: For system defined socket error.
"""
name = None
uuid = None
serial = None
model_id = None
version = None
ipaddr = None
meta = None
_backend = None
def __init__(self, uuid, client_key, ipaddr=None, device_metadata=None,
remote_profile=None, backend_options={}, lookup_callback=None,
lookup_timeout=float("INF")):
self.uuid = uuid
self.ipaddr = ipaddr
self.client_key = client_key
self.backend_options = backend_options
if device_metadata:
if 'uuid' in device_metadata:
device_metadata.pop('uuid')
self.update_remote_profile(uuid, **device_metadata)
elif remote_profile:
self.update_remote_profile(uuid, **remote_profile)
else:
self.reload_remote_profile(lookup_callback, lookup_timeout)
self.initialize_backend()
def reload_remote_profile(self, lookup_callback=None,
lookup_timeout=float("INF")):
def on_discovered(instance, device, **kw):
self.update_remote_profile(**(device.to_old_dict()))
instance.stop()
if self.uuid.int:
d = UpnpDiscover(uuid=self.uuid)
else:
d = UpnpDiscover(device_ipaddr=self.ipaddr)
d.discover(on_discovered, lookup_callback, lookup_timeout)
def update_remote_profile(self, uuid, name, serial, model_id, version,
ipaddr, **meta):
if not self.uuid or self.uuid.int == 0:
self.uuid = uuid
self.name = name
self.serial = serial
self.model_id = model_id
self.version = StrictVersion(str(version))
self.ipaddr = ipaddr
self.device_meta = meta
def initialize_backend(self):
for klass in BACKENDS:
if klass.support_device(self.model_id, self.version):
self._backend = klass(self.client_key, self.uuid, self.version,
self.model_id, self.ipaddr,
self.device_meta, self.backend_options)
# TODO: debug information, remove after bugfix
logger.info("Backend %s selected", klass.__name__)
return
# TODO: debug information, remove after bugfix
logger.warn("Backend %s does not support device version `%s`",
klass.__name__, self.version)
raise NotSupportError(self.model_id, self.version)
def close(self):
"""Closes the upnp socket connection. After close(), any other method \
should not be called anymore."""
self._backend.close()
@property
def authorized(self):
"Indicates whether the connection has been authorized with a correct password or RSA key. If the connection is not authorized, you must \
call `authorize_with_password` first to authorize."
return self._backend.authorized
@property
def connected(self):
"""Indicates whether the upnp connection is connected with the device"""
return self._backend.connected
def authorize_with_password(self, password):
"""Authorizes via password, only use when the RSA key has not been trusted \
from device.
:param str password: Device password"""
if not self._backend.connected:
raise UpnpError("Disconnected")
if self._backend.authorized:
raise UpnpError("Already authorized")
self._backend.authorize_with_password(password)
def add_trust(self, label, key):
"""Adds a client_key to device trust list
:param str label: Key label will show for human only
:param object key: A vaild RSA key object or pem
:return: Key hash
:rtype: str"""
if isinstance(key, str):
pem = key
elif isinstance(key, bytes):
pem = key.decode("ascii")
else:
pem = key.public_key_pem.decode("ascii")
self._backend.add_trust(label, pem)
def list_trust(self):
"""Gets all trusted key in the device
:return: ((label, key hash), (label, key hash), ...)"""
return self._backend.list_trust()
def remove_trust(self, access_id):
"""Removes a trusted key
:param str access_id: Key hash which will be removed"""
return self._backend.remove_trust(access_id)
def rename(self, new_name):
"""Renames the device
:param str new_name: New device name"""
if not self._backend.connected:
raise UpnpError("Disconnected")
if not self._backend.authorized:
raise UpnpError("Authorize required")
self._backend.rename(new_name)
def modify_password(self, old_password, new_password, reset_acl=True):
"""Changes the device password, if **reset_acl** set to True, all other \
authorized user will be deauthorized.
:param str old_password: Old device password
:param str new_password: New device password
:param bool reset_acl: Clear authorized user list in device"""
if not self._backend.connected:
raise UpnpError("Disconnected")
if not self._backend.authorized:
raise UpnpError("Authorize required")
self._backend.modify_password(old_password, new_password, reset_acl)
def modify_network(self, **settings):
"""Modifies the device network, details will be revealed in future documentation."""
if not self._backend.connected:
raise UpnpError("Disconnected")
if not self._backend.authorized:
raise UpnpError("Authorize required")
self._backend.modify_network(**settings)
def get_wifi_list(self):
"""Gets wifi lists discovered from the device"""
if not self._backend.connected:
raise UpnpError("Disconnected")
if not self._backend.authorized:
raise UpnpError("Authorize required")
return self._backend.get_wifi_list()
| agpl-3.0 | -7,426,698,394,104,833,000 | 35.033333 | 145 | 0.636051 | false | 4.275141 | false | false | false |
xgds/xgds_notes2 | xgds_notes2/views.py | 1 | 27274 | #__BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#__END_LICENSE__
import traceback
import cgi
import re
from datetime import datetime, timedelta
import itertools
import json
import pytz
import csv
import ast
from dateutil.parser import parse as dateparser
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.views.decorators.cache import never_cache
from django.http import HttpResponse, JsonResponse
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from geocamUtil.datetimeJsonEncoder import DatetimeJsonEncoder
from geocamUtil.loader import LazyGetModelByName, getClassByName
from geocamUtil.modelJson import modelToDict
from geocamUtil import TimeUtil
from geocamTrack.utils import getClosestPosition
from treebeard.mp_tree import MP_Node
from xgds_notes2.forms import NoteForm, UserSessionForm, TagForm, ImportNotesForm
from xgds_core.views import getTimeZone, addRelay, getDelay
from xgds_core.flightUtils import getFlight
from xgds_map_server.views import getSearchPage, getSearchForms, buildFilterDict
from models import HierarchichalTag
from httplib2 import ServerNotFoundError
from apps.xgds_notes2.forms import SearchNoteForm
if False and settings.XGDS_SSE:
from sse_wrapper.events import send_event
UNSET_SESSION = 'Unset Session'
Note = LazyGetModelByName(getattr(settings, 'XGDS_NOTES_NOTE_MODEL'))
Tag = LazyGetModelByName(getattr(settings, 'XGDS_NOTES_TAG_MODEL'))
def serverTime(request):
return HttpResponse(
datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S'),
content_type="text"
)
def editUserSession(request, ajax=False):
# display a form to edit the content of the UserSession object in request.session['notes_user_session']
existing_data = request.session.get('notes_user_session', None)
if request.method == 'POST':
form = UserSessionForm(request.POST)
if form.is_valid():
request.session['notes_user_session'] = form.data.dict()
#persist the session data in user preferences, if that feature is available (see plrpExplorer.models.UserPreferences)
if hasattr(request.user, 'preferences'):
for field in form.fields:
request.user.preferences['default_' + field] = form.data[field]
if not ajax:
return redirect('search_xgds_notes_map')
else:
resultDict = {'success': True}
for key, value in form.cleaned_data.iteritems():
resultDict[key] = str(value);
return HttpResponse(json.dumps(resultDict),
content_type='application/json')
else:
return HttpResponse(json.dumps(form.errors),
content_type='application/json',
status=406)
else:
defaults = {}
if hasattr(request.user, 'preferences'):
empty_form = UserSessionForm() # Used as a source of enum choices
for fieldname in empty_form.fields:
if 'default_' + fieldname in itertools.chain(request.user.preferences.keys(), getattr(settings, 'DEFAULT_USER_PREFERENCES', [])):
value = request.user.preferences.get('default_' + fieldname)
defaults[fieldname] = value
if existing_data:
defaults.update(existing_data) # merge anything in the session store with the user preferences
form = UserSessionForm(initial=defaults)
template = 'xgds_notes2/user_session.html'
return render(
request,
template,
{
'form': form,
'title': settings.XGDS_NOTES_MONIKER,
'help_content_path': 'xgds_notes2/help/recordSession.rst'
},
)
def update_note_request_post(request):
"""
Take the author and the notes user session from the request session and put it into the request post
:param request:
:return: the request with more stuff in the post
"""
request.POST._mutable = True
# Hijack the UserSessionForm's validation method to translate enumerations to objects in session data
if 'notes_user_session' in request.session.keys():
session_form = UserSessionForm()
session_data = {'%s' % (k): str(session_form.fields[k].clean(v).id)
for k, v in request.session['notes_user_session'].iteritems()
if k in session_form.fields}
request.POST.update(session_data)
if request.user:
request.POST['author'] = str(request.user.id)
request.POST._mutable = False
return request.POST
def populateNoteData(request, form):
""" Populate the basic data dictionary for a new note from a submitted form
Form must already be valid
"""
errors = []
data = form.cleaned_data
if data['app_label'] and data['model_type']:
data['content_type'] = ContentType.objects.get(app_label=data['app_label'], model=data['model_type'])
data.pop('app_label')
data.pop('model_type')
tags = data.pop('tags')
# handle extras
try:
extras = data.pop('extras')
if str(extras) != 'undefined':
extrasDict = ast.literal_eval(extras)
data.update(extrasDict)
except:
pass
# This is for relay purposes
if 'id' in request.POST:
data['id'] = request.POST['id']
return data, tags, errors
def linkTags(note, tags):
if tags:
note.tags.clear()
for t in tags:
try:
tag = HierarchichalTag.objects.get(pk=int(t))
note.tags.add(tag)
except:
tag = HierarchichalTag.objects.get(slug=t)
note.tags.add(tag)
note.save()
def createNoteFromData(data, delay=True, serverNow=False):
NOTE_MODEL = Note.get()
empty_keys = [k for k,v in data.iteritems() if v is None]
for k in empty_keys:
del data[k]
try:
del data['note_submit_url']
except:
pass
note = NOTE_MODEL(**data)
for (key, value) in data.items():
setattr(note, key, value)
note.creation_time = datetime.now(pytz.utc)
note.modification_time = note.creation_time
# if we are taking a note on an object, get the flight and position from the object
if note.content_object:
try:
if hasattr(note, 'flight'):
note.flight = note.content_object.flight
note.position = note.content_object.getPosition()
except:
pass
else:
if delay:
# this is to handle delay state shifting of event time by default it does not change event time
note.event_time = note.calculateDelayedEventTime(data['event_time'])
elif serverNow:
note.event_time = note.calculateDelayedEventTime(note.creation_time)
if not note.event_timezone:
note.event_timezone = getTimeZone(note.event_time)
if hasattr(note, 'flight') and not note.flight:
# hook up the flight, this should always be true
note.flight = getFlight(note.event_time)
# TODO handle using the vehicle that came in from session
# hook up the position if it can have one
if hasattr(note, 'position') and not note.position:
note.lookupPosition()
note.save()
return note
def record(request):
if request.method == 'POST':
update_note_request_post(request)
form = NoteForm(request.POST)
if form.is_valid():
data, tags, errors = getClassByName(settings.XGDS_NOTES_POPULATE_NOTE_DATA)(request, form)
data = {str(k): v
for k, v in data.items()}
if 'author_id' in request.POST:
data['author'] = User.objects.get(id=request.POST['author_id'])
delay = getDelay()
note = createNoteFromData(data, delay=delay>0)
linkTags(note, tags)
jsonNote = json.dumps([note.toMapDict()], cls=DatetimeJsonEncoder)
# Right now we are using relay for the show on map
if note.show_on_map:
if settings.XGDS_CORE_REDIS and settings.XGDS_SSE:
note.broadcast()
mutable = request.POST._mutable
request.POST._mutable = True
request.POST['id'] = note.pk
request.POST['author_id'] = note.author.id
request.POST._mutable = mutable
addRelay(note, None, json.dumps(request.POST, cls=DatetimeJsonEncoder), reverse('xgds_notes_record'))
return HttpResponse(jsonNote,
content_type='application/json')
# if not settings.XGDS_SSE:
# return HttpResponse(jsonNote,
# content_type='application/json')
# else:
# return HttpResponse(json.dumps({'success': 'true'}), content_type='application/json')
else:
return HttpResponse(str(form.errors), status=400) # Bad Request
else:
raise Exception("Request method %s not supported." % request.method)
def recordSimple(request):
if request.method != 'POST':
return HttpResponse(json.dumps({'error': {'code': -32099,
'message': 'You must post, cheater.'}
}),
content_type='application/json')
update_note_request_post(request)
form = NoteForm(request.POST)
if form.is_valid():
data, tags, errors = getClassByName(settings.XGDS_NOTES_POPULATE_NOTE_DATA)(request, form)
note = createNoteFromData(data, False, 'serverNow' in request.POST)
linkTags(note, tags)
json_data = json.dumps([note.toMapDict()], cls=DatetimeJsonEncoder)
# Right now we are using relay for the show on map
if note.show_on_map:
if settings.XGDS_CORE_REDIS and settings.XGDS_SSE:
note.broadcast()
mutable = request.POST._mutable
request.POST._mutable = True
request.POST['id'] = note.pk
request.POST['author_id'] = note.author.id
request.POST._mutable = mutable
addRelay(note, None, json.dumps(request.POST, cls=DatetimeJsonEncoder), reverse('xgds_notes_record'))
return HttpResponse(json_data,
content_type='application/json')
else:
return JsonResponse({'error': {'code': -32099,
'message': 'problem submitting note',
'data': form.errors}
},
safe=False,
status=406)
def editNote(request, note_pk=None):
try:
tags_list = []
note = Note.get().objects.get(pk=int(note_pk))
tags_changed = False
if len(request.POST) == 1:
note.tags.clear()
else:
for key, value in request.POST.iteritems():
strkey = str(key)
if strkey.startswith('data'):
p = re.compile(r'^data\[(?P<pk>\d+)\]\[(?P<attr>\w*)\]')
m = p.match(strkey)
if m:
attr = m.group('attr')
if attr == 'content':
setattr(note, attr, cgi.escape(str(value)))
elif attr == 'tag_names':
tags_changed = True
tag_regex = re.compile(r'^data\[(?P<pk>\d+)\]\[(?P<attr>\w*)\]\[(?P<index>\d+)\]\[(?P<tag_attr>\w*)\]')
tag_match = tag_regex.match(strkey)
if tag_match:
tag_attr = tag_match.group('tag_attr')
if tag_attr == 'id':
tags_list.append(int(value))
else:
setattr(note, attr, str(value))
note.modification_time = datetime.now(pytz.utc)
if tags_changed:
linkTags(note, tags_list)
else:
note.save()
return HttpResponse(json.dumps({'data': [note.toMapDict()]}, cls=DatetimeJsonEncoder),
content_type='application/json')
except:
traceback.print_exc()
return HttpResponse(json.dumps({'error': {'code': -32099,
'message': 'problem submitting note'
}
}),
content_type='application/json')
def getSortOrder():
if settings.XGDS_NOTES_SORT_FUNCTION:
noteSortFn = getClassByName(settings.XGDS_NOTES_SORT_FUNCTION)
return noteSortFn()
else:
return getattr(settings, 'XGDS_NOTES_REVIEW_DEFAULT_SORT', '-event_time')
def editTags(request):
return render(
request,
'xgds_notes2/tags_tree.html',
{'addTagForm': TagForm(),
'title': settings.XGDS_NOTES_MONIKER,
'help_content_path': 'xgds_notes2/help/editTags.rst'},
)
def tagsGetRootTreesJson(root):
if root is None:
return []
root_json = root.get_tree_json()
return root_json
def tagsJsonArray(request):
allTags = Tag.get().objects.all()
return HttpResponse(json.dumps([tag.toSimpleDict() for tag in allTags], separators=(', ',': ')).replace("},","},\n").replace("}]","}\n]"),
content_type="application/json"
)
def tagsSearchJsonArray(request):
search_term = request.GET.get('term', '')
# TODO: execute a prefix search with Sphinx, if available
tfilter = Tag.get().objects.filter
result = []
for tag in tfilter(name__istartswith=search_term):
result.append(tag.name)
for tag in tfilter(abbreviation__istartswith=search_term):
result.append(tag.abbreviation)
result.sort()
return HttpResponse(json.dumps(result),
content_type="application/json"
)
@never_cache
def tagsget_tree_json(request, root=None):
"""
json tree of children
note that this does json for jstree
"""
root = Tag.get().objects.get(pk=root)
children_json = []
if root.numchild:
for child in root.get_children():
children_json.append(child.get_tree_json())
json_data = json.dumps(children_json)
return HttpResponse(content=json_data,
content_type="application/json")
@never_cache
def tagsGetOneLevelTreeJson(request, root=None):
"""
json tree of tags one level deep
note that this does json for jstree
"""
roots = []
if not root:
roots = Tag.get().get_root_nodes()
else:
roots.append(Tag.get().objects.get(pk=root))
keys_json = []
for root in roots:
keys_json.append(tagsGetRootTreesJson(root))
json_data = json.dumps(keys_json)
return HttpResponse(content=json_data,
content_type="application/json")
@never_cache
def deleteTag(request, tag_id):
found_tag = Tag.get().objects.get(pk=tag_id)
if found_tag:
if found_tag.numchild > 0:
# TODO need to check all the descendant tags; right now this is disabled.
return HttpResponse(json.dumps({'failed': found_tag.name + " has children, cannot delete."}), content_type='application/json', status=406)
elif LazyGetModelByName(settings.XGDS_NOTES_TAGGED_NOTE_MODEL).get().objects.filter(tag=found_tag):
# cannot delete, this tag is in use
return HttpResponse(json.dumps({'failed': found_tag.name + ' is in use; cannot delete.'}), content_type='application/json', status=406)
else:
found_tag.delete()
return HttpResponse(json.dumps({'success': 'true'}), content_type='application/json')
def addRootTag(request):
if request.method == 'POST':
form = TagForm(request.POST)
if form.is_valid():
new_root = Tag.get().add_root(**form.cleaned_data)
return HttpResponse(json.dumps(new_root.get_tree_json()), content_type='application/json')
else:
return HttpResponse(json.dumps({'failed': 'Problem adding root: ' + form.errors}), content_type='application/json', status=406)
def makeRootTag(request, tag_id):
if request.method == 'POST':
tag = Tag.get().objects.get(pk=tag_id)
if not tag.is_root():
tag.move(Tag.get().get_root_nodes()[0], 'sorted-sibling')
return HttpResponse(json.dumps({'success': 'true'}), content_type='application/json')
else:
return HttpResponse(json.dumps({'failed': 'Problem making root'}), content_type='application/json', status=406)
def addTag(request):
if request.method == 'POST':
parent_id = request.POST.get('parent_id')
parent = Tag.get().objects.get(pk=parent_id)
form = TagForm(request.POST)
if form.is_valid():
new_child = parent.add_child(**form.cleaned_data)
return HttpResponse(json.dumps(new_child.get_tree_json()), content_type='application/json')
else:
return HttpResponse(json.dumps({'failed': 'Problem adding tag: ' + str(form.errors)}), content_type='application/json', status=406)
def editTag(request, tag_id):
if request.method == 'POST':
tag = Tag.get().objects.get(pk=tag_id)
form = TagForm(request.POST, instance=tag)
if form.is_valid():
form.save()
return HttpResponse(json.dumps(tag.get_tree_json()), content_type='application/json')
else:
return HttpResponse(json.dumps({'failed': 'Problem editing tag: ' + form.errors}), content_type='application/json', status=406)
def moveTag(request):
if request.method == 'POST':
parent_id = request.POST.get('parent_id')
tag_id = request.POST.get('tag_id')
found_tag = Tag.get().objects.get(pk=tag_id)
found_parent = Tag.get().objects.get(pk=parent_id)
if found_tag and found_parent:
try:
found_tag.move(found_parent, 'sorted-child')
return HttpResponse(json.dumps({'success': 'true'}), content_type='application/json')
except:
return HttpResponse(json.dumps({'failed': 'badness.'}), content_type='application/json', status=406)
def doImportNotes(request, sourceFile, tz, vehicle):
dictreader = csv.DictReader(sourceFile)
for row in dictreader:
row['author'] = request.user
if row['content'] or row['tags']:
if 'first_name' in row and 'last_name' in row:
if row['first_name'] and row['last_name']:
try:
row['author'] = User.objects.get(first_name=row['first_name'], last_name=row['last_name'])
del row['first_name']
del row['last_name']
except:
pass
if row['event_time']:
event_time = dateparser(row['event_time'])
if tz != pytz.utc:
localized_time = tz.localize(event_time)
event_time = TimeUtil.timeZoneToUtc(localized_time)
row['event_time'] = event_time
try:
# TODO implement tags when ready
del row['tags']
except:
pass
NOTE_MODEL = Note.get()
note = NOTE_MODEL(**row)
note.creation_time = datetime.now(pytz.utc)
note.modification_time = datetime.now(pytz.utc)
if vehicle:
note.position = getClosestPosition(timestamp=note.event_time, vehicle=vehicle)
note.save()
def importNotes(request):
errors = None
if request.method == 'POST':
form = ImportNotesForm(request.POST, request.FILES)
if form.is_valid():
doImportNotes(request, request.FILES['sourceFile'], form.getTimezone(), form.getVehicle())
return redirect('search_xgds_notes_map')
else:
errors = form.errors
return render(
request,
'xgds_notes2/import_notes.html',
{
'form': ImportNotesForm(),
'errorstring': errors,
'title': settings.XGDS_NOTES_MONIKER,
'help_content_path': 'xgds_notes2/help/import.rst'
},
)
def getObjectNotes(request, app_label, model_type, obj_pk):
"""
For a given object, get the notes on that object and return as a json dictionary from oldest to newest
"""
ctype = ContentType.objects.get(app_label=app_label, model=model_type)
result = Note.get().objects.filter(content_type__pk = ctype.id, object_id=obj_pk).order_by('event_time', 'creation_time')
resultList = []
for n in result:
resultList.append(n.toMapDict())
json_data = json.dumps(resultList, cls=DatetimeJsonEncoder)
return HttpResponse(content=json_data,
content_type="application/json")
def buildNotesForm(args):
theForm = SearchNoteForm(args)
return theForm
def notesSearchMap(request, filter=None):
noteType = Note.get().cls_type()
return getSearchPage(request, noteType, 'xgds_notes2/map_record_notes.html', True, getSearchForms(noteType, filter))
# @never_cache
# def getNotesJson(request, filter=None, range=0, isLive=1):
# """ Get the note json information to show in table or map views.
# """
# try:
# isLive = int(isLive)
# if filter:
# splits = str(filter).split(":")
# filterDict = {splits[0]: splits[1]}
#
# range = int(range)
# if isLive or range:
# if range==0:
# range = 6
# now = datetime.now(pytz.utc)
# yesterday = now - timedelta(seconds=3600 * range)
# if not filter:
# notes = Note.get().objects.filter(creation_time__lte=now).filter(creation_time__gte=yesterday)
# else:
# allNotes = Note.get().objects.filter(**filterDict)
# notes = allNotes.filter(creation_time__lte=now).filter(creation_time__gte=yesterday)
# elif filter:
# notes = Note.get().objects.filter(**filterDict)
# else:
# notes = Note.get().objects.all()
# except:
# return HttpResponse(json.dumps({'error': {'message': 'I think you passed in an invalid filter.',
# 'filter': filter}
# }),
# content_type='application/json')
#
# if notes:
# keepers = []
# for note in notes:
# resultDict = note.toMapDict()
# keepers.append(resultDict)
# json_data = json.dumps(keepers, indent=4, cls=DatetimeJsonEncoder)
# return HttpResponse(content=json_data,
# content_type="application/json")
# else:
# return HttpResponse(json.dumps({'error': {'message': 'No notes found.',
# 'filter': filter}
# }),
# content_type='application/json')
# @never_cache
# def note_json_extens(request, extens, today=False):
# """ Get the note json information to show in the fancy tree. this gets all notes in the mapped area
# """
# splits = str(extens).split(',')
# minLon = float(splits[0])
# minLat = float(splits[1])
# maxLon = float(splits[2])
# maxLat = float(splits[3])
#
# queryString = Note.get().getMapBoundedQuery(minLon, minLat, maxLon, maxLat)
# if queryString:
# found_notes = Note.get().objects.raw(queryString)
# if found_notes:
# keepers = []
# for note in found_notes:
# resultDict = note.toMapDict()
# keepers.append(resultDict)
# json_data = json.dumps(keepers, indent=4, cls=DatetimeJsonEncoder)
# return HttpResponse(content=json_data,
# content_type="application/json")
# return ""
if settings.XGDS_NOTES_ENABLE_GEOCAM_TRACK_MAPPING:
from geocamUtil.KmlUtil import wrapKmlDjango, djangoResponse
def getKmlNetworkLink(request):
''' This refreshes note_map_kml every 5 seconds'''
url = request.build_absolute_uri(settings.SCRIPT_NAME + 'notes/rest/notes.kml')
return djangoResponse('''
<NetworkLink>
<name>%(name)s</name>
<Link>
<href>%(url)s</href>
<refreshMode>onInterval</refreshMode>
<refreshInterval>5</refreshInterval>
</Link>
</NetworkLink>
''' % dict(name=settings.XGDS_NOTES_MONIKER,
url=url))
@never_cache
def note_map_kml(request, range=12):
now = datetime.now(pytz.utc)
yesterday = now - timedelta(seconds=3600 * range)
objects = Note.get().objects.filter(show_on_map=True).filter(creation_time__lte=now).filter(creation_time__gte=yesterday)
days = []
if objects:
days.append({'date': now,
'notes': objects
})
if days:
kml_document = render_to_string(
'xgds_notes2/notes_placemark_document.kml',
{'days': days},
request
)
return wrapKmlDjango(kml_document)
return wrapKmlDjango("")
def getSseNoteChannels(request):
# Look up the note channels we are using for SSE
return JsonResponse(settings.XGDS_SSE_NOTE_CHANNELS, safe=False)
def defaultCurrentMapNotes(request):
return HttpResponseRedirect(reverse('xgds_map_server_objectsJson', kwargs={'object_name': 'XGDS_NOTES_NOTE_MODEL',
'filter':{'show_on_map': True}}))
def getCurrentMapNotes(request):
getNotesFunction = getClassByName(settings.XGDS_NOTES_CURRENT_MAPPED_FUNCTION)
return getNotesFunction(request)
| apache-2.0 | -4,331,591,429,205,203,000 | 37.522599 | 150 | 0.585393 | false | 3.993265 | false | false | false |
kawie/pybikes | extractors/domoblue.py | 3 | 5629 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2012, eskerda <[email protected]>
# Distributed under the AGPL license, see LICENSE.txt
import os
import sys
import time
import json
import argparse
from collections import namedtuple
import re
from lxml import etree
from googlegeocoder import GoogleGeocoder
from slugify import slugify
from pybikes.utils import PyBikesScraper
from pybikes.domoblue import Domoblue
MAIN = 'http://clientes.domoblue.es/onroll/'
TOKEN_URL = 'generaMapa.php?cliente={service}&ancho=500&alto=700'
XML_URL = 'generaXml.php?token={token}&cliente={service}'
TOKEN_RE = 'generaXml\.php\?token\=(.*?)\&cliente'
geocoder = GoogleGeocoder()
CityRecord = namedtuple('CityRecord', 'city, country, lat, lng')
description = 'Extract DomoBlue instances from the main site'
parser = argparse.ArgumentParser(description = description)
parser.add_argument('-o', metavar = "file", dest = 'outfile', default = None,
help="Save output to the specified file")
parser.add_argument('-g','--geocode', action="store_true",
help="Use Google GeoCoder for lat/lng and better names")
parser.add_argument('--proxy', metavar = "host:proxy", dest = 'proxy',
default = None, help="Use host:port as a proxy for site calls")
parser.add_argument('-v', action="store_true", dest = 'verbose',
default = False, help="Verbose output for debugging (no progress)")
args = parser.parse_args()
outfile = args.outfile
proxies = {}
user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19'
scraper = PyBikesScraper()
scraper.setUserAgent(user_agent)
sysdef = {
"system": "domoblue",
"class": "Domoblue",
"instances": []
}
if args.proxy is not None:
proxies['http'] = args.proxy
scraper.setProxies(proxies)
scraper.enableProxy()
def get_token(client_id):
if 'Referer' in scraper.headers:
del(scraper.headers['Referer'])
url = MAIN + TOKEN_URL.format(service = client_id)
data = scraper.request(url)
token = re.findall(TOKEN_RE, data)
scraper.headers['Referer'] = url
return token[0]
def get_xml(client_id):
token = get_token(client_id)
url = MAIN + XML_URL.format(token = token, service = client_id)
return scraper.request(url).encode('raw_unicode_escape').decode('utf-8')
def test_system_health(domo_sys):
online = False
for s in domo_sys.stations:
online = s.extra['status']['online']
if online:
break
return online
def google_reverse_geocode(lat, lng):
country_info = lambda lst: lst[len(lst) - 1].short_name
target = 'locality'
if args.verbose:
print "--- Javascript code for debugging output ---"
print " var geocoder = new google.maps.Geocoder()"
print " latlng = new google.maps.LatLng(%s,%s)" % (str(lat), str(lng))
print " geocoder.geocode({latLng:latlng}, function(res){console.log(res)})"
info = geocoder.get((lat, lng),language = 'es')
city_info = [i for i in info if target in i.types]
if len(city_info) == 0:
target = 'political'
city_info = [i for i in info if target in i.types]
if len(city_info) == 0:
raise Exception
else:
city_info = city_info[0]
city = city_info.address_components[0].long_name
country = country_info(city_info.address_components)
latitude = city_info.geometry.location.lat
longitude = city_info.geometry.location.lng
return CityRecord(city, country, latitude, longitude)
def extract_systems():
xml_data = get_xml('todos')
xml_dom = etree.fromstring(xml_data)
systems = []
for marker in xml_dom.xpath('//marker'):
if marker.get('tipo') == 'pendiente':
continue
sys = Domoblue('foo', {}, int(marker.get('codigoCliente')))
sys.update()
online = True #test_system_health(sys)
if args.verbose:
print "--- %s --- " % repr(marker.get('nombre'))
print " Total stations: %d" % len(sys.stations)
print " Health: %s" % (lambda b: 'Online' if b else 'Offline')(online)
if not online:
if args.verbose:
print " %s is Offline, ignoring!\n" % repr(marker.get('nombre'))
continue
name = 'Onroll %s' % marker.get('nombre')
slug = slugify(name)
city = marker.get('nombre')
latitude = marker.get('lat')
longitude = marker.get('lng')
country = 'ES'
if args.geocode:
time.sleep(1)
try:
city, country, latitude, longitude = google_reverse_geocode(latitude, longitude)
name = 'Onroll %s' % city
except Exception:
print " No geocoding results for %s!!" % repr(name)
system = {
'tag': slug,
'system_id': int(marker.get('codigoCliente')),
'meta': {
'name': name,
'latitude': latitude,
'longitude': longitude,
'city': city,
'country': 'ES'
}
}
systems.append(system)
if args.verbose:
print " Appended!\n"
return systems
instances = extract_systems()
sysdef['instances'] = sorted(instances, key = lambda inst: inst['tag'])
data = json.dumps(sysdef, sort_keys = False, indent = 4)
if outfile is not None:
f = open(outfile, 'w')
f.write(data)
f.close()
print "%s file written" % outfile
else:
print "---- OUTPUT ----"
print data
| lgpl-3.0 | 1,773,835,244,100,720,400 | 30.623596 | 118 | 0.609167 | false | 3.504981 | false | false | false |
itkovian/vsc-ldap | lib/vsc/ldap/timestamp.py | 1 | 3108 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
##
# Copyright 2009-2012 Ghent University
#
# This file is part of vsc-ldap
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/vsc-ldap
#
# vsc-ldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-ldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-ldap. If not, see <http://www.gnu.org/licenses/>.
##
"""Timestamp tools for this LDAP library.
@author: Andy Georges
@author: Stijn De Weirdt
"""
import datetime
from vsc.utils.cache import FileCache
from vsc.utils.dateandtime import Local, utc
LDAP_DATETIME_TIMEFORMAT = "%Y%m%d%H%M%SZ"
def convert_timestamp(timestamp=None):
"""Convert a timestamp, yielding a string and a datetime.datetime instance.
@type timestamp: either a string or a datetime.datetime instance. Default value is None, in which case the
local time is returned.
@returns: tuple with the timestamp as a
- LDAP formatted timestamp on GMT in the yyyymmddhhmmssZ format
- A datetime.datetime instance representing the timestamp
"""
if timestamp is None:
timestamp = datetime.datetime.today()
if isinstance(timestamp, datetime.datetime):
if timestamp.tzinfo is None:
timestamp = timestamp.replace(tzinfo=Local)
return (timestamp, timestamp.astimezone(utc).strftime(LDAP_DATETIME_TIMEFORMAT))
elif isinstance(timestamp, basestring):
tmp = datetime.datetime.strptime(timestamp, LDAP_DATETIME_TIMEFORMAT)
return (tmp.replace(tzinfo=utc).astimezone(Local), timestamp)
def read_timestamp(filename):
"""Read the stored timestamp value from a pickled file.
@returns: string representing a timestamp in the proper LDAP time format
"""
cache = FileCache(filename)
(_, timestamp) = cache.load('timestamp')
return timestamp
def write_timestamp(filename, timestamp):
"""Write the given timestamp to a pickled file.
@type timestamp: datetime.datetime timestamp
"""
if isinstance(timestamp, datetime.datetime) and timestamp.tzinfo is None:
# add local timezoneinfo
timestamp_ = timestamp.replace(tzinfo=Local)
(_, timestamp_) = convert_timestamp(timestamp)
else:
timestamp_ = timestamp
cache = FileCache(filename)
cache.update('timestamp', timestamp_, 0)
cache.close()
| gpl-2.0 | -4,553,595,259,723,494,000 | 33.153846 | 110 | 0.710425 | false | 3.88015 | false | false | false |
morelab/appcomposer | appcomposer/i18n.py | 3 | 1189 | import traceback
try:
USE_BABELEX = True
if USE_BABELEX:
# Use regular Babelex instead of Babel
from flask_babelex import Babel as Babel_ex, gettext as gettext_ex, lazy_gettext as lazy_gettext_ex, ngettext as ngettext_ex, get_domain as get_domain
gettext = gettext_ex
ngettext = ngettext_ex
lazy_gettext = lazy_gettext_ex
get_domain = get_domain
Babel = Babel_ex
else:
# Use regular Babel instead of Babelex
from flask_babel import Babel as Babel_reg, gettext as gettext_reg, lazy_gettext as lazy_gettext_reg, ngettext as ngettext_reg, get_domain as get_domain
gettext = gettext_reg
ngettext = ngettext_reg
lazy_gettext = lazy_gettext_reg
get_domain = get_domain
Babel = Babel_reg
except ImportError:
DEBUG = True
if DEBUG:
traceback.print_exc()
Babel = None
def gettext(string, **variables):
return string % variables
def ngettext(singular, plural, num, **variables):
return (singular if num == 1 else plural) % variables
def lazy_gettext(string, **variables):
return gettext(string, **variables)
| bsd-2-clause | -2,346,879,147,747,376,600 | 28 | 160 | 0.647603 | false | 3.963333 | false | false | false |
punitvanjani/test1 | api/common.py | 1 | 21972 | import socket
import fcntl
import struct
import requests
from requests.auth import HTTPBasicAuth
import os
import time
from flask import current_app, g, jsonify
from models import db, Hub, Endpoint, User, EndpointTypes, SectionTypes, EndpointGroup, Schedule, Properties, EndpointSchema, HubSchema
from datetime import datetime
import uuid
import interface
from errors import invalid_operation, no_records
import inspect
import traceback
from debugger import debug_msg
# from api.v1.interfaces import server_update_hub
def is_admin(user):
is_admin = False
users = User.query.filter_by(username=user).first()
if users.group == 'ADMIN':
is_admin = True
else:
is_admin = False
return is_admin
def unique_endpoint(section_id, node_id, endpoint_id):
unique = False
endpoint = Endpoint.query.filter_by(internal_sec_id=section_id,internal_nod_id=node_id,internal_end_id=endpoint_id).first()
if endpoint == None:
unique = True
else:
unique = False
return unique
def unique_endpoint_type(node_type, endpoint_type):
unique = False
endpointtypes = EndpointTypes.query.filter_by(node_type=node_type,endpoint_type=endpoint_type).first()
if endpointtypes == None:
unique = True
else:
unique = False
return unique
def unique_section_type(section_type):
unique = False
sectiontypes = SectionTypes.query.filter_by(section_type=section_type).first()
if sectiontypes == None:
unique = True
else:
unique = False
return unique
def unique_user(username):
user = User.query.filter_by(username=username).first()
if user == None:
unique = True
else:
unique = False
return unique
def valid_user(username):
user = User.query.filter_by(username=username).first()
if user != None:
valid = True
else:
valid = False
return valid
def endpoint_validation(data):
valid = False
endpointtypes = EndpointTypes.query.filter_by(node_type=data['node_type'],endpoint_type=data['endpoint_type']).first()
if endpointtypes == None:
valid = False
else:
valid = True
return valid
def schedule_validation(data):
valid = False
endpoint = Endpoint.query.filter_by(endpoint_uuid = data['uuid_id']).first()
if endpoint == None:
group = EndpointGroup.query.filter_by(group_uuid = data['uuid_id']).first()
if group == None:
valid = False
else:
valid = True
else:
# As the endpoint is found, then check expected_status is according to endpoint types
endpointtypes = EndpointTypes.query.filter_by(node_type=endpoint.node_type,endpoint_type=endpoint.endpoint_type).first()
if (endpointtypes.status_min <= data['expected_status']) and (endpointtypes.status_max >= data['expected_status']):
valid = True
else:
valid = False
return valid
def operate_validation(endpoint_uuid, status):
valid = False
endpoint = Endpoint.query.filter_by(endpoint_uuid = endpoint_uuid).first()
if endpoint == None:
valid = False
return valid
else:
valid = True
endpoint_types = EndpointTypes.query.filter_by(node_type=endpoint.node_type,endpoint_type=endpoint.endpoint_type).first()
if endpoint_types != None:
if status == endpoint_types.status_min:
valid = True
elif status == endpoint_types.status_max:
valid = True
elif (status > endpoint_types.status_min) and (status < endpoint_types.status_max):
valid = True
else:
valid = False
else:
valid = False
debug_msg('endpoint_types', endpoint.node_type,endpoint.endpoint_type,endpoint_types.status_min,endpoint_types.status_max,status,valid)
return valid
def unique_property(key):
property = Properties.query.filter_by(key = key).first()
if property == None:
unique = True
else:
unique = False
return unique
def unique_group_desc(group_desc):
group = EndpointGroup.query.filter_by(group_desc = group_desc).first()
if group == None:
unique = True
else:
unique = False
return unique
# def debug_msg(message,keyword1=-99,keyword2=-99,keyword3=-99,keyword4=-99,keyword5=-99,keyword6=-99,keyword7=-99,keyword8=-99,keyword9=-99,keyword10=-99):
# msg = ''
# property = Properties.query.filter_by(key = 'DEBUG').first()
# if property.value != None and property.value == 'true':
# callerframerecord = inspect.stack()[1] # 0 represents this line
# # 1 represents line at caller
# frame = callerframerecord[0]
# info = inspect.getframeinfo(frame)
# try:
# msg += '\t' + 'USER:' + str(g.user.username)
# except:
# msg += '\t' + 'USER:' + str("BackendUser")
# msg += '\t' + 'FILE:' + str(info.filename)
# msg += '\t' + 'FUNC:' + str(info.function)
# msg += '\t' + 'LINE:' + str(info.lineno)
# msg += '\t' + 'CALL:' + str(traceback.format_stack(limit=5))
#
# msg += '\t'
# if(keyword1!=-99):
# msg += 'KEY1:' + str(keyword1)
# msg += '\t'
# if(keyword2!=-99):
# msg += 'KEY2:' + str(keyword2)
# msg += '\t'
# if(keyword3!=-99):
# msg += 'KEY3:' + str(keyword3)
# msg += '\t'
# if(keyword4!=-99):
# msg += 'KEY4:' + str(keyword4)
# msg += '\t'
# if(keyword5!=-99):
# msg += 'KEY5:' + str(keyword5)
# msg += '\t'
# if(keyword6!=-99):
# msg += 'KEY6:' + str(keyword6)
# msg += '\t'
# if(keyword7!=-99):
# msg += 'KEY7:' + str(keyword7)
# msg += '\t'
# if(keyword8!=-99):
# msg += 'KEY8:' + str(keyword8)
# msg += '\t'
# if(keyword9!=-99):
# msg += 'KEY9:' + str(keyword9)
# msg += '\t'
# if(keyword10!=-99):
# msg += 'KEY10:' + str(keyword10)
# msg += '\t' + 'MSG:' + str(message)
# # Open log file in append mode
# f = open(current_app.config['LOG_FILE'],'a')
# f.write(str(datetime.today()))
# f.write(msg)
# print msg
# f.write('\n')
# f.close()
def get_intranet_ip_address(ifname):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
return "0.0.0.0"
def get_server_credentials():
prop = Properties.query.filter_by(key='ServerUsr').first()
user = prop.value
prop = Properties.query.filter_by(key='ServerPwd').first()
password = prop.value
return (user, password)
def get_serial():
# Extract serial from cpuinfo file
cpuserial = "0000000000000000"
try:
f = open('/proc/cpuinfo','r')
for line in f:
if line[0:6]=='Serial':
cpuserial = line[10:26]
f.close()
cpuserial_int = int(cpuserial,16)
except:
cpuserial = "ERROR000000000"
cpuserial_int = 9999999999999999
return cpuserial_int
def get_external_url():
try:
r = requests.get('http://localhost:4040/api/tunnels')
datajson = r.json()
msg=None
for i in datajson['tunnels']:
# populate only https ngrok url
if 'https' in i['public_url']:
msg = i['public_url']
except requests.exceptions.ConnectionError:
r = None
msg = "Error"
except requests.exceptions.RequestException:
r = None
msg = "ERROR"
#
return msg
def server_hub_string(hubdetails):
# hub_schema_custom = HubSchema(exclude=('last_changed_on', 'last_changed_by'))
# hubstring = hub_schema_custom.dump(hubdetails).data
hubstring = '{"description":"'+ str(hubdetails.description) +'", "external_url":"'+str(hubdetails.external_url)+'","hub_id":"'+str(hubdetails.hub_id)+'","internal_url":"'+str(hubdetails.internal_url)+'"}'
# hubstring = ''
# hubstring = hubstring + '{"description":"'+ str(hubdetails.description)
# hubstring = hubstring +'", "external_url":"'+str(hubdetails.external_url)
# hubstring = hubstring +'","hub_id":"'+str(hubdetails.hub_id)
# hubstring = hubstring +'","internal_url":"'+str(hubdetails.internal_url)+'"}'
return hubstring
def server_update_hub(hubdetails):
resp = None
server = Properties.query.filter_by(key='ServerAPI').first()
serverurl = server.value
serverurl = serverurl + 'et_update_hub_info.php?arg={"hub":'
url = serverurl + str(server_hub_string(hubdetails)) + '}'
debug_msg('hub_defined__server_updated', url)
try:
user,password = get_server_credentials()
req = requests.get(url,auth=HTTPBasicAuth(user, password)).json()
debug_msg('response', req)
# resp = req['success']
except requests.exceptions.ConnectionError:
req = None
msg = "Error"
resp = None
except requests.exceptions.RequestException:
req = None
msg = "ERROR"
resp = None
except:
req = None
msg = "ERROR"
resp = None
return resp
def endpoint_update_status(endpoint_uuid,status):
resp = None
server = Properties.query.filter_by(key='ServerAPI').first()
serverurl = server.value
serverurl = serverurl + 'et_change_endpoint_status.php?arg={"etct_endpoint_id":"'
url = serverurl + str(endpoint_uuid)+'","status":"' + status +'"}'
debug_msg('endpoint_update_status, url')
try:
user,password = get_server_credentials()
req = requests.get(url,auth=HTTPBasicAuth(user, password)).json()
debug_msg('response', req)
# resp = req['success']
except requests.exceptions.ConnectionError:
req = None
msg = "Error"
resp = None
except requests.exceptions.RequestException:
req = None
msg = "ERROR"
resp = None
except:
req = None
msg = "ERROR"
resp = None
return resp
# http://shubansolutions.com/etct/ws/et_change_endpoint_status.php?arg={"etct_endpoint_id":"8a38f241-7c1d-4580-a9fa-6debd3f03061","status":"A"}
def server_endpoint_string(endpoints):
# endpoint_schemas_custom = EndpointSchema(exclude=('last_changed_on', 'last_changed_by'), many = True, extra={"qwe":'123',"qbc":1234})
# endpoint_schemas_custom = EndpointSchema(exclude=('last_changed_on', 'last_changed_by'), many = True)
# endpointstring = endpoint_schemas_custom.dump(endpoints).data
# # endpointstring = jsonify({'endpoints':endpointstring})
# debug_msg('endpoint_defined__server_updated', endpointstring)
endpointstring = ''
for endpoint_single in endpoints:
endpointstring += '{"internal_sec_id":"'+ str(endpoint_single.internal_sec_id) +'", "section_type":"' + str(endpoint_single.section_type)+'","internal_sec_desc":"'+str(endpoint_single.internal_sec_desc)+'","internal_nod_id":"'+str(endpoint_single.internal_nod_id)+'","node_type":"'+str(endpoint_single.node_type)+'","internal_nod_desc":"'+str(endpoint_single.internal_nod_desc)+'","internal_end_id":"'+str(endpoint_single.internal_end_id)+'","endpoint_type":"'+str(endpoint_single.endpoint_type)+'","endpoint_uuid":"'+str(endpoint_single.endpoint_uuid)+'","internal_end_desc":"'+str(endpoint_single.internal_end_desc)+'"}'
endpointstring += ','
endpointstring = endpointstring[:-1]
return endpointstring
def server_sync_endpoints():
server = Properties.query.filter_by(key='ServerAPI').first()
serverurl = server.value
serverurl = serverurl + 'et_update_hub_info.php?arg='
endpoints = Endpoint.query.all()
hubdetails = Hub.query.first()
url = serverurl + '{"endpoints":[['+ str(server_endpoint_string(endpoints)) + '],{}],"hub":' + str(server_hub_string(hubdetails)) + '}'
debug_msg('endpoint_defined__server_updated', url)
try:
user,password = get_server_credentials()
req = requests.get(url,auth=HTTPBasicAuth(user, password)).json()
debug_msg('response', req)
# resp = req['success']
except requests.exceptions.ConnectionError:
req = None
msg = "Error"
resp = None
except requests.exceptions.RequestException:
req = None
msg = "ERROR"
resp = None
except:
req = None
msg = "ERROR"
resp = None
# return resp
def get_scheduler_current_timestamp():
year = int(str(datetime.today())[:4])
month = int(str(datetime.today())[5:7])
weekday = datetime.weekday(datetime.today())
date = int(str(datetime.today())[8:10])
hour = int(str(datetime.today())[11:13])
min = int(str(datetime.today())[14:16])
# Return year, month, Weekday, date, hour and min based on today's datetime
return (year, month, weekday, date, hour, min)
def scheduled_endpoints_groups():
hourly_tasks = scheduled_hourly_tasks()
daily_tasks = scheduled_daily_tasks()
weekly_tasks = scheduled_weekly_tasks()
monthly_tasks = scheduled_monthly_tasks()
yearly_tasks = scheduled_yearly_tasks()
onlyonce_tasks = scheduled_onlyonce_tasks()
endpoints = []
endpoint_status = []
# endpoints = Endpoint.query.all()
endpointgroup = {}
for tasks in hourly_tasks:
endpoint = Endpoint.query.filter_by(endpoint_uuid=tasks.uuid_id).first()
if endpoint != None:
endpoints.append(endpoint)
endpoint_status.append(tasks.expected_status)
for tasks in daily_tasks:
endpoint = Endpoint.query.filter_by(endpoint_uuid=tasks.uuid_id).first()
if endpoint != None:
endpoints.append(endpoint)
endpoint_status.append(tasks.expected_status)
for tasks in weekly_tasks:
endpoint = Endpoint.query.filter_by(endpoint_uuid=tasks.uuid_id).first()
if endpoint != None:
endpoints.append(endpoint)
endpoint_status.append(tasks.expected_status)
for tasks in monthly_tasks:
endpoint = Endpoint.query.filter_by(endpoint_uuid=tasks.uuid_id).first()
if endpoint != None:
endpoints.append(endpoint)
endpoint_status.append(tasks.expected_status)
for tasks in yearly_tasks:
endpoint = Endpoint.query.filter_by(endpoint_uuid=tasks.uuid_id).first()
if endpoint != None:
endpoints.append(endpoint)
endpoint_status.append(tasks.expected_status)
for tasks in onlyonce_tasks:
endpoint = Endpoint.query.filter_by(endpoint_uuid=tasks.uuid_id).first()
if endpoint != None:
endpoints.append(endpoint)
endpoint_status.append(tasks.expected_status)
for endpoint1 in endpoints:
print endpoint1.endpoint_uuid
return (endpoints, endpointgroup, endpoint_status)
# def delete_all_except(endpoint):
def scheduled_hourly_tasks():
# Get current date time and weekday in variables
year, month, weekday, date, hour, min = get_scheduler_current_timestamp()
# Query the tasks which are marked true for hourly, and has current min
tasks = Schedule.query.filter_by(status = True, hourly = True, min = min)
return tasks
def scheduled_daily_tasks():
# Get current date time and weekday in variables
year, month, weekday, date, hour, min = get_scheduler_current_timestamp()
# Query the tasks which are marked true for daily, and has current hour and min
tasks = Schedule.query.filter_by(status = True, daily = True, hour = hour, min = min)
return tasks
def scheduled_weekly_tasks():
# Get current date time and weekday in variables
year, month, weekday, date, hour, min = get_scheduler_current_timestamp()
# Query the tasks which are marked true for weekly, and has current weekday, hour and min
tasks = Schedule.query.filter_by(status = True, weekly = True, weekday = weekday, hour = hour, min = min)
return tasks
def scheduled_monthly_tasks():
# Get current date time and weekday in variables
year, month, weekday, date, hour, min = get_scheduler_current_timestamp()
# Query the tasks which are marked true for monthly, and has current weekday, hour and min
tasks = Schedule.query.filter_by(status = True, monthly = True, date = date, hour = hour, min = min)
return tasks
def scheduled_yearly_tasks():
# Get current date time and weekday in variables
year, month, weekday, date, hour, min = get_scheduler_current_timestamp()
# Query the tasks which are marked true for yearly, and has current weekday, hour and min
tasks = Schedule.query.filter_by(status = True, yearly = True, month = month, date = date, hour = hour, min = min)
return tasks
def scheduled_onlyonce_tasks():
# Get current date time and weekday in variables
year, month, weekday, date, hour, min = get_scheduler_current_timestamp()
# Query the tasks which are marked true for yearly, and has current weekday, hour and min
tasks = Schedule.query.filter_by(status = True, onlyonce = True, year = year, month = month, date = date, hour = hour, min = min)
return tasks
def system_start():
str_ip = get_intranet_ip_address('eth0')
if str_ip == "0.0.0.0":
str_ip = get_intranet_ip_address('wlan0')
str_ip = 'http://' + str_ip
int_serial = get_serial()
str_ext_url = get_external_url()
hubdetails = Hub.query.first()
# Commit to db and call Server API only if there are any changes to PI Serial, internal_url, external_url
if (hubdetails.hub_id != int_serial and int_serial != 9999999999999999) or (hubdetails.internal_url != str_ip and str_ip != "0.0.0.0") or (hubdetails.external_url != str_ext_url and (str_ext_url != "Error" or str_ext_url != "ERROR")):
hubdetails.hub_id = int_serial
hubdetails.internal_url = str_ip
hubdetails.external_url = str_ext_url
try:
hubdetails.last_changed_by = g.user.username
except:
hubdetails.last_changed_by = str("BackendUser")
hubdetails.last_changed_on = datetime.today()
db.session.add(hubdetails)
db.session.commit()
# Call Server API
resp = server_update_hub(hubdetails)
# External URL is fetched, Server is updated
if (str_ext_url != 'Error' or str_ext_url != 'ERROR') and resp != None:
debug_msg('hub_started__external_url_fetched__server_updated', int_serial, str_ip, str_ext_url, resp, hubdetails.status)
# External URL is fetched, Server is not updated
elif (str_ext_url != 'Error' or str_ext_url != 'ERROR') and resp == None:
debug_msg('hub_started__external_url_fetched__server_not_updated', int_serial, str_ip, str_ext_url, resp, hubdetails.status)
# External URL is not fetched, Server is updated
elif (str_ext_url == 'Error' or str_ext_url == 'ERROR') and resp != None:
debug_msg('hub_started__external_url_not_fetched__server_updated', int_serial, str_ip, str_ext_url, resp, hubdetails.status)
# External URL is not fetched, Server is not updated
else:
debug_msg('hub_started__external_url_not_fetched__server_not_updated', int_serial, str_ip, str_ext_url, resp, hubdetails.status)
def operate_endpoint_group(uuid, expected_status):
status = -1
errors = ""
# Find if it is Endpoint or Group
endpoint = Endpoint.query.filter_by(endpoint_uuid = uuid).first()
if endpoint == None:
group = EndpointGroup.query.filter_by(group_uuid = uuid).first()
if group == None:
# UUID is not valid Endpoint or Group, exit the function with errors
errors = no_records('operate.operate.endpoint_group',uuid)
status = -1
debug_msg('improper_uuid', errors, status)
return (status,errors)
# Action required for Endpoint as UUID passed is endpoint
if endpoint != None:
# Validate if this status is possible
if not (operate_validation(uuid,expected_status)):
errors = invalid_operation()
status = -1
debug_msg('endpointvalidation', errors, status)
return (status,errors)
# Get the parameters stored in Endpoint
endpointtype = EndpointTypes.query.filter_by(node_type=endpoint.node_type, endpoint_type=endpoint.endpoint_type).first()
if endpointtype == None:
errors = no_records('operate.operate.endpointtype',endpoint.node_type,endpoint.endpoint_type)
status = -1
debug_msg('endpointtypes_validation', errors, status,endpoint.node_type,endpoint.endpoint_type)
return (status,errors)
# All the details are received, now call the corresponding method in interface and get the status
debug_msg('interface_communication', endpoint.endpoint_uuid,endpoint.node_type,endpoint.endpoint_type,endpoint.internal_nod_id,endpoint.internal_end_id,endpointtype.method,expected_status)
interfaces_method_name = getattr(interface,endpointtype.method)
status, errors = interfaces_method_name(endpoint,expected_status)
# Action required for Group as UUID passed is endpoint group
elif group != None:
pass
return (status,errors) | mit | 3,501,948,730,950,021,000 | 39.149813 | 630 | 0.617513 | false | 3.658951 | false | false | false |
proversity-org/edx-platform | openedx/core/djangoapps/user_api/management/commands/migrate_user_profile_langs.py | 15 | 3863 | """
Migrates user preferences from one language code to another in batches. Dark lang preferences are not affected.
"""
from __future__ import print_function
import logging
from time import sleep
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.db.models import Q, Max
from openedx.core.djangoapps.dark_lang.models import DarkLangConfig
from openedx.core.djangoapps.user_api.models import UserPreference
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_SLEEP_TIME_SECS = 10
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Implementation of the migrate command
"""
help = 'Migrate all user language preferences (excluding dark languages) from one language code to another.'
def add_arguments(self, parser):
parser.add_argument('old_lang_code',
help='Original language code, ex. "zh-cn"')
parser.add_argument('new_lang_code',
help='New language code, ex. "zh-hans"')
parser.add_argument('--start_id',
type=int,
default=1,
help='ID to begin from, in case a run needs to be restarted from the middle.')
parser.add_argument('--chunk_size',
type=int,
default=DEFAULT_CHUNK_SIZE,
help='Number of users whose preferences will be updated per batch.')
parser.add_argument('--sleep_time_secs',
type=int,
default=DEFAULT_SLEEP_TIME_SECS,
help='Number of seconds to sleep between batches.')
def handle(self, *args, **options):
"""
Execute the command.
"""
old_lang_code = options['old_lang_code']
new_lang_code = options['new_lang_code']
chunk_size = options['chunk_size']
sleep_time_secs = options['sleep_time_secs']
start = options['start_id']
end = start + chunk_size
# Make sure we're changing to a code that actually exists. Presumably it's safe to move away from a code that
# doesn't.
langs = [lang_code[0] for lang_code in settings.LANGUAGES]
langs += DarkLangConfig.current().released_languages_list
if new_lang_code not in langs:
raise CommandError('{} is not a configured language code in settings.LANGUAGES '
'or the current DarkLangConfig.'.format(new_lang_code))
max_id = UserPreference.objects.all().aggregate(Max('id'))['id__max']
print('Updating user language preferences from {} to {}. '
'Start id is {}, current max id is {}. '
'Chunk size is of {}'.format(old_lang_code, new_lang_code, start, max_id, chunk_size))
updated_count = 0
while True:
# On the last time through catch any new rows added since this run began
if end >= max_id:
print('Last round, includes all new rows added since this run started.')
id_query = Q(id__gte=start)
else:
id_query = Q(id__gte=start) & Q(id__lt=end)
curr = UserPreference.objects.filter(
id_query,
key='pref-lang',
value=old_lang_code
).update(value=new_lang_code)
updated_count += curr
print('Updated rows {} to {}, {} rows affected'.format(start, end - 1, curr))
if end >= max_id:
break
start = end
end += chunk_size
sleep(sleep_time_secs)
print('Finished! Updated {} total preferences from {} to {}'.format(updated_count, old_lang_code, new_lang_code))
| agpl-3.0 | -8,263,227,351,948,027,000 | 37.247525 | 121 | 0.577789 | false | 4.399772 | false | false | false |
kaltura/play-server | poc/tracker/ffmpegTSParams.py | 2 | 6872 | import commands
MEDIAINFO_BIN = 'mediainfo'
class MediaInfoParsers:
@staticmethod
def parseValue(value):
value = value.split(' / ')[0] # support 'Sampling rate : 44.1 KHz / 22.05 KHz'
splittedValue = value.split(' ')
value = ''.join(splittedValue[:-1])
try:
if value.endswith('.0'):
value = int(value[:-2])
elif '.' in value:
value = float(value)
else:
value = int(value)
except ValueError:
return None
return (value, splittedValue[-1])
@staticmethod
def parseBitrate(value):
value, units = MediaInfoParsers.parseValue(value)
if units == 'bps':
return value
elif units == 'Kbps':
return value * 1024
elif units == 'Mbps':
return value * 1024 * 1024
elif units == 'Gbps':
return value * 1024 * 1024 * 1024
return None
@staticmethod
def parseSamplingRate(value):
value, units = MediaInfoParsers.parseValue(value)
if units == 'KHz':
return value * 1000
return None
@staticmethod
def getSimpleParser(allowedUnits):
def result(value):
value, units = MediaInfoParsers.parseValue(value)
if units in allowedUnits:
return value
return None
return result
@staticmethod
def parseVideoProfile(value):
splittedValue = value.split('@L')
if len(splittedValue) != 2:
return None
return splittedValue
@staticmethod
def parseAudioProfile(value):
return value.split(' / ')[0].split('@')[0] # support 'HE-AAC / LC'
class MediaInfo:
PARSING_CONFIG = {
'general': [
('overall bit rate', 'containerBitrate', MediaInfoParsers.parseBitrate),
],
'video': [
('bit rate', 'videoBitrate', MediaInfoParsers.parseBitrate),
('width', 'videoWidth', MediaInfoParsers.getSimpleParser(['pixels'])),
('height', 'videoHeight', MediaInfoParsers.getSimpleParser(['pixels'])),
('frame rate', 'videoFrameRate', MediaInfoParsers.getSimpleParser(['fps'])),
('format settings, reframes', 'videoReframes', MediaInfoParsers.getSimpleParser(['frame', 'frames'])),
('format profile', 'videoProfile', MediaInfoParsers.parseVideoProfile),
],
'audio': [
('bit rate', 'audioBitrate', MediaInfoParsers.parseBitrate),
('sampling rate', 'audioSamplingRate', MediaInfoParsers.parseSamplingRate),
('channel(s)', 'audioChannels', MediaInfoParsers.getSimpleParser(['channel', 'channels'])),
('format profile', 'audioProfile', MediaInfoParsers.parseAudioProfile),
],
}
def parse(self, inputFileName):
cmdLine = '%s %s' % (MEDIAINFO_BIN, inputFileName)
output = commands.getoutput(cmdLine)
sectionName = None
values = {}
for curLine in output.split('\n'):
curLine = curLine.strip()
if len(curLine) == 0:
sectionName = None
continue
splittedLine = map(lambda x: x.strip(), curLine.split(':', 1))
if len(splittedLine) == 1:
sectionName = splittedLine[0].lower()
elif sectionName != None:
values.setdefault(sectionName, {})
values[sectionName][splittedLine[0].lower()] = splittedLine[1]
for sectionName, fields in self.PARSING_CONFIG.items():
for keyName, memberName, parser in fields:
value = None
if values.has_key(sectionName) and values[sectionName].has_key(keyName):
value = parser(values[sectionName][keyName])
setattr(self, memberName, value)
self.hasVideo = values.has_key('video')
self.hasAudio = values.has_key('audio')
def normalizeBitrate(bitrate, standardBitrates):
normBitrate = standardBitrates[0]
for curBitrate in standardBitrates:
if abs(curBitrate - bitrate) < abs(normBitrate - bitrate):
normBitrate = curBitrate
return normBitrate
def normalizeVideoBitrate(bitrate):
return normalizeBitrate(bitrate, [300,400,500,700,900,1200,1600,2000,2500,3000,4000])
def normalizeAudioBitrate(bitrate):
return normalizeBitrate(bitrate, [64,128])
def getMpegTSEncodingParams(referenceFileName, blackDuration = 10):
# get the mediainfo of the source file
mediaInfo = MediaInfo()
mediaInfo.parse(referenceFileName)
if not mediaInfo.hasVideo and not mediaInfo.hasAudio:
return (None, None) # no audio and no video -> file is invalid
# video codec
if mediaInfo.hasVideo:
blackInput = '-t %s' % blackDuration
vcodec = "-vcodec libx264 -subq 7 -qcomp 0.6 -qmin 10 -qmax 50 -qdiff 4 -bf 0 -coder 1 -x264opts b-pyramid:weightb:mixed-refs:8x8dct:no-fast-pskip=0:force-cfr:sps-id=26 -pix_fmt yuv420p -threads 4 -force_key_frames \"expr:gte(t,n_forced*2)\""
videoProfile = ' -vprofile main -level 3.1'
if mediaInfo.videoProfile != None:
profile, level = mediaInfo.videoProfile
if profile.lower() in ['baseline', 'main', 'high', 'high10', 'high422', 'high444']:
videoProfile = ' -vprofile %s -level %s' % (profile.lower(), level)
vcodec += videoProfile
if mediaInfo.videoBitrate != None:
vcodec += ' -b:v %sk' % normalizeVideoBitrate(mediaInfo.videoBitrate / 1024)
elif mediaInfo.containerBitrate != None:
vcodec += ' -b:v %sk' % normalizeVideoBitrate(mediaInfo.containerBitrate / 1024)
if mediaInfo.videoWidth != None and mediaInfo.videoHeight != None:
vcodec += ' -vf scale="iw*min(%s/iw\,%s/ih):ih*min(%s/iw\,%s/ih),pad=%s:%s:(%s-iw)/2:(%s-ih)/2"' % ((mediaInfo.videoWidth, mediaInfo.videoHeight) * 4)
blackInput += ' -s %sx%s' % (mediaInfo.videoWidth, mediaInfo.videoHeight)
if mediaInfo.videoFrameRate != None:
vcodec += ' -r %s' % (mediaInfo.videoFrameRate)
blackInput += ' -r %s' % (mediaInfo.videoFrameRate)
if mediaInfo.videoReframes != None:
vcodec += ' -refs %s' % (mediaInfo.videoReframes)
else:
vcodec += ' -refs 6'
blackInput += ' -f rawvideo -pix_fmt rgb24 -i /dev/zero'
else:
blackInput = ''
vcodec = '-vn'
# audio codec
if mediaInfo.hasAudio:
silenceInput = '-t %s' % blackDuration
acodec = '-acodec libfdk_aac'
audioProfile = ' -profile:a aac_he'
AUDIO_PROFILE_MAPPING = {
'LC': 'aac_low',
'HE-AAC': 'aac_he',
'HE-AACv2': 'aac_he_v2',
'ER AAC LD': 'aac_ld',
'ER AAC ELD': 'aac_eld',
}
if AUDIO_PROFILE_MAPPING.has_key(mediaInfo.audioProfile):
audioProfile = ' -profile:a %s' % AUDIO_PROFILE_MAPPING[mediaInfo.audioProfile]
acodec += audioProfile
if mediaInfo.audioBitrate != None:
acodec += ' -b:a %sk' % normalizeAudioBitrate(mediaInfo.audioBitrate / 1024)
if mediaInfo.audioSamplingRate != None:
acodec += ' -ar %s' % (mediaInfo.audioSamplingRate)
silenceInput += ' -ar %s' % (mediaInfo.audioSamplingRate)
if mediaInfo.audioChannels != None:
acodec += ' -ac %s' % (mediaInfo.audioChannels)
silenceInput += ' -ac %s' % (mediaInfo.audioChannels)
silenceInput += ' -f s16le -acodec pcm_s16le -i /dev/zero'
else:
silenceInput = ''
acodec = '-an'
# filter / format - fixed
filter = "-bsf h264_mp4toannexb"
format = '-f mpegts'
encParams = ' '.join([vcodec, acodec, filter, format])
blackEncParams = ' '.join([blackInput, silenceInput, encParams])
return (encParams, blackEncParams)
| agpl-3.0 | -2,543,118,354,289,121,000 | 32.359223 | 244 | 0.68408 | false | 2.983934 | false | false | false |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/extensions/rmagic.py | 5 | 22605 | # -*- coding: utf-8 -*-
"""
======
Rmagic
======
Magic command interface for interactive work with R via rpy2
.. note::
The ``rpy2`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
You will also need a working copy of R.
Usage
=====
To enable the magics below, execute ``%load_ext rmagic``.
``%R``
{R_DOC}
``%Rpush``
{RPUSH_DOC}
``%Rpull``
{RPULL_DOC}
``%Rget``
{RGET_DOC}
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
import tempfile
from glob import glob
from shutil import rmtree
# numpy and rpy2 imports
import numpy as np
import rpy2.rinterface as ri
import rpy2.robjects as ro
try:
from rpy2.robjects import pandas2ri
pandas2ri.activate()
except ImportError:
pandas2ri = None
from rpy2.robjects import numpy2ri
numpy2ri.activate()
# IPython imports
from IPython.core.displaypub import publish_display_data
from IPython.core.magic import (Magics, magics_class, line_magic,
line_cell_magic, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring
)
from IPython.external.simplegeneric import generic
from IPython.utils.py3compat import (str_to_unicode, unicode_to_str, PY3,
unicode_type)
from IPython.utils.text import dedent
class RInterpreterError(ri.RRuntimeError):
"""An error when running R code in a %%R magic cell."""
def __init__(self, line, err, stdout):
self.line = line
self.err = err.rstrip()
self.stdout = stdout.rstrip()
def __unicode__(self):
s = 'Failed to parse and evaluate line %r.\nR error message: %r' % \
(self.line, self.err)
if self.stdout and (self.stdout != self.err):
s += '\nR stdout:\n' + self.stdout
return s
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode_to_str(unicode(self), 'utf-8')
def Rconverter(Robj, dataframe=False):
"""
Convert an object in R's namespace to one suitable
for ipython's namespace.
For a data.frame, it tries to return a structured array.
It first checks for colnames, then names.
If all are NULL, it returns np.asarray(Robj), else
it tries to construct a recarray
Parameters
----------
Robj: an R object returned from rpy2
"""
is_data_frame = ro.r('is.data.frame')
colnames = ro.r('colnames')
rownames = ro.r('rownames') # with pandas, these could be used for the index
names = ro.r('names')
if dataframe:
as_data_frame = ro.r('as.data.frame')
cols = colnames(Robj)
_names = names(Robj)
if cols != ri.NULL:
Robj = as_data_frame(Robj)
names = tuple(np.array(cols))
elif _names != ri.NULL:
names = tuple(np.array(_names))
else: # failed to find names
return np.asarray(Robj)
Robj = np.rec.fromarrays(Robj, names = names)
return np.asarray(Robj)
@generic
def pyconverter(pyobj):
"""Convert Python objects to R objects. Add types using the decorator:
@pyconverter.when_type
"""
return pyobj
# The default conversion for lists seems to make them a nested list. That has
# some advantages, but is rarely convenient, so for interactive use, we convert
# lists to a numpy array, which becomes an R vector.
@pyconverter.when_type(list)
def pyconverter_list(pyobj):
return np.asarray(pyobj)
if pandas2ri is None:
# pandas2ri was new in rpy2 2.3.3, so for now we'll fallback to pandas'
# conversion function.
try:
from pandas import DataFrame
from pandas.rpy.common import convert_to_r_dataframe
@pyconverter.when_type(DataFrame)
def pyconverter_dataframe(pyobj):
return convert_to_r_dataframe(pyobj, strings_as_factors=True)
except ImportError:
pass
@magics_class
class RMagics(Magics):
"""A set of magics useful for interactive work with R via rpy2.
"""
def __init__(self, shell, Rconverter=Rconverter,
pyconverter=pyconverter,
cache_display_data=False):
"""
Parameters
----------
shell : IPython shell
Rconverter : callable
To be called on values taken from R before putting them in the
IPython namespace.
pyconverter : callable
To be called on values in ipython namespace before
assigning to variables in rpy2.
cache_display_data : bool
If True, the published results of the final call to R are
cached in the variable 'display_cache'.
"""
super(RMagics, self).__init__(shell)
self.cache_display_data = cache_display_data
self.r = ro.R()
self.Rstdout_cache = []
self.pyconverter = pyconverter
self.Rconverter = Rconverter
def eval(self, line):
'''
Parse and evaluate a line of R code with rpy2.
Returns the output to R's stdout() connection,
the value generated by evaluating the code, and a
boolean indicating whether the return value would be
visible if the line of code were evaluated in an R REPL.
R Code evaluation and visibility determination are
done via an R call of the form withVisible({<code>})
'''
old_writeconsole = ri.get_writeconsole()
ri.set_writeconsole(self.write_console)
try:
res = ro.r("withVisible({%s\n})" % line)
value = res[0] #value (R object)
visible = ro.conversion.ri2py(res[1])[0] #visible (boolean)
except (ri.RRuntimeError, ValueError) as exception:
warning_or_other_msg = self.flush() # otherwise next return seems to have copy of error
raise RInterpreterError(line, str_to_unicode(str(exception)), warning_or_other_msg)
text_output = self.flush()
ri.set_writeconsole(old_writeconsole)
return text_output, value, visible
def write_console(self, output):
'''
A hook to capture R's stdout in a cache.
'''
self.Rstdout_cache.append(output)
def flush(self):
'''
Flush R's stdout cache to a string, returning the string.
'''
value = ''.join([str_to_unicode(s, 'utf-8') for s in self.Rstdout_cache])
self.Rstdout_cache = []
return value
@skip_doctest
@needs_local_scope
@line_magic
def Rpush(self, line, local_ns=None):
'''
A line-level magic for R that pushes
variables from python to rpy2. The line should be made up
of whitespace separated variable names in the IPython
namespace::
In [7]: import numpy as np
In [8]: X = np.array([4.5,6.3,7.9])
In [9]: X.mean()
Out[9]: 6.2333333333333343
In [10]: %Rpush X
In [11]: %R mean(X)
Out[11]: array([ 6.23333333])
'''
if local_ns is None:
local_ns = {}
inputs = line.split(' ')
for input in inputs:
try:
val = local_ns[input]
except KeyError:
try:
val = self.shell.user_ns[input]
except KeyError:
# reraise the KeyError as a NameError so that it looks like
# the standard python behavior when you use an unnamed
# variable
raise NameError("name '%s' is not defined" % input)
self.r.assign(input, self.pyconverter(val))
@skip_doctest
@magic_arguments()
@argument(
'-d', '--as_dataframe', action='store_true',
default=False,
help='Convert objects to data.frames before returning to ipython.'
)
@argument(
'outputs',
nargs='*',
)
@line_magic
def Rpull(self, line):
'''
A line-level magic for R that pulls
variables from python to rpy2::
In [18]: _ = %R x = c(3,4,6.7); y = c(4,6,7); z = c('a',3,4)
In [19]: %Rpull x y z
In [20]: x
Out[20]: array([ 3. , 4. , 6.7])
In [21]: y
Out[21]: array([ 4., 6., 7.])
In [22]: z
Out[22]:
array(['a', '3', '4'],
dtype='|S1')
If --as_dataframe, then each object is returned as a structured array
after first passed through "as.data.frame" in R before
being calling self.Rconverter.
This is useful when a structured array is desired as output, or
when the object in R has mixed data types.
See the %%R docstring for more examples.
Notes
-----
Beware that R names can have '.' so this is not fool proof.
To avoid this, don't name your R objects with '.'s...
'''
args = parse_argstring(self.Rpull, line)
outputs = args.outputs
for output in outputs:
self.shell.push({output:self.Rconverter(self.r(output),dataframe=args.as_dataframe)})
@skip_doctest
@magic_arguments()
@argument(
'-d', '--as_dataframe', action='store_true',
default=False,
help='Convert objects to data.frames before returning to ipython.'
)
@argument(
'output',
nargs=1,
type=str,
)
@line_magic
def Rget(self, line):
'''
Return an object from rpy2, possibly as a structured array (if possible).
Similar to Rpull except only one argument is accepted and the value is
returned rather than pushed to self.shell.user_ns::
In [3]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [4]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [5]: %R -i datapy
In [6]: %Rget datapy
Out[6]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [7]: %Rget -d datapy
Out[7]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
'''
args = parse_argstring(self.Rget, line)
output = args.output
return self.Rconverter(self.r(output[0]),dataframe=args.as_dataframe)
@skip_doctest
@magic_arguments()
@argument(
'-i', '--input', action='append',
help='Names of input variable from shell.user_ns to be assigned to R variables of the same names after calling self.pyconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pushed from rpy2 to shell.user_ns after executing cell body and applying self.Rconverter. Multiple names can be passed separated only by commas with no whitespace.'
)
@argument(
'-w', '--width', type=int,
help='Width of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-h', '--height', type=int,
help='Height of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-d', '--dataframe', action='append',
help='Convert these objects to data.frames and return as structured arrays.'
)
@argument(
'-u', '--units', type=unicode_type, choices=["px", "in", "cm", "mm"],
help='Units of png plotting device sent as an argument to *png* in R. One of ["px", "in", "cm", "mm"].'
)
@argument(
'-r', '--res', type=int,
help='Resolution of png plotting device sent as an argument to *png* in R. Defaults to 72 if *units* is one of ["in", "cm", "mm"].'
)
@argument(
'-p', '--pointsize', type=int,
help='Pointsize of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-b', '--bg',
help='Background of png plotting device sent as an argument to *png* in R.'
)
@argument(
'-n', '--noreturn',
help='Force the magic to not return anything.',
action='store_true',
default=False
)
@argument(
'code',
nargs='*',
)
@needs_local_scope
@line_cell_magic
def R(self, line, cell=None, local_ns=None):
'''
Execute code in R, and pull some of the results back into the Python namespace.
In line mode, this will evaluate an expression and convert the returned value to a Python object.
The return value is determined by rpy2's behaviour of returning the result of evaluating the
final line.
Multiple R lines can be executed by joining them with semicolons::
In [9]: %R X=c(1,4,5,7); sd(X); mean(X)
Out[9]: array([ 4.25])
In cell mode, this will run a block of R code. The resulting value
is printed if it would printed be when evaluating the same code
within a standard R REPL.
Nothing is returned to python by default in cell mode::
In [10]: %%R
....: Y = c(2,4,3,9)
....: summary(lm(Y~X))
Call:
lm(formula = Y ~ X)
Residuals:
1 2 3 4
0.88 -0.24 -2.28 1.64
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.0800 2.3000 0.035 0.975
X 1.0400 0.4822 2.157 0.164
Residual standard error: 2.088 on 2 degrees of freedom
Multiple R-squared: 0.6993,Adjusted R-squared: 0.549
F-statistic: 4.651 on 1 and 2 DF, p-value: 0.1638
In the notebook, plots are published as the output of the cell::
%R plot(X, Y)
will create a scatter plot of X bs Y.
If cell is not None and line has some R code, it is prepended to
the R code in cell.
Objects can be passed back and forth between rpy2 and python via the -i -o flags in line::
In [14]: Z = np.array([1,4,5,10])
In [15]: %R -i Z mean(Z)
Out[15]: array([ 5.])
In [16]: %R -o W W=Z*mean(Z)
Out[16]: array([ 5., 20., 25., 50.])
In [17]: W
Out[17]: array([ 5., 20., 25., 50.])
The return value is determined by these rules:
* If the cell is not None, the magic returns None.
* If the cell evaluates as False, the resulting value is returned
unless the final line prints something to the console, in
which case None is returned.
* If the final line results in a NULL value when evaluated
by rpy2, then None is returned.
* No attempt is made to convert the final value to a structured array.
Use the --dataframe flag or %Rget to push / return a structured array.
* If the -n flag is present, there is no return value.
* A trailing ';' will also result in no return value as the last
value in the line is an empty string.
The --dataframe argument will attempt to return structured arrays.
This is useful for dataframes with
mixed data types. Note also that for a data.frame,
if it is returned as an ndarray, it is transposed::
In [18]: dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')]
In [19]: datapy = np.array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5, 'e')], dtype=dtype)
In [20]: %%R -o datar
datar = datapy
....:
In [21]: datar
Out[21]:
array([['1', '2', '3', '4'],
['2', '3', '2', '5'],
['a', 'b', 'c', 'e']],
dtype='|S1')
In [22]: %%R -d datar
datar = datapy
....:
In [23]: datar
Out[23]:
array([(1, 2.9, 'a'), (2, 3.5, 'b'), (3, 2.1, 'c'), (4, 5.0, 'e')],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '|S1')])
The --dataframe argument first tries colnames, then names.
If both are NULL, it returns an ndarray (i.e. unstructured)::
In [1]: %R mydata=c(4,6,8.3); NULL
In [2]: %R -d mydata
In [3]: mydata
Out[3]: array([ 4. , 6. , 8.3])
In [4]: %R names(mydata) = c('a','b','c'); NULL
In [5]: %R -d mydata
In [6]: mydata
Out[6]:
array((4.0, 6.0, 8.3),
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
In [7]: %R -o mydata
In [8]: mydata
Out[8]: array([ 4. , 6. , 8.3])
'''
args = parse_argstring(self.R, line)
# arguments 'code' in line are prepended to
# the cell lines
if cell is None:
code = ''
return_output = True
line_mode = True
else:
code = cell
return_output = False
line_mode = False
code = ' '.join(args.code) + code
# if there is no local namespace then default to an empty dict
if local_ns is None:
local_ns = {}
if args.input:
for input in ','.join(args.input).split(','):
try:
val = local_ns[input]
except KeyError:
try:
val = self.shell.user_ns[input]
except KeyError:
raise NameError("name '%s' is not defined" % input)
self.r.assign(input, self.pyconverter(val))
if getattr(args, 'units') is not None:
if args.units != "px" and getattr(args, 'res') is None:
args.res = 72
args.units = '"%s"' % args.units
png_argdict = dict([(n, getattr(args, n)) for n in ['units', 'res', 'height', 'width', 'bg', 'pointsize']])
png_args = ','.join(['%s=%s' % (o,v) for o, v in png_argdict.items() if v is not None])
# execute the R code in a temporary directory
tmpd = tempfile.mkdtemp()
self.r('png("%s/Rplots%%03d.png",%s)' % (tmpd.replace('\\', '/'), png_args))
text_output = ''
try:
if line_mode:
for line in code.split(';'):
text_result, result, visible = self.eval(line)
text_output += text_result
if text_result:
# the last line printed something to the console so we won't return it
return_output = False
else:
text_result, result, visible = self.eval(code)
text_output += text_result
if visible:
old_writeconsole = ri.get_writeconsole()
ri.set_writeconsole(self.write_console)
ro.r.show(result)
text_output += self.flush()
ri.set_writeconsole(old_writeconsole)
except RInterpreterError as e:
print(e.stdout)
if not e.stdout.endswith(e.err):
print(e.err)
rmtree(tmpd)
return
finally:
self.r('dev.off()')
# read out all the saved .png files
images = [open(imgfile, 'rb').read() for imgfile in glob("%s/Rplots*png" % tmpd)]
# now publish the images
# mimicking IPython/zmq/pylab/backend_inline.py
fmt = 'png'
mimetypes = { 'png' : 'image/png', 'svg' : 'image/svg+xml' }
mime = mimetypes[fmt]
# publish the printed R objects, if any
display_data = []
if text_output:
display_data.append(('RMagic.R', {'text/plain':text_output}))
# flush text streams before sending figures, helps a little with output
for image in images:
# synchronization in the console (though it's a bandaid, not a real sln)
sys.stdout.flush(); sys.stderr.flush()
display_data.append(('RMagic.R', {mime: image}))
# kill the temporary directory
rmtree(tmpd)
# try to turn every output into a numpy array
# this means that output are assumed to be castable
# as numpy arrays
if args.output:
for output in ','.join(args.output).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=False)})
if args.dataframe:
for output in ','.join(args.dataframe).split(','):
self.shell.push({output:self.Rconverter(self.r(output), dataframe=True)})
for tag, disp_d in display_data:
publish_display_data(tag, disp_d)
# this will keep a reference to the display_data
# which might be useful to other objects who happen to use
# this method
if self.cache_display_data:
self.display_cache = display_data
# if in line mode and return_output, return the result as an ndarray
if return_output and not args.noreturn:
if result != ri.NULL:
return self.Rconverter(result, dataframe=False)
__doc__ = __doc__.format(
R_DOC = dedent(RMagics.R.__doc__),
RPUSH_DOC = dedent(RMagics.Rpush.__doc__),
RPULL_DOC = dedent(RMagics.Rpull.__doc__),
RGET_DOC = dedent(RMagics.Rget.__doc__)
)
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(RMagics)
# Initialising rpy2 interferes with readline. Since, at this point, we've
# probably just loaded rpy2, we reset the delimiters. See issue gh-2759.
if ip.has_readline:
ip.readline.set_completer_delims(ip.readline_delims)
| mit | 3,299,383,235,358,065,000 | 31.478448 | 211 | 0.54019 | false | 3.871382 | false | false | false |
JohnKendrick/PDielec | Examples/SizeEffects/BaTiO3/script.py | 1 | 2272 | #
# SettingsTab
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = False
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'average'
#
# 0th Scenario tabs
#
tab = self.notebook.scenarios[0]
tab.settings['Matrix'] = 'ptfe'
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['Volume fraction'] = 0.1
tab.settings['Ellipsoid a/b'] = 0.5
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Effective medium method'] = 'Averaged permittivity'
tab.settings['Particle shape'] = 'Sphere'
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Legend'] = 'Averaged permittivity'
# Add new scenarios
methods = ['Bruggeman']
shapes = ['Ellipsoid','Plate']
hkls = [[0,0,1], [1,0,0]]
vfs = [0.1]
sizes = [0.0, 1.0, 3.0]
for method in methods:
for shape,hkl in zip(shapes,hkls):
for vf in vfs:
for size in sizes:
self.notebook.addScenario()
tab = self.notebook.scenarios[-1]
tab.settings['Volume fraction'] = vf
tab.settings['Particle shape'] = shape
tab.settings['Particle size(mu)'] = size
tab.settings['Effective medium method'] = method
tab.settings['Unique direction - h'] = hkl[0]
tab.settings['Unique direction - k'] = hkl[1]
tab.settings['Unique direction - l'] = hkl[2]
#tab.settings['Legend'] = method + ' ' + shape + ' vf='+str(vf)+' size='+str(size)
tab.settings['Legend'] = method + ' ' + shape + str(hkl) + ' size='+str(size)
#
# Plotting Tab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0.0
tab.settings['Maximum frequency'] = 300.0
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Plot title'] = 'Size Effects BaTiO3'
#
# Analysis Tab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 800
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
| mit | -4,286,127,456,843,243,500 | 34.5 | 98 | 0.632042 | false | 3.302326 | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.