repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
zerodays/gradientni-spust-predstavitev | Elipsa/select_data.py | 2 | 3571 |
from gradient_descent import get_data
def select_data():
done = False
while not done:
try:
print('Select input data set:')
print(' 1 Mars half year')
print(' 2 Mars full (whole year measured every Earth month)')
print(' 3 Mars small (every fourth point of \'Mars full\')')
print(' 4 Earth full (every 14 days)')
print(' 5 Saturn full (every 100 days since 1987 = one Saturn year)')
print(' 6 Jupiter full (every 60 days since 2005 = one Jupiter year)')
print(' 7 Halley full (every 30 days 1984 - 1987)')
print(' 8 custom file path')
answer = int(input('Your selection: '))
if answer == 1:
data = get_data('Podatki/mars_half_year.csv')
elif answer == 2:
data = get_data('Podatki/mars_full.csv')
elif answer == 3:
data = get_data('Podatki/mars_full.csv')[::4]
elif answer == 4:
data = get_data('Podatki/earth.csv')
elif answer == 5:
data = get_data('Podatki/saturn.csv')
elif answer == 6:
data = get_data('Podatki/jupiter.csv')
elif answer == 7:
data = get_data('Podatki/halley.csv')
elif answer == 8:
data = get_data(input('Path: '))
else:
continue
print('\nSelect start parameters:')
print(' 1 default [10, 0, 10, 0, 0, 0]')
print(' 2 Mars approximation [-100, 0, -100, -300, 200, 30000]')
print(' 3 Mars half year wrong minimum (hyperbola) [-1017000, 39000, -299600, -2983000, 561000, 23157000]')
print(' 4 Jupiter approximation [-813700, -6200, -785600, -6000, -1600, 5376000]')
print(' 5 Saturn approximation [5541730, 107633, 6468945, 1673, -90184, 72001305]')
print(' 6 Halley approximation [-1000, -1400, -600, -25000, 30000, 230000]')
print(' 7 custom params')
            try:
                answer = int(input('Your selection: '))
            except ValueError:
                # Invalid input: route to the default parameter set below
                # instead of reusing the stale data-set selection in `answer`.
                answer = 1
if answer == 1:
params = [10, 0, 10, 0, 0, -300]
elif answer == 2:
params = [-100, 0, -100, -300, 200, 30000]
elif answer == 3:
params = [-1017000, 39000, -299600, -2983000, 561000, 23157000]
elif answer == 4:
params = [-813700, -6200, -785600, -6000, -1600, 5376000]
elif answer == 5:
params = [5541730, 107633, 6468945, 1673, -90184, 72001305]
elif answer == 6:
params = [-1000, -1400, -600, -25000, 30000, 230000]
elif answer == 7:
params = [float(i) for i in input('Params separated by ,: ').split(',')]
else:
continue
print('\nRecommended steps:')
print(' Mars: 1e-7')
print(' Earth: 1e-6')
print(' Saturn: 7e-11')
print(' Jupiter, Halley: 1e-9')
try:
step = float(input('Define step (default is 1e-6): '))
except ValueError:
step = 1e-6
# load Earth data
earth_data = get_data('Podatki/earth.csv')
done = True
except ValueError:
print('Invalid input!')
print()
return data, earth_data, params, step
| gpl-3.0 | 6,817,748,943,440,151,000 | 40.523256 | 121 | 0.486138 | false |
NabilZaman/data_structures | python-data-structures/B-tree/btree.py | 1 | 13381 |
#!/usr/bin/env python
"""
btree.py
This file contains an implementation of a B-Tree as outlined in
https://en.wikipedia.org/wiki/B-tree.
"""
def find_index_in_sorted_list(key, L):
"""
Helper function. Return index closest (above) key in sorted list.
"""
if not L:
return 0
if len(L) == 1:
if key > L[0]:
return 1
else:
return 0
halfway = len(L) / 2
if key < L[halfway]:
return find_index_in_sorted_list(key, L[:halfway])
else:
return halfway + find_index_in_sorted_list(key, L[halfway:])
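
# Illustrative example: find_index_in_sorted_list(6, [1, 3, 5, 7]) returns 3,
# the index at which 6 would be inserted to keep the list sorted; for a key
# already present, the index of that key itself is returned.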
class BTree(object):
""" The BTree class is a 'generic' implementation of a BTree.
The tree can contain any comparable types.
The tree is instantiated with an "order" or "branching factor"
which specifies the maximum number of children any node in the
tree may have. """
def __init__(self, order, parent=None, keys=None, children=None):
if order < 3:
raise AttributeError("Order must be greater than 2.")
self.order = order
self.parent = parent
if keys is None:
self.keys = []
else:
self.keys = keys
if children is None:
self.children = []
else:
self.children = children
for child in self.children:
child.parent = self
self.size = len(self.keys) + sum(len(child) for child in self.children)
def contains(self, key):
""" Return whether the tree contains the given key. """
if key in self.keys:
return True
elif self.is_leaf:
return False
else:
return self._find_child_for_key(key).contains(key)
def insert(self, key):
"""
        Insert given key into the tree. Return whether the tree was modified.
The tree will not be modified if the key was already present.
"""
if key in self.keys:
return False
if self.is_leaf():
# Simply add the key to your keys
self.keys.append(key)
self.keys.sort()
# Then make sure you haven't become too large
self._split_if_needed()
self.size += 1
return True
else:
# Recursively insert into appropriate child
changed = self._find_child_for_key(key).insert(key)
if changed:
self.size += 1
return changed
def insert_all(self, keys):
return [self.insert(key) for key in keys]
def delete(self, key):
"""
Remove given key from the tree. Return whether tree was modified.
The tree will not be modified if the key was not present.
"""
print "Trying to delete", key, "from", self.keys
if self.is_leaf():
if key not in self.keys:
# Nothing to do
return False
else:
# Remove the key and make sure you haven't become too small
self.keys.remove(key)
self._merge_if_needed()
self.size -= 1
return True
else:
if key not in self.keys:
# Recursively try to delete from appropriate child
changed = self._find_child_for_key(key).delete(key)
if changed:
self.size -= 1
return changed
else:
replacement_index = find_index_in_sorted_list(key, self.keys)
# Find a suitable replacement to replace the deleted key
replacement_child = self.children[replacement_index]
replacement = replacement_child.find_greatest()
# Replace the deleted key
self.keys[replacement_index] = replacement
# Remove the replacement value from child it came from
replacement_child.delete(replacement)
self.size -= 1
return True
def delete_all(self, keys):
return [self.delete(key) for key in keys]
def height(self):
""" Return the height of the tree. """
if self.is_leaf():
return 1
return 1 + max(child.height() for child in self.children)
def is_leaf(self):
""" Return whether the tree is a leaf node (has no children). """
return not self.children
def is_root(self):
""" Return whether this tree is the root node (has no parent). """
return self.parent is None
def is_empty(self):
""" Return whether the tree is empty (has no keys). """
return not self.keys
def is_too_full(self):
""" Return whether you have exceeded capacity. """
return len(self.keys) >= self.order
def is_too_empty(self):
"""
Return whether you have dropped below needed capacity.
Does not apply to the root node.
"""
return len(self.keys) < self.order/2 and not self.is_root()
def find_greatest(self):
""" Return the greatest key value in the tree. """
if self.is_empty():
return
if self.is_leaf():
return self.keys[-1]
else:
return self.children[-1].find_greatest()
def find_least(self):
""" Return the least key value in the tree. """
if self.is_empty():
return
if self.is_leaf():
return self.keys[0]
else:
return self.children[0].find_least()
def _merge_if_needed(self):
"""
Helper function. Merge this node if it is too small.
Do nothing if the node is not below needed capacity.
"""
if self.is_too_empty():
self.parent._merge_child_with_key(self.keys[0])
def _merge_child_with_key(self, key):
"""
Helper function. Increase the size of the child with given key.
        There are two strategies for doing this.
        First, you may find a sibling with spare keys and rotate that
        sibling's extreme key closest to the problem node, so that this
        key becomes the new separator between them and the old separator
        is inserted into the problem node.
        Second, if no such sibling can be found, combine this node and
        one of its siblings (both minimal in size) and add the key
        separating them to create a new, full-sized node.
"""
child_index = find_index_in_sorted_list(key, self.keys)
child = self.children[child_index]
# First check if there is a child to its right that can donate a key
if child_index + 1 < len(self.children) and \
len(self.children[child_index+1].keys) > self.order/2:
right_child = self.children[child_index+1]
print "Rotating left", child.keys
# Rotate "to the left"
child.keys.append(self.keys[child_index])
self.keys[child_index] = right_child.keys.pop(0)
if not right_child.is_leaf():
transfer_children = right_child.children.pop(0)
child.children.append(transfer_children)
transfer_children.parent = child
elif child_index - 1 >= 0 and \
len(self.children[child_index-1].keys) > self.order/2:
left_child = self.children[child_index-1]
print "Rotating right", child.keys
# Rotate "to the right"
            child.keys.insert(0, self.keys[child_index-1])
            self.keys[child_index-1] = left_child.keys.pop()
if not left_child.is_leaf():
transfer_children = left_child.children.pop()
child.children.insert(0, transfer_children)
transfer_children.parent = child
else:
if child_index + 1 < len(self.children):
# Merge with the right_child
left_child_index = child_index
else:
# Merge with the left child
left_child_index = child_index-1
right_child_index = left_child_index+1
left_child = self.children[left_child_index]
right_child = self.children[right_child_index]
print "Merging...", left_child.keys, right_child.keys
# Combine the keys and children of the two children you're merging
newKeys = left_child.keys + [self.keys.pop(left_child_index)] + \
right_child.keys
newChildren = left_child.children + right_child.children
if not self.keys:
# You're the root and you've removed your last key
self.keys = newKeys
self.children = newChildren
for child in self.children:
child.parent = self
else:
# Add the new merged child to your list of children
mergedChild = BTree(self.order, parent=self, keys=newKeys,\
children=newChildren)
self.children = self.children[:left_child_index] +\
[mergedChild] + self.children[right_child_index+1:]
# Merge yourself if this caused you to grow too small
self._merge_if_needed()
def _split_if_needed(self):
"""
Helper function. Split this node if it is too large.
Do nothing if the node does not exceed capacity.
Mutually recurse with _split_child_with_key.
"""
if self.is_too_full():
# Need to split the node
half_keys = len(self.keys) / 2
median = self.keys[half_keys]
self.keys = self.keys[:half_keys] + self.keys[half_keys+1:]
if self.is_root():
# Need to split yourself
dummy_child = BTree(self.order, parent=self,\
keys=self.keys, children=self.children)
self.keys = []
self.children = [dummy_child]
self._split_child_with_key(median)
else:
# Get your parent to split you
self.parent._split_child_with_key(median)
def _split_child_with_key(self, key):
"""
Helper function. Split a "full" child node into two children.
        Find the child the key would belong to, split that child in half,
        then use that key as the separator between those two new children.
Mutually recurse with _split_if_needed.
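        # Illustration: a child whose keys are [1, 3, 5, 7] (its median was
        # already removed by _split_if_needed) becomes two children [1, 3] and
        # [5, 7], with `key` inserted between them in self.keys.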
"""
# Find the child
child_index = find_index_in_sorted_list(key, self.keys)
child = self.children[child_index]
# Split its keys and children
half_keys = len(child.keys) / 2
left_half_keys = child.keys[:half_keys]
right_half_keys = child.keys[half_keys:]
half_children = len(child.children) / 2
left_half_children = child.children[:half_children]
right_half_children = child.children[half_children:]
# Build two new children each with half its resources
newLeftChild = BTree(self.order, parent=self, \
keys=left_half_keys, children=left_half_children)
newRightChild = BTree(self.order, parent=self, \
keys=right_half_keys, children=right_half_children)
# Fill in your keys/children with these new ones
self.keys = self.keys[:child_index] + [key] + self.keys[child_index:]
self.children = self.children[:child_index] + \
[newLeftChild, newRightChild] + \
self.children[child_index+1:]
# Split yourself if this caused you to grow too large
self._split_if_needed()
def _find_child_for_key(self, key):
"""
Helper function. Return the child that is responsible for the given key.
        The key must not already be present in this node's keys.
"""
child_index = find_index_in_sorted_list(key, self.keys)
return self.children[child_index]
def __repr__(self, level=0):
result = ''
if self.is_leaf():
result += '{pre}{keys}\n'.format(pre=level*'\t', keys=self.keys)
else:
for index, key in enumerate(self.keys):
result += '{pre}{child}\n'.format(pre=level*'\t', \
child=self.children[index].__repr__(level+1))
result += '{pre}{key}:\n'.format(pre=level*'\t', key=key)
result += '{pre}{child}\n'.format(pre=level*'\t', \
child=self.children[-1].__repr__(level+1))
return result
def __len__(self):
""" Return number of keys in tree. """
return len(self.keys) + sum(len(child) for child in self.children)
if __name__ == '__main__':
test_list1 = [1, 3, 5, 7, 9, 11, 13]
test_list2 = [2, 4, 6, 8, 10, 12]
test_list3 = [1]
keys = [0, 2, 6, 9, 15]
print [(find_index_in_sorted_list(key, test_list1), key) for key in keys]
print [(find_index_in_sorted_list(key, test_list2), key) for key in keys]
print [(find_index_in_sorted_list(key, test_list3), key) for key in keys]
tree = BTree(5)
tree.insert_all(range(100))
tree.delete_all(range(100))
print tree.keys
print len(tree)
print tree.size
print tree
print tree.find_greatest()
print tree.find_least()
| mit | -8,827,456,034,595,784,000 | 34.873995 | 80 | 0.561841 | false |
lgywata/soletta | data/scripts/suite.py | 1 | 5341 |
#!/usr/bin/env python3
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from multiprocessing import Manager
from threading import Thread
import argparse
import subprocess
WARN = '\033[93m'
PASS = '\033[92m'
FAIL = '\033[31m'
ENDC = '\033[0m'
STATUS_COLOR = [FAIL, PASS]
STATUS_TAG = ["FAIL", "PASS"]
def run_test_program(cmd, test, stat, log):
success = 1
print("%sRUNNING: %s%s" % (WARN, ENDC, test))
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True, universal_newlines=True)
log.append(output)
except subprocess.CalledProcessError as e:
success = 0
log.append(e.output)
stat[test] = success
# print out each test's result right away
print("%s%s:%s %s" % (STATUS_COLOR[success], STATUS_TAG[success], ENDC, test))
def print_log(log_file, stat, log):
output = ""
status_cnt = {'FAIL': 0, 'PASS': 0}
for k,v in sorted(stat.items()):
curr_status = STATUS_TAG[v]
status_cnt[curr_status] = status_cnt[curr_status] + 1
output += "============================================================================\n"
output += "Testsuite summary\n"
output += "============================================================================\n"
output += "# TOTAL: %d\n" % len(stat)
output += "# SUCCESS: %d\n" % status_cnt["PASS"]
output += "# FAIL: %d\n" % status_cnt["FAIL"]
output += "============================================================================\n"
output += "See %s\n" % log_file
output += "============================================================================\n"
# show the stat in the stdout
print(output)
log_output = ""
f = open(log_file, mode="w+")
for i in log:
log_output += "%s\n" % i
f.write(log_output)
f.close()
def run_valgrind_test(args):
manager = Manager()
common_args = "--error-exitcode=1 --num-callers=30"
valgrind_tools = {
'memcheck': '--leak-check=full --show-reachable=no',
'helgrind': '--history-level=approx',
'drd': None,
'exp-sgcheck': None,
}
for k,v in valgrind_tools.items():
stat = manager.dict()
log = manager.list()
threads = []
for i in args.tests.split():
cmd = "{valgrind} {test_path} {supp} --tool={tool} {tool_args} {common}". \
format(valgrind=args.valgrind, test_path=i, supp=args.valgrind_supp, \
tool=k, tool_args=v, common=common_args)
t = Thread(target=run_test_program, args=(cmd, i, stat, log,))
t.start()
threads.append(t)
for t in threads:
t.join()
print_log("test-suite-%s.log" % k, stat, log)
def run_test(args):
manager = Manager()
stat = manager.dict()
log = manager.list()
threads = []
for i in args.tests.split():
t = Thread(target=run_test_program, args=(i, i, stat, log,))
t.start()
threads.append(t)
for t in threads:
t.join()
print_log("test-suite.log", stat, log)
def run_suite(args):
if args.valgrind:
run_valgrind_test(args)
else:
run_test(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tests", help="List of tests to run", type=str)
parser.add_argument("--valgrind", help="Path to valgrind, if provided " \
"the tests are run with it", type=str)
parser.add_argument("--valgrind-supp", help="Path to valgrind's suppression file", type=str)
args = parser.parse_args()
if args.valgrind_supp:
args.valgrind_supp = "--suppressions=%s" % args.valgrind_supp
run_suite(args)
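
# Example invocation (hypothetical paths):
#   python3 suite.py --tests "tests/test-a tests/test-b" \
#       --valgrind /usr/bin/valgrind --valgrind-supp data/valgrind.supp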
| bsd-3-clause | -4,497,790,538,252,103,700 | 34.606667 | 96 | 0.597641 | false |
dario-ramos/tp3_cloud-events | src/model/guest_repository.py | 1 | 1057 |
#!/usr/bin/env python
from abc import ABCMeta, abstractmethod
from google.appengine.ext import ndb
from google.appengine.ext import db
from event_guest import EventGuest
class IEventGuestRepository:
__metaclass__ = ABCMeta
@abstractmethod
def create(self, guestFirstName, guestLastName,
guestEmail, guestCompany): pass
    @abstractmethod
    def getByEmail(self, email): pass
class GuestRepository(IEventGuestRepository):
def create(self, guestFirstName, guestLastName, guestEmail, guestCompany):
newGuest = EventGuest(
firstName=guestFirstName,
lastName=guestLastName,
email=guestEmail,
company=guestCompany
)
newGuest.put()
def getAll(self):
return EventGuest.query().fetch(900) # TODO Don't hardcode
    def getByEmail(self, email):
# TODO This is inefficient; make one of the above work
guestList = self.getAll()
for guest in guestList:
if guest.email == email:
return guest
return None
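
    # A more efficient lookup could query the datastore directly instead of
    # scanning every guest. A sketch, assuming `email` is an indexed ndb
    # property on EventGuest:
    #   return EventGuest.query(EventGuest.email == email).get()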
| gpl-2.0 | 8,995,529,977,190,002,000 | 26.102564 | 78 | 0.657521 | false |
youhusky/Facebook_Prepare | 341. Flatten Nested List Iterator.py | 1 | 2047 |
# Given a nested list of integers, implement an iterator to flatten it.
# Each element is either an integer, or a list -- whose elements may also be integers or other lists.
# Example 1:
# Given the list [[1,1],2,[1,1]],
# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be: [1,1,2,1,1].
# Example 2:
# Given the list [1,[4,[6]]],
# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be: [1,4,6].
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
import collections


class NestedIterator(object):
def __init__(self,nestedList):
self.queue = collections.deque([])
for elem in nestedList:
if elem.isInteger():
self.queue.append(elem.getInteger())
else:
newList = NestedIterator(elem.getList())
while newList.hasNext():
self.queue.append(newList.next())
def hasNext(self):
if self.queue:
return True
return False
def next(self):
return self.queue.popleft()
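
# Design note: this implementation flattens the whole nested structure eagerly
# in __init__, making hasNext()/next() O(1); a lazy variant would keep a stack
# of list iterators and descend into sublists only when next() is called.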
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next()) | mit | 2,659,754,284,575,051,300 | 33.133333 | 120 | 0.624817 | false |
dinhkute/Incisive-AIESEC | webroot/Pj/mysite/mysite/polls/urls.py | 1 | 2551 |
from django.conf.urls import url
from . import views
from django.contrib.auth.views import (login, logout,
password_reset,
password_reset_complete,
password_reset_done,
password_reset_confirm)
app_name = 'polls'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
#url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
    url(r'^login/$', login, {'template_name': 'polls/login.html'}, name='login'),
    url(r'^logout/$', logout, {'template_name': 'polls/logout.html'}, name='logout'),
    url(r'^register/$', views.register, name='register'),
url(r'^profile/$', views.profile, name='profile'),
url(r'^profile/edit/$', views.edit_profile, name='edit_profile'),
url(r'^change-password/$', views.change_password, name='changepassword'),
url(r'^reset-password/$', password_reset,
{'template_name': 'polls/reset_password.html', 'post_reset_redirect': 'polls:password_reset_done',
'email_template_name': 'polls/reset_password_email.html'}, name='reset_password'),
url(r'^reset-password/done/$', password_reset_done, {'template_name': 'polls/reset_password_done.html'},
name='password_reset_done'),
url(r'^reset-password/confirm/(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm,
{'template_name': 'polls/reset_password_confirm.html',
'post_reset_redirect': 'polls:password_reset_complete'}, name='password_reset_confirm'),
url(r'^reset-password/complete/$', password_reset_complete,
{'template_name': 'polls/reset_password_complete.html'}, name='password_reset_complete'),
url(r'^test/$', views.testTemplate.as_view(), name='test'),
url(r'^test/(?P<pk>[0-9]+)/$', views.DetailForm, name='detailform'),
url(r'^listform/$', views.RegisterFormList.as_view(), name='listform'),
url(r'^listform/(?P<pk>[0-9]+)/$', views.AnswerForm, name='question'),
url(r'^event/$', views.Event.as_view(), name='event'),
url(r'^event/(?P<pk>[0-9]+)/$', views.DetailEvent.as_view(), name='detail_event'),
url(r'^(?P<event_id>[0-9]+)/registerevent/$', views.UserEvent, name='register_event'),
] | apache-2.0 | -9,020,143,675,355,446,000 | 60.268293 | 108 | 0.593885 | false |
bayesimpact/bob-emploi | frontend/server/modules/test/commute_test.py | 1 | 7276 |
"""Unit tests for the commute module."""
import unittest
from bob_emploi.frontend.api import geo_pb2
from bob_emploi.frontend.server.test import base_test
from bob_emploi.frontend.server.test import scoring_test
class CommuteScoringModelTestCase(scoring_test.ScoringModelTestBase):
"""Unit test for the "Commute" scoring model."""
model_id = 'advice-commute'
# TODO(guillaume): Add more tests when the scoring model takes the city into account.
def setUp(self) -> None:
super().setUp()
self.persona = self._random_persona().clone()
self.database.cities.insert_one({
'_id': '69123',
'longitude': 4.8363116,
'latitude': 45.7640454,
})
self.database.hiring_cities.insert_one({
'_id': 'M1604',
'hiringCities': [
{
'offersPerInhabitant': .001,
'city': {
'cityId': '69028',
'name': 'Brindas',
'longitude': 4.6965532,
'latitude': 45.7179675,
'population': 10000,
}
},
{
'offersPerInhabitant': .000_1,
'city': {
'cityId': '69123',
'name': 'Lyon',
'longitude': 4.8363116,
'latitude': 45.7640454,
'population': 400000,
}
},
{
'offersPerInhabitant': .004,
'city': {
'cityId': '69290',
'name': 'Saint-Priest',
'longitude': 4.9123846,
'latitude': 45.7013617,
'population': 20000,
}
},
{
'offersPerInhabitant': .004,
'city': {
'cityId': '69256',
'name': 'Vaulx-en-Velin',
'longitude': 4.8892431,
'latitude': 45.7775502,
'population': 10000
}
}
]
})
def test_lyon(self) -> None:
"""Test that people in Lyon match."""
self.persona.project.city.city_id = '69123'
self.persona.project.target_job.job_group.rome_id = 'M1604'
score = self._score_persona(self.persona)
self.assertGreater(score, 1, msg=f'Fail for "{self.persona.name}"')
def test_non_valid(self) -> None:
"""Test that people with a non-valid INSEE code should not get any commute advice."""
self.persona.project.city.city_id = '691234'
self.persona.project.target_job.job_group.rome_id = 'M1604'
score = self._score_persona(self.persona)
self.assertEqual(score, 0, msg=f'Fail for "{self.persona.name}"')
def test_super_commute(self) -> None:
"""Test that people that wants to move and with super commute cities have score 3."""
self.persona.project.city.city_id = '69123'
self.persona.project.target_job.job_group.rome_id = 'M1604'
if self.persona.project.area_type <= geo_pb2.CITY:
self.persona.project.area_type = geo_pb2.DEPARTEMENT
score = self._score_persona(self.persona)
self.assertEqual(score, 3, msg=f'Fail for "{self.persona.name}"')
class EndpointTestCase(base_test.ServerTestCase):
"""Unit tests for the project/.../commute endpoint."""
def setUp(self) -> None:
super().setUp()
self._db.advice_modules.insert_one({
'adviceId': 'commute',
'triggerScoringModel': 'advice-commute',
})
self.user_id, self.auth_token = self.create_user_with_token(
modifiers=[base_test.add_project_modifier], advisor=True)
user_info = self.get_user_info(self.user_id, self.auth_token)
self.project_id = user_info['projects'][0]['projectId']
def test_bad_project_id(self) -> None:
"""Test with a non existing project ID."""
response = self.app.get(
f'/api/advice/commute/{self.user_id}/foo',
headers={'Authorization': 'Bearer ' + self.auth_token})
self.assertEqual(404, response.status_code)
self.assertIn('Projet "foo" inconnu.', response.get_data(as_text=True))
def test_no_cities(self) -> None:
"""Basic test with no cities."""
response = self.app.get(
f'/api/advice/commute/{self.user_id}/{self.project_id}',
headers={'Authorization': 'Bearer ' + self.auth_token})
self.assertEqual({}, self.json_from_response(response))
def test_lyon(self) -> None:
"""Cities available close to Lyon."""
user_id, auth_token = self.create_user_with_token(
data={'projects': [{
'city': {'cityId': '69123'},
'targetJob': {'jobGroup': {'romeId': 'A6789'}},
}]})
self._db.cities.insert_one({
'_id': '69123',
'name': 'Lyon',
'longitude': 4.8363116,
'latitude': 45.7640454,
'population': 400000,
})
self._db.hiring_cities.insert_one({
'_id': 'A6789',
'hiringCities': [
{
'offersPerInhabitant': .001,
'city': {
'cityId': '69028',
'departementId': '69',
'name': 'Brindas',
'longitude': 4.6965532,
'latitude': 45.7179675,
'population': 10000,
},
},
{
'offersPerInhabitant': .01,
'city': {
'cityId': '69266',
'departementId': '69',
'name': 'Villeurbanne',
'longitude': 4.6964532,
'latitude': 45.7178675,
'population': 10000,
},
},
{
'offersPerInhabitant': .000_4,
'city': {
'cityId': '69123',
'departementId': '69',
'name': 'Lyon',
'longitude': 4.8363116,
'latitude': 45.7640454,
'population': 400000,
},
},
],
})
user_info = self.get_user_info(user_id, auth_token)
project_id = user_info['projects'][0]['projectId']
response = self.app.get(
f'/api/advice/commute/{user_id}/{project_id}',
headers={'Authorization': 'Bearer ' + auth_token})
hiring_cities = self.json_from_response(response).get('cities', [])
self.assertEqual(['Villeurbanne', 'Brindas'], [h.get('name') for h in hiring_cities])
self.assertEqual('69', hiring_cities[0].get('departementId'))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 3,982,741,636,697,991,700 | 35.93401 | 93 | 0.4721 | false |
jtauber/pyuca | test.py | 1 | 8168 |
# coding: utf8
from __future__ import unicode_literals
import sys
import unittest
PYTHON3 = sys.version_info >= (3,)
V8_0_0 = sys.version_info >= (3, 5)
V10_0_0 = sys.version_info >= (3, 7)
class SmokeTest(unittest.TestCase):
def test_cafe(self):
from pyuca import Collator
c = Collator()
self.assertEqual(
sorted(["cafe", "caff", "café"]),
["cafe", "caff", "café"]
)
self.assertEqual(
sorted(["cafe", "caff", "café"], key=c.sort_key),
["cafe", "café", "caff"]
)
class UtilsTest(unittest.TestCase):
def test_hexstrings2int(self):
from pyuca.utils import hexstrings2int
self.assertEqual(
hexstrings2int(["0000", "0001", "FFFF"]),
[0, 1, 65535]
)
def test_int2hexstrings(self):
from pyuca.utils import int2hexstrings
self.assertEqual(
int2hexstrings([0, 1, 65535]),
["0000", "0001", "FFFF"]
)
def test_format_collation_elements(self):
from pyuca.utils import format_collation_elements
self.assertEqual(
format_collation_elements([[1, 2, 3], [4, 5]]),
"[0001.0002.0003], [0004.0005]"
)
def test_format_collation_elements_none(self):
from pyuca.utils import format_collation_elements
self.assertEqual(
format_collation_elements(None),
None
)
def test_format_sort_key(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key([0, 1, 65535]),
"| 0001 FFFF"
)
class TrieTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
from pyuca.trie import Trie
super(TrieTest, self).__init__(*args, **kwargs)
self.t = Trie()
def test_1(self):
self.t.add("foo", "bar")
self.assertEqual(self.t.find_prefix("fo"), ("", None, "fo"))
self.assertEqual(self.t.find_prefix("foo"), ("foo", "bar", ""))
self.assertEqual(self.t.find_prefix("food"), ("foo", "bar", "d"))
def test_2(self):
self.t.add("a", "yes")
self.t.add("abc", "yes")
self.assertEqual(self.t.find_prefix("abdc"), ("a", "yes", "bdc"))
class FromFullTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
from pyuca import Collator
super(FromFullTest, self).__init__(*args, **kwargs)
self.c = Collator()
    # (0, 74, 33, 0, 2, 2, 0) is the decimal form of test_1's expected sort key.
@unittest.skipIf(not PYTHON3, "only matches Python 3's UCA version")
def test_1(self):
self.assertEqual(
self.c.sort_key("\u0332\u0334"),
(0x0000, 0x004A, 0x0021, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(not PYTHON3, "only matches Python 3's UCA version")
@unittest.skipIf(V8_0_0, "not for UCA version 8.0.0")
@unittest.skipIf(V10_0_0, "not for UCA version 10.0.0")
def test_2(self):
self.assertEqual(
self.c.sort_key("\u0430\u0306\u0334"),
(0x1991, 0x0000, 0x0020, 0x004A, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(not PYTHON3, "only matches Python 3's UCA version")
@unittest.skipIf(V8_0_0, "not for UCA version 8.0.0")
@unittest.skipIf(V10_0_0, "not for UCA version 10.0.0")
def test_3(self):
self.assertEqual(
self.c.sort_key("\u0FB2\u0F71\u0001\u0F80\u0061"),
(0x2571, 0x2587, 0x258A, 0x15EB, 0x0000, 0x0020, 0x0020, 0x0020,
0x0020, 0x0000, 0x0002, 0x0002, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(not PYTHON3, "only matches Python 3's UCA version")
@unittest.skipIf(V8_0_0, "not for UCA version 8.0.0")
@unittest.skipIf(V10_0_0, "not for UCA version 10.0.0")
def test_4(self):
self.assertEqual(
self.c.sort_key("\u4E00\u0021"),
(0xFB40, 0xCE00, 0x025D, 0x0000, 0x0020,
0x0020, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(not PYTHON3, "only matches Python 3's UCA version")
@unittest.skipIf(V8_0_0, "not for UCA version 8.0.0")
@unittest.skipIf(V10_0_0, "not for UCA version 10.0.0")
def test_5(self):
self.assertEqual(
self.c.sort_key("\u3400\u0021"),
(0xFB80, 0xB400, 0x025D, 0x0000, 0x0020,
0x0020, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(PYTHON3, "only matches the older Python 2's UCA version")
def test_1_old(self):
self.assertEqual(
self.c.sort_key("\u0332\u0334"),
(0x0000, 0x007C, 0x0021, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(PYTHON3, "only matches the older Python 2's UCA version")
def test_2_old(self):
self.assertEqual(
self.c.sort_key("\u0430\u0306\u0334"),
(0x15B0, 0x0000, 0x0020, 0x007C, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(PYTHON3, "only matches the older Python 2's UCA version")
def test_3_old(self):
self.assertEqual(
self.c.sort_key("\u0FB2\u0F71\u0001\u0F80\u0061"),
(0x205B, 0x206D, 0x2070, 0x120F, 0x0000, 0x0020, 0x0020, 0x0020,
0x0020, 0x0000, 0x0002, 0x0002, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(PYTHON3, "only matches the older Python 2's UCA version")
def test_4_old(self):
self.assertEqual(
self.c.sort_key("\u4E00\u0021"),
(0xFB40, 0xCE00, 0x026E, 0x0000, 0x0020,
0x0020, 0x0000, 0x0002, 0x0002, 0x0000)
)
@unittest.skipIf(PYTHON3, "only matches the older Python 2's UCA version")
def test_5_old(self):
self.assertEqual(
self.c.sort_key("\u3400\u0021"),
(0xFB80, 0xB400, 0x026E, 0x0000, 0x0020,
0x0020, 0x0000, 0x0002, 0x0002, 0x0000)
)
class FromFullTestV8_0_0(unittest.TestCase):
def __init__(self, *args, **kwargs):
from pyuca.collator import Collator_8_0_0
super(FromFullTestV8_0_0, self).__init__(*args, **kwargs)
self.c = Collator_8_0_0()
@unittest.skipIf(not V8_0_0, "only matches UCA version 8.0.0")
def test_1(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key(self.c.sort_key("\u9FD5\u0062")),
"FB41 9FD5 1BDB | 0020 0020 | 0002 0002 |",
)
@unittest.skipIf(not V8_0_0, "only matches UCA version 8.0.0")
def test_2(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key(self.c.sort_key("\U0002CEA1\u0062")),
"FB85 CEA1 1BDB | 0020 0020 | 0002 0002 |",
)
@unittest.skipIf(not V8_0_0, "only matches UCA version 8.0.0")
def test_3(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key(self.c.sort_key("\U0002B81E\u0062")),
"FBC5 B81E 1BDB | 0020 0020 | 0002 0002 |",
)
@unittest.skipIf(not V8_0_0, "only matches UCA version 8.0.0")
def test_4(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key(self.c.sort_key("\U0002CEA2\u0021")),
"FBC5 CEA2 025F | 0020 0020 | 0002 0002 |",
)
class FromFullTestV10_0_0(unittest.TestCase):
def __init__(self, *args, **kwargs):
from pyuca.collator import Collator_10_0_0
super(FromFullTestV10_0_0, self).__init__(*args, **kwargs)
self.c = Collator_10_0_0()
@unittest.skipIf(not V10_0_0, "only matches UCA version 10.0.0")
def test_1(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key(self.c.sort_key("\u1DF6\u0334")),
"| 004A 0033 | 0002 0002 |",
)
@unittest.skipIf(not V10_0_0, "only matches UCA version 10.0.0")
def test_2(self):
from pyuca.utils import format_sort_key
self.assertEqual(
format_sort_key(self.c.sort_key("\u9FEA\u0062")),
"FB41 9FEA 1CC6 | 0020 0020 | 0002 0002 |",
)
if __name__ == "__main__":
unittest.main()
| mit | -2,176,341,617,226,708,500 | 32.596708 | 78 | 0.578393 | false |
amitjamadagni/sympy | sympy/combinatorics/tests/test_partitions.py | 3 | 3304 |
from sympy.combinatorics.partitions import (Partition, IntegerPartition,
RGS_enum, RGS_unrank, RGS_rank,
random_integer_partition)
from sympy.utilities.pytest import raises
from sympy.utilities.iterables import default_sort_key, partitions
def test_partition():
from sympy.abc import x
raises(ValueError, lambda: Partition(range(3)))
raises(ValueError, lambda: Partition([[1, 1, 2]]))
a = Partition([[1, 2, 3], [4]])
b = Partition([[1, 2], [3, 4]])
c = Partition([[x]])
l = [a, b, c]
l.sort(key=default_sort_key)
assert l == [c, a, b]
l.sort(key=lambda w: default_sort_key(w, order='rev-lex'))
assert l == [c, a, b]
assert (a == b) is False
assert a <= b
assert (a > b) is False
assert a != b
assert (a + 2).partition == [[1, 2], [3, 4]]
assert (b - 1).partition == [[1, 2, 4], [3]]
assert (a - 1).partition == [[1, 2, 3, 4]]
assert (a + 1).partition == [[1, 2, 4], [3]]
assert (b + 1).partition == [[1, 2], [3], [4]]
assert a.rank == 1
assert b.rank == 3
assert a.RGS == (0, 0, 0, 1)
assert b.RGS == (0, 0, 1, 1)
def test_integer_partition():
# no zeros in partition
raises(ValueError, lambda: IntegerPartition(range(3)))
# check fails since 1 + 2 != 100
raises(ValueError, lambda: IntegerPartition(100, range(1, 3)))
a = IntegerPartition(8, [1, 3, 4])
b = a.next_lex()
c = IntegerPartition([1, 3, 4])
d = IntegerPartition(8, {1: 3, 3: 1, 2: 1})
assert a == c
assert a.integer == d.integer
assert a.conjugate == [3, 2, 2, 1]
assert (a == b) is False
assert a <= b
assert (a > b) is False
assert a != b
for i in range(1, 11):
next = set()
prev = set()
a = IntegerPartition([i])
ans = set([IntegerPartition(p) for p in partitions(i)])
n = len(ans)
for j in range(n):
next.add(a)
a = a.next_lex()
IntegerPartition(i, a.partition) # check it by giving i
for j in range(n):
prev.add(a)
a = a.prev_lex()
IntegerPartition(i, a.partition) # check it by giving i
assert next == ans
assert prev == ans
assert IntegerPartition([1, 2, 3]).as_ferrers() == '###\n##\n#'
assert IntegerPartition([1, 1, 3]).as_ferrers('o') == 'ooo\no\no'
assert str(IntegerPartition([1, 1, 3])) == '[3, 1, 1]'
assert IntegerPartition([1, 1, 3]).partition == [3, 1, 1]
raises(ValueError, lambda: random_integer_partition(-1))
assert random_integer_partition(1) == [1]
assert random_integer_partition(10, seed=[1, 3, 2, 1, 5, 1]
) == [5, 2, 1, 1, 1]
def test_rgs():
raises(ValueError, lambda: RGS_unrank(-1, 3))
raises(ValueError, lambda: RGS_unrank(3, 0))
raises(ValueError, lambda: RGS_unrank(10, 1))
raises(ValueError, lambda: Partition.from_rgs(range(3), range(2)))
raises(ValueError, lambda: Partition.from_rgs(range(1, 3), range(2)))
assert RGS_enum(-1) == 0
assert RGS_enum(1) == 1
assert RGS_unrank(7, 5) == [0, 0, 1, 0, 2]
assert RGS_unrank(23, 14) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 2]
assert RGS_rank(RGS_unrank(40, 100)) == 40
| bsd-3-clause | 5,293,114,591,856,392,000 | 32.714286 | 75 | 0.548729 | false |
tsdmgz/ansible | lib/ansible/modules/network/nxos/nxos_snapshot.py | 1 | 14173 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_snapshot
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manage snapshots of the running states of selected features.
description:
- Create snapshots of the running states of selected features, add
new show commands for snapshot creation, delete and compare
existing snapshots.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(transport=cli) may cause timeout errors.
  - The C(element_key1) and C(element_key2) parameters specify the tags used
    to distinguish among row entries. In most cases, only the element_key1
    parameter needs to be specified to be able to distinguish among row entries.
- C(action=compare) will always store a comparison report on a local file.
options:
action:
description:
- Define what snapshot action the module would perform.
required: true
    choices: ['create','add','compare','delete','delete_all']
snapshot_name:
description:
- Snapshot name, to be used when C(action=create)
or C(action=delete).
required: false
default: null
description:
description:
- Snapshot description to be used when C(action=create).
required: false
default: null
snapshot1:
description:
- First snapshot to be used when C(action=compare).
required: false
default: null
snapshot2:
description:
- Second snapshot to be used when C(action=compare).
required: false
default: null
comparison_results_file:
description:
      - Name of the file where the snapshot comparison will be stored.
required: false
default: null
compare_option:
description:
- Snapshot options to be used when C(action=compare).
required: false
default: null
choices: ['summary','ipv4routes','ipv6routes']
section:
description:
- Used to name the show command output, to be used
when C(action=add).
required: false
default: null
show_command:
description:
- Specify a new show command, to be used when C(action=add).
required: false
default: null
row_id:
description:
- Specifies the tag of each row entry of the show command's
XML output, to be used when C(action=add).
required: false
default: null
element_key1:
description:
- Specify the tags used to distinguish among row entries,
to be used when C(action=add).
required: false
default: null
element_key2:
description:
- Specify the tags used to distinguish among row entries,
to be used when C(action=add).
required: false
default: null
save_snapshot_locally:
description:
      - Specify to locally store a newly created snapshot,
to be used when C(action=create).
required: false
default: false
choices: ['true','false']
path:
description:
      - Specify the path of the file where a newly created snapshot or
        snapshot comparison will be stored, to be used when
C(action=create) and C(save_snapshot_locally=true) or
C(action=compare).
required: false
default: './'
'''
EXAMPLES = '''
# Create a snapshot and store it locally
- nxos_snapshot:
action: create
snapshot_name: test_snapshot
description: Done with Ansible
save_snapshot_locally: true
path: /home/user/snapshots/
# Delete a snapshot
- nxos_snapshot:
action: delete
snapshot_name: test_snapshot
# Delete all existing snapshots
- nxos_snapshot:
action: delete_all
# Add a show command for snapshots creation
- nxos_snapshot:
    action: add
    section: myshow
    show_command: show ip interface brief
    row_id: ROW_intf
    element_key1: intf-name
# Compare two snapshots
- nxos_snapshot:
action: compare
snapshot1: pre_snapshot
snapshot2: post_snapshot
comparison_results_file: compare_snapshots.txt
compare_option: summary
path: '../snapshot_reports/'
'''
RETURN = '''
commands:
description: commands sent to the device
returned: verbose mode
type: list
sample: ["snapshot create post_snapshot Post-snapshot"]
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
def execute_show_command(command, module):
command = [{
'command': command,
'output': 'text',
}]
return run_commands(module, command)
def get_existing(module):
existing = []
command = 'show snapshots'
body = execute_show_command(command, module)[0]
if body:
split_body = body.splitlines()
snapshot_regex = (r'(?P<name>\S+)\s+(?P<date>\w+\s+\w+\s+\d+\s+\d+'
r':\d+:\d+\s+\d+)\s+(?P<description>.*)')
for snapshot in split_body:
temp = {}
try:
match_snapshot = re.match(snapshot_regex, snapshot, re.DOTALL)
snapshot_group = match_snapshot.groupdict()
temp['name'] = snapshot_group['name']
temp['date'] = snapshot_group['date']
temp['description'] = snapshot_group['description']
existing.append(temp)
except AttributeError:
pass
return existing
def action_create(module, existing_snapshots):
commands = list()
exist = False
for snapshot in existing_snapshots:
if module.params['snapshot_name'] == snapshot['name']:
exist = True
if exist is False:
commands.append('snapshot create {0} {1}'.format(
module.params['snapshot_name'], module.params['description']))
return commands
def action_add(module, existing_snapshots):
commands = list()
command = 'show snapshot sections'
sections = []
body = execute_show_command(command, module)[0]
if body:
section_regex = r'.*\[(?P<section>\S+)\].*'
split_body = body.split('\n\n')
for section in split_body:
temp = {}
for line in section.splitlines():
try:
match_section = re.match(section_regex, section, re.DOTALL)
temp['section'] = match_section.groupdict()['section']
except (AttributeError, KeyError):
pass
if 'show command' in line:
temp['show_command'] = line.split('show command: ')[1]
elif 'row id' in line:
temp['row_id'] = line.split('row id: ')[1]
elif 'key1' in line:
temp['element_key1'] = line.split('key1: ')[1]
elif 'key2' in line:
temp['element_key2'] = line.split('key2: ')[1]
if temp:
sections.append(temp)
proposed = {
'section': module.params['section'],
'show_command': module.params['show_command'],
'row_id': module.params['row_id'],
'element_key1': module.params['element_key1'],
'element_key2': module.params['element_key2'] or '-',
}
if proposed not in sections:
if module.params['element_key2']:
commands.append('snapshot section add {0} "{1}" {2} {3} {4}'.format(
module.params['section'], module.params['show_command'],
module.params['row_id'], module.params['element_key1'],
module.params['element_key2']))
else:
commands.append('snapshot section add {0} "{1}" {2} {3}'.format(
module.params['section'], module.params['show_command'],
module.params['row_id'], module.params['element_key1']))
return commands
def action_compare(module, existing_snapshots):
command = 'show snapshot compare {0} {1}'.format(
module.params['snapshot1'], module.params['snapshot2'])
if module.params['compare_option']:
command += ' {0}'.format(module.params['compare_option'])
body = execute_show_command(command, module)[0]
return body
def action_delete(module, existing_snapshots):
commands = list()
exist = False
for snapshot in existing_snapshots:
if module.params['snapshot_name'] == snapshot['name']:
exist = True
if exist:
commands.append('snapshot delete {0}'.format(
module.params['snapshot_name']))
return commands
def action_delete_all(module, existing_snapshots):
commands = list()
if existing_snapshots:
commands.append('snapshot delete all')
return commands
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
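
# Note: `invoke` dispatches dynamically, e.g. invoke('action_%s' % action, ...)
# resolves the module-level function `action_create`, `action_delete`, etc.
# via globals().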
def get_snapshot(module):
command = 'show snapshot dump {0}'.format(module.params['snapshot_name'])
body = execute_show_command(command, module)[0]
return body
def write_on_file(content, filename, module):
path = module.params['path']
if path[-1] != '/':
path += '/'
filepath = '{0}{1}'.format(path, filename)
try:
report = open(filepath, 'w')
report.write(content)
report.close()
    except IOError:
        module.fail_json(msg="Error while writing to file.")
return filepath
def main():
argument_spec = dict(
action=dict(required=True, choices=['create', 'add', 'compare', 'delete', 'delete_all']),
snapshot_name=dict(type='str'),
description=dict(type='str'),
snapshot1=dict(type='str'),
snapshot2=dict(type='str'),
compare_option=dict(choices=['summary', 'ipv4routes', 'ipv6routes']),
comparison_results_file=dict(type='str'),
section=dict(type='str'),
show_command=dict(type='str'),
row_id=dict(type='str'),
element_key1=dict(type='str'),
element_key2=dict(type='str'),
save_snapshot_locally=dict(type='bool', default=False),
path=dict(type='str', default='./')
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
action = module.params['action']
comparison_results_file = module.params['comparison_results_file']
CREATE_PARAMS = ['snapshot_name', 'description']
ADD_PARAMS = ['section', 'show_command', 'row_id', 'element_key1']
COMPARE_PARAMS = ['snapshot1', 'snapshot2', 'comparison_results_file']
if not os.path.isdir(module.params['path']):
module.fail_json(msg='{0} is not a valid directory name.'.format(
module.params['path']))
if action == 'create':
for param in CREATE_PARAMS:
if not module.params[param]:
module.fail_json(msg='snapshot_name and description are '
'required when action=create')
elif action == 'add':
for param in ADD_PARAMS:
if not module.params[param]:
module.fail_json(msg='section, show_command, row_id '
'and element_key1 are required '
'when action=add')
elif action == 'compare':
for param in COMPARE_PARAMS:
if not module.params[param]:
module.fail_json(msg='snapshot1 and snapshot2 are required '
'when action=create')
elif action == 'delete' and not module.params['snapshot_name']:
module.fail_json(msg='snapshot_name is required when action=delete')
existing_snapshots = invoke('get_existing', module)
action_results = invoke('action_%s' % action, module, existing_snapshots)
result = {'changed': False, 'commands': []}
if not module.check_mode:
if action == 'compare':
result['commands'] = []
if module.params['path'] and comparison_results_file:
snapshot1 = module.params['snapshot1']
snapshot2 = module.params['snapshot2']
compare_option = module.params['compare_option']
command = 'show snapshot compare {0} {1} {2}'.format(snapshot1, snapshot2, compare_option)
content = execute_show_command(command, module)[0]
if content:
write_on_file(content, comparison_results_file, module)
else:
if action_results:
load_config(module, action_results)
result['commands'] = action_results
result['changed'] = True
if action == 'create' and module.params['path']:
command = 'show snapshot | include {}'.format(module.params['snapshot_name'])
content = execute_show_command(command, module)[0]
if content:
write_on_file(content, module.params['snapshot_name'], module)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 292,380,842,566,198,300 | 32.426887 | 106 | 0.596133 | false |
jacquerie/inspire-next | inspirehep/modules/workflows/workflows/article.py | 1 | 12747 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Workflow for processing single arXiv records harvested."""
from __future__ import absolute_import, division, print_function
from workflow.patterns.controlflow import (
IF,
IF_NOT,
IF_ELSE,
)
from inspirehep.modules.workflows.tasks.refextract import extract_journal_info
from inspirehep.modules.workflows.tasks.arxiv import (
arxiv_author_list,
arxiv_package_download,
arxiv_plot_extract,
arxiv_derive_inspire_categories,
populate_arxiv_document,
)
from inspirehep.modules.workflows.tasks.actions import (
add_core,
download_documents,
error_workflow,
fix_submission_number,
halt_record,
is_arxiv_paper,
is_experimental_paper,
is_marked,
is_record_accepted,
is_record_relevant,
is_submission,
mark,
normalize_journal_titles,
populate_journal_coverage,
populate_submission_document,
refextract,
reject_record,
save_workflow,
set_refereed_and_fix_document_type,
validate_record,
)
from inspirehep.modules.workflows.tasks.classifier import (
classify_paper,
filter_core_keywords,
)
from inspirehep.modules.workflows.tasks.beard import guess_coreness
from inspirehep.modules.workflows.tasks.magpie import (
guess_keywords,
guess_categories,
guess_experiments,
)
from inspirehep.modules.workflows.tasks.matching import (
stop_processing,
match_non_completed_wf_in_holdingpen,
match_previously_rejected_wf_in_holdingpen,
exact_match,
fuzzy_match,
is_fuzzy_match_approved,
set_exact_match_as_approved_in_extradata,
set_fuzzy_match_approved_in_extradata,
previously_rejected,
has_same_source,
stop_matched_holdingpen_wfs,
auto_approve,
set_core_in_extra_data,
)
from inspirehep.modules.workflows.tasks.upload import store_record, set_schema
from inspirehep.modules.workflows.tasks.submission import (
close_ticket,
create_ticket,
filter_keywords,
prepare_keywords,
remove_references,
reply_ticket,
send_robotupload,
wait_webcoll,
)
from inspirehep.modules.literaturesuggest.tasks import (
curation_ticket_needed,
reply_ticket_context,
new_ticket_context,
curation_ticket_context,
)
NOTIFY_SUBMISSION = [
create_ticket(
template="literaturesuggest/tickets/curator_submitted.html",
queue="HEP_add_user",
context_factory=new_ticket_context,
ticket_id_key="ticket_id"
),
reply_ticket(
template="literaturesuggest/tickets/user_submitted.html",
context_factory=reply_ticket_context,
keep_new=True
),
]
CHECK_AUTO_APPROVE = [
IF_ELSE(
is_submission,
mark('auto-approved', False),
IF_ELSE(
auto_approve,
[
mark('auto-approved', True),
set_core_in_extra_data,
],
mark('auto-approved', False),
),
),
]
ENHANCE_RECORD = [
IF(
is_arxiv_paper,
[
populate_arxiv_document,
arxiv_package_download,
arxiv_plot_extract,
arxiv_derive_inspire_categories,
arxiv_author_list("authorlist2marcxml.xsl"),
]
),
IF(
is_submission,
populate_submission_document,
),
download_documents,
refextract,
normalize_journal_titles,
extract_journal_info,
populate_journal_coverage,
classify_paper(
taxonomy="HEPont.rdf",
only_core_tags=False,
spires=True,
with_author_keywords=True,
),
filter_core_keywords,
guess_categories,
IF(
is_experimental_paper,
guess_experiments,
),
guess_keywords,
guess_coreness,
]
NOTIFY_NOT_ACCEPTED = [
IF(
is_submission,
reply_ticket(context_factory=reply_ticket_context),
)
]
NOTIFY_ALREADY_EXISTING = [
reject_record('Article was already found on INSPIRE'),
mark('approved', False),
reply_ticket(
template=(
"literaturesuggest/tickets/"
"user_rejected_exists.html"
),
context_factory=reply_ticket_context
),
close_ticket(ticket_id_key="ticket_id"),
save_workflow,
stop_processing,
]
NOTIFY_ACCEPTED = [
IF(
is_submission,
reply_ticket(
template='literaturesuggest/tickets/user_accepted.html',
context_factory=reply_ticket_context,
),
),
]
NOTIFY_CURATOR_IF_CORE = [
IF_NOT(
is_marked('is-update'),
IF(
curation_ticket_needed,
create_ticket(
template='literaturesuggest/tickets/curation_core.html',
queue='HEP_curation',
context_factory=curation_ticket_context,
ticket_id_key='curation_ticket_id',
),
),
),
]
POSTENHANCE_RECORD = [
add_core,
filter_keywords,
prepare_keywords,
remove_references,
set_refereed_and_fix_document_type,
fix_submission_number,
]
SEND_TO_LEGACY = [
IF_ELSE(
is_marked('is-update'),
[
# TODO: once we have the merger in place
# send_robotupload(mode="replace")
mark('skipped-robot-upload', True)
],
[
send_robotupload(mode="replace"),
]
),
]
WAIT_FOR_LEGACY_WEBCOLL = [
IF_NOT(
is_marked('is-update'),
wait_webcoll,
),
]
STOP_IF_EXISTING_SUBMISSION = [
IF(
is_submission,
IF(
is_marked('is-update'),
NOTIFY_ALREADY_EXISTING
)
)
]
HALT_FOR_APPROVAL_IF_NEW_OR_STOP_IF_NOT_RELEVANT = [
IF_NOT(
is_record_relevant,
[
reject_record('Article automatically rejected'),
mark('approved', False),
save_workflow,
stop_processing,
],
),
IF_ELSE(
is_marked('is-update'),
[
mark('approved', True)
],
IF_ELSE(
is_marked('auto-approved'),
mark('approved', True),
halt_record(
action="hep_approval",
message="Submission halted for curator approval.",
)
),
),
]
STORE_RECORD = [
IF_ELSE(
is_marked('is-update'),
mark('skipped-store-record', True),
store_record,
)
]
MARK_IF_MATCH_IN_HOLDINGPEN = [
IF_ELSE(
match_non_completed_wf_in_holdingpen,
[
mark('already-in-holding-pen', True),
save_workflow,
],
mark('already-in-holding-pen', False),
),
IF_ELSE(
match_previously_rejected_wf_in_holdingpen,
[
mark('previously_rejected', True),
save_workflow,
],
mark('previously_rejected', False),
)
]
ERROR_WITH_UNEXPECTED_WORKFLOW_PATH = [
mark('unexpected-workflow-path', True),
error_workflow('Unexpected workflow path.'),
save_workflow,
]
# Currently we handle harvests as if all were arxiv, that will have to change.
PROCESS_HOLDINGPEN_MATCH_HARVEST = [
IF_NOT(
is_marked('is-update'),
IF(
is_marked('previously_rejected'),
IF_NOT(
is_marked('auto-approved'),
IF(
has_same_source('previously_rejected_matches'),
[
mark('approved', False), # auto-reject
save_workflow,
stop_processing,
],
)
),
),
),
IF_ELSE(
is_marked('already-in-holding-pen'),
IF_ELSE(
has_same_source('holdingpen_matches'),
# stop the matched wf and continue this one
[
stop_matched_holdingpen_wfs,
mark('stopped-matched-holdingpen-wf', True),
],
[
# else, it's an update from another source
# keep the old one
mark('stopped-matched-holdingpen-wf', False),
save_workflow,
stop_processing
],
),
mark('stopped-matched-holdingpen-wf', False),
),
save_workflow,
]
PROCESS_HOLDINGPEN_MATCH_SUBMISSION = [
IF(
is_marked('already-in-holding-pen'),
IF_ELSE(
has_same_source('holdingpen_matches'),
# form should detect this double submission
ERROR_WITH_UNEXPECTED_WORKFLOW_PATH,
# stop the matched wf and continue this one
[
stop_matched_holdingpen_wfs,
mark('stopped-matched-holdingpen-wf', True),
save_workflow
],
)
)
]
PROCESS_HOLDINGPEN_MATCHES = [
IF_ELSE(
is_submission,
PROCESS_HOLDINGPEN_MATCH_SUBMISSION,
PROCESS_HOLDINGPEN_MATCH_HARVEST,
)
]
CHECK_IS_UPDATE = [
IF_ELSE(
exact_match,
[
set_exact_match_as_approved_in_extradata,
mark('is-update', True),
mark('exact-matched', True),
],
IF_ELSE(
fuzzy_match,
[
halt_record(
action="match_approval",
message="Halted for matching approval.",
),
IF_ELSE(
is_fuzzy_match_approved,
[
set_fuzzy_match_approved_in_extradata,
mark('fuzzy-matched', True),
mark('is-update', True),
],
mark('is-update', False),
)
],
mark('is-update', False),
)
),
save_workflow,
]
STOP_IF_TOO_OLD = [
# checks to perform only for harvested records
IF_ELSE(
is_submission,
[
mark('too-many-days', False),
],
[
IF_ELSE(
previously_rejected(),
[
mark('too-many-days', True),
save_workflow,
stop_processing,
],
mark('too-many-days', False),
),
]
),
save_workflow,
]
NOTIFY_IF_SUBMISSION = [
IF(
is_submission,
NOTIFY_SUBMISSION,
)
]
INIT_MARKS = [
mark('too-many-days', None),
mark('auto-approved', None),
mark('already-in-holding-pen', None),
mark('previously_rejected', None),
mark('is-update', None),
mark('stopped-matched-holdingpen-wf', None),
mark('approved', None),
mark('unexpected-workflow-path', None),
save_workflow
]
PRE_PROCESSING = [
# Make sure schema is set for proper indexing in Holding Pen
set_schema,
INIT_MARKS,
validate_record('hep')
]
class Article(object):
"""Article ingestion workflow for Literature collection."""
name = "HEP"
data_type = "hep"
workflow = (
PRE_PROCESSING +
NOTIFY_IF_SUBMISSION +
MARK_IF_MATCH_IN_HOLDINGPEN +
CHECK_IS_UPDATE +
STOP_IF_EXISTING_SUBMISSION +
CHECK_AUTO_APPROVE +
PROCESS_HOLDINGPEN_MATCHES +
ENHANCE_RECORD +
HALT_FOR_APPROVAL_IF_NEW_OR_STOP_IF_NOT_RELEVANT +
[
IF_ELSE(
is_record_accepted,
(
POSTENHANCE_RECORD +
STORE_RECORD +
SEND_TO_LEGACY +
WAIT_FOR_LEGACY_WEBCOLL +
NOTIFY_ACCEPTED +
NOTIFY_CURATOR_IF_CORE
),
NOTIFY_NOT_ACCEPTED,
),
IF(
is_submission,
close_ticket(ticket_id_key="ticket_id"),
)
]
)
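# Illustrative sketch only, not part of the original module: the workflow
# parts above are plain Python lists of step callables, so they compose by
# simple concatenation. A minimal walker could look like the hypothetical
# helper below; the real invenio-workflows engine API differs.
def _run_steps_sketch(steps, obj, eng):
    """Hypothetical helper: apply each workflow step to (obj, eng)."""
    for step in steps:
        if isinstance(step, list):
            _run_steps_sketch(step, obj, eng)  # nested part, e.g. ENHANCE_RECORD
        else:
            step(obj, eng)  # IF/IF_ELSE and friends also return step callables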
| gpl-3.0 | 1,027,301,321,972,007,800 | 23.372849 | 78 | 0.556915 | false |
kwikteam/phy | phy/plot/gloo/array.py | 1 | 2303 | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
Vertex Array objects are OpenGL objects that store all of the state needed
to supply vertex data. Only available from GL > 3.2.
Read more on buffer objects on `OpenGL Wiki
<https://www.opengl.org/wiki/Vertex_Specification>`_
**Example usage**:
.. code:: python
dtype = [("position", np.float32, 3),
("color", np.float32, 4)]
V = np.zeros(4,dtype).view(gloo.VertexArray)
"""
import logging
import numpy as np
from . import gl
from .gpudata import GPUData
from .globject import GLObject
from .buffer import VertexBuffer
log = logging.getLogger(__name__)
class VertexArray(GPUData, GLObject):
"""
Vertex array.
A vertex array is an interface used to specify vertex data structure.
"""
def __init__(self, usage=gl.GL_DYNAMIC_DRAW):
GLObject.__init__(self)
self._target = gl.GL_ARRAY_BUFFER
self._buffer = self.view(VertexBuffer)
self._buffer.__init__(usage)
@property
def need_update(self):
""" Whether object needs to be updated """
return self._buffer.need_update
def _update(self):
""" Upload all pending data to GPU. """
self._buffer._update()
def _create(self):
""" Create vertex array on GPU """
self._handle = gl.glGenVertexArrays(1)
log.debug("GPU: Creating vertex array (id=%d)" % self._id)
self._deactivate()
self._buffer._create()
def _delete(self):
""" Delete vertex array from GPU """
if self._handle > -1:
self._buffer._delete()
gl.glDeleteVertexArrays(1, np.array([self._handle]))
def _activate(self):
""" Bind the array """
log.debug("GPU: Activating array (id=%d)" % self._id)
gl.glBindVertexArray(self._handle)
self._buffer._activate()
def _deactivate(self):
""" Unbind the current bound array """
self._buffer._deactivate()
log.debug("GPU: Deactivating array (id=%d)" % self._id)
gl.glBindVertexArray(0)
| bsd-3-clause | 7,233,584,848,580,602,000 | 26.094118 | 79 | 0.570994 | false |
jimrybarski/fylm | fylm/model/workunit.py | 1 | 1243 | class WorkUnit(object):
def __init__(self, model, required_data_sources, dependencies=None):
"""
:param model: the data structure that is being populated with data
:param required_data_sources: one of: "nd2", "imagereader"
:type required_data_sources: str
:param dependencies: models that must be completed before this one can be started
        :type dependencies: list of str
"""
self._model = model
self._required_data_sources = required_data_sources
self._dependencies = dependencies
self._model_sources = {}
def add_model_source(self, model_source):
self._model_sources[model_source.name] = model_source
@property
def required_data_sources(self):
return self._required_data_sources
@property
def is_complete(self):
return self._model.is_complete
@property
def name(self):
return self._model.name
def dependencies_satisfied(self, completed_work_units):
completed_names = [work_unit.name for work_unit in completed_work_units]
for dependency in self._dependencies:
if dependency not in completed_names:
return False
return True | mit | 7,536,287,025,366,467,000 | 32.621622 | 92 | 0.635559 | false |
ibusybox/algorithm | src/python/test_sort.py | 1 | 3366 | #!/usr/bin/env python
#coding: utf-8
import unittest
import sort
class TestInsertionSort(unittest.TestCase):
def setUp(self):
print "start insertion sort test"
def tearDown(self):
print "end insertion sort test"
def test_case01(self):
A = [3,2,1]
print "origin A=%s" % A
sort.insertion_sort(A)
print "insertion sorted A=%s" % A
self.assertTrue([1,2,3] == A)
def test_case02(self):
A = [3,1]
print "origin A=%s" % A
sort.insertion_sort(A)
print "insertion sorted A=%s" % A
self.assertTrue([1,3] == A)
def test_case03(self):
A = [3,4,5,2,1]
print "origin A=%s" % A
sort.insertion_sort(A)
print "insertion sorted A=%s" % A
self.assertTrue([1,2,3,4,5] == A)
class TestSelectionSort(unittest.TestCase):
def setUp(self):
print "start selection sort test"
def tearDown(self):
print "end selection sort test"
def test_case01(self):
A = [3,2,1]
print "origin A=%s" % A
sort.selection_sort(A)
print "selection sorted A=%s" % A
self.assertTrue([1,2,3] == A)
def test_case02(self):
A = [3,1]
print "origin A=%s" % A
sort.selection_sort(A)
print "selection sorted A=%s" % A
self.assertTrue([1,3] == A)
def test_case03(self):
A = [3,4,5,2,1]
print "origin A=%s" % A
sort.selection_sort(A)
print "selection sorted A=%s" % A
self.assertTrue([1,2,3,4,5] == A)
class TestBubbleSort(unittest.TestCase):
def setUp(self):
print "start bubble sort test"
def tearDown(self):
print "end bubble sort test"
def test_case01(self):
A = [3,2,1]
print "origin A=%s" % A
sort.bubble_sort(A)
print "bubble sorted A=%s" % A
self.assertTrue([1,2,3] == A)
def test_case02(self):
A = [3,1]
print "origin A=%s" % A
sort.bubble_sort(A)
print "bubble sorted A=%s" % A
self.assertTrue([1,3] == A)
def test_case03(self):
A = [3,4,5,2,1]
print "origin A=%s" % A
sort.bubble_sort(A)
print "bubble sorted A=%s" % A
self.assertTrue([1,2,3,4,5] == A)
class TestMergeSort(unittest.TestCase):
def setUp(self):
print "start merge sort test"
def tearDown(self):
print "end merge sort test"
def test_case01(self):
A = [3,2,1]
print "origin A=%s" % A
sort.merge_sort(A, 0, len(A))
print "merge sorted A=%s" % A
self.assertTrue([1,2,3] == A)
def test_case02(self):
A = [3,1]
print "origin A=%s" % A
sort.merge_sort(A, 0, len(A))
print "merge sorted A=%s" % A
self.assertTrue([1,3] == A)
def test_case03(self):
A = [3,4,5,2,1]
print "origin A=%s" % A
sort.merge_sort(A, 0, len(A))
print "merge sorted A=%s" % A
self.assertTrue([1,2,3,4,5] == A)
if __name__ == '__main__':
suit = unittest.TestSuite()
suit.addTest(unittest.makeSuite(TestSelectionSort))
suit.addTest(unittest.makeSuite(TestInsertionSort))
suit.addTest(unittest.makeSuite(TestBubbleSort))
suit.addTest(unittest.makeSuite(TestMergeSort))
runner = unittest.TextTestRunner()
runner.run(suit)
| apache-2.0 | -3,799,206,955,780,221,400 | 29.324324 | 55 | 0.549614 | false |
davebrent/consyn | consyn/cli/add.py | 1 | 3643 | # -*- coding: utf-8 -*-
# Copyright (C) 2014, David Poulter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
import os
import time
import click
try:
from glob2 import glob
except ImportError:
from glob import glob
from . import configurator
from ..commands import add_mediafile
from ..commands import remove_mediafile
from ..models import MediaFile
@click.command("add", short_help="Add a mediafile to a database.")
@click.option("--force", is_flag=True, default=False,
help="Overwrite file(s) if already exists.")
@click.option("--bufsize", default=1024,
help="Buffer size in samples.")
@click.option("--hopsize", default=512,
help="Hopsize in samples.")
@click.option("--onset-threshold", default=0.3,
help="Aubio onset threshold.")
@click.option("--onset-method", default="default",
help="Aubio onset threshold.")
@click.argument("files", nargs=-1)
@configurator
def command(config, files, force, bufsize, hopsize, onset_threshold,
onset_method):
if len(files) == 1 and not os.path.isfile(files[0]):
files = glob(files[0])
duration = 0
failures = []
succeses = []
files = set(files)
start = time.time()
label = "Adding {} files".format(len(files))
with click.progressbar(files, label=label) as files:
for path in files:
if not os.path.isfile(path):
failures.append("File does not exist {}".format(path))
continue
exists = MediaFile.by_id_or_name(config.session, path)
if exists:
if force:
remove_mediafile(config.session, exists)
else:
failures.append("File has already been added".format(path))
continue
try:
mediafile = add_mediafile(config.session, path,
bufsize=bufsize,
hopsize=hopsize,
method=onset_method,
threshold=onset_threshold)
duration += mediafile.duration / mediafile.samplerate
config.session.commit()
succeses.append(path)
except StandardError:
failures.append("Unable to open file")
if len(succeses) > 0:
succ_str = "Successfully added {} files, ({}) in {}".format(
len(succeses), datetime.timedelta(seconds=duration),
datetime.timedelta(seconds=int(time.time() - start)))
click.secho(succ_str, fg="green")
if config.verbose:
for path in succeses:
click.secho(path, fg="green")
if len(failures) > 0:
fail_str = "Failed to add {} files".format(len(failures))
click.secho(fail_str, fg="red")
if config.verbose:
for path in failures:
click.secho(path, fg="red")
| gpl-3.0 | 3,481,227,380,055,677,000 | 34.715686 | 79 | 0.598957 | false |
HonzaKral/warehouse | tests/conftest.py | 1 | 4822 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import xmlrpc.client
import alembic.command
import click.testing
import psycopg2
import pyramid.testing
import pytest
import webtest as _webtest
from pytest_dbfixtures.factories.postgresql import (
init_postgresql_database, drop_postgresql_database,
)
from pytest_dbfixtures.utils import get_config
from sqlalchemy import event
from warehouse.config import configure
from .common.db import Session
def pytest_collection_modifyitems(items):
for item in items:
if not hasattr(item, "module"): # e.g.: DoctestTextfile
continue
module_path = os.path.relpath(
item.module.__file__,
os.path.commonprefix([__file__, item.module.__file__]),
)
module_root_dir = module_path.split(os.pathsep)[0]
if (module_root_dir.startswith("functional")):
item.add_marker(pytest.mark.functional)
elif module_root_dir.startswith("unit"):
item.add_marker(pytest.mark.unit)
else:
raise RuntimeError(
"Unknown test type (filename = {0})".format(module_path)
)
@pytest.fixture
def pyramid_request():
return pyramid.testing.DummyRequest()
@pytest.yield_fixture
def pyramid_config(pyramid_request):
with pyramid.testing.testConfig(request=pyramid_request) as config:
yield config
@pytest.yield_fixture
def cli():
runner = click.testing.CliRunner()
with runner.isolated_filesystem():
yield runner
@pytest.fixture(scope="session")
def database(request, postgresql_proc):
config = get_config(request)
pg_host = postgresql_proc.host
pg_port = postgresql_proc.port
pg_user = config.postgresql.user
pg_db = config.postgresql.db
# Create our Database.
init_postgresql_database(psycopg2, pg_user, pg_host, pg_port, pg_db)
# Ensure our database gets deleted.
@request.addfinalizer
def drop_database():
drop_postgresql_database(psycopg2, pg_user, pg_host, pg_port, pg_db)
return "postgresql://{}@{}:{}/{}".format(pg_user, pg_host, pg_port, pg_db)
@pytest.fixture
def app_config(database):
config = configure(
settings={
"warehouse.prevent_esi": True,
"warehouse.token": "insecure token",
"camo.url": "http://localhost:9000/",
"camo.key": "insecure key",
"celery.broker_url": "amqp://",
"celery.result_url": "redis://localhost:0/",
"database.url": database,
"docs.url": "http://docs.example.com/",
"download_stats.url": "redis://localhost:0/",
"files.backend": "warehouse.packaging.services.LocalFileStorage",
"sessions.secret": "123456",
"sessions.url": "redis://localhost:0/",
},
)
# Ensure our migrations have been ran.
alembic.command.upgrade(config.alembic_config(), "head")
return config
@pytest.yield_fixture
def db_session(app_config):
engine = app_config.registry["sqlalchemy.engine"]
conn = engine.connect()
trans = conn.begin()
session = Session(bind=conn)
# Start the session in a SAVEPOINT
session.begin_nested()
# Then each time that SAVEPOINT ends, reopen it
@event.listens_for(session, "after_transaction_end")
def restart_savepoint(session, transaction):
if transaction.nested and not transaction._parent.nested:
session.begin_nested()
try:
yield session
finally:
session.close()
Session.remove()
trans.rollback()
conn.close()
engine.dispose()
@pytest.fixture
def db_request(pyramid_request, db_session):
pyramid_request.db = db_session
return pyramid_request
class _TestApp(_webtest.TestApp):
def xmlrpc(self, path, method, *args):
body = xmlrpc.client.dumps(args, methodname=method)
resp = self.post(path, body, headers={"Content-Type": "text/xml"})
return xmlrpc.client.loads(resp.body)
@pytest.yield_fixture
def webtest(app_config):
# We want to disable anything that relies on TLS here.
app_config.add_settings(enforce_https=False)
try:
yield _TestApp(app_config.make_wsgi_app())
finally:
app_config.registry["sqlalchemy.engine"].dispose()
| apache-2.0 | -7,687,774,955,321,452,000 | 28.224242 | 78 | 0.660929 | false |
Mitali-Sodhi/CodeLingo | Dataset/python/test_engine.py | 1 | 7506 | """
Scrapy engine tests
This starts a testing web server (using twisted.server.Site) and then crawls it
with the Scrapy crawler.
To view the testing web server in a browser you can start it by running this
module with the ``runserver`` argument::
python test_engine.py runserver
"""
import sys, os, re, urlparse
from twisted.internet import reactor, defer
from twisted.web import server, static, util
from twisted.trial import unittest
from scrapy import signals
from scrapy.utils.test import get_crawler
from scrapy.xlib.pydispatch import dispatcher
from scrapy.tests import tests_datadir
from scrapy.spider import BaseSpider
from scrapy.item import Item, Field
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.http import Request
from scrapy.utils.signal import disconnect_all
class TestItem(Item):
name = Field()
url = Field()
price = Field()
class TestSpider(BaseSpider):
name = "scrapytest.org"
allowed_domains = ["scrapytest.org", "localhost"]
itemurl_re = re.compile("item\d+.html")
name_re = re.compile("<h1>(.*?)</h1>", re.M)
price_re = re.compile(">Price: \$(.*?)<", re.M)
def parse(self, response):
xlink = SgmlLinkExtractor()
itemre = re.compile(self.itemurl_re)
for link in xlink.extract_links(response):
if itemre.search(link.url):
yield Request(url=link.url, callback=self.parse_item)
def parse_item(self, response):
item = TestItem()
m = self.name_re.search(response.body)
if m:
item['name'] = m.group(1)
item['url'] = response.url
m = self.price_re.search(response.body)
if m:
item['price'] = m.group(1)
return item
def start_test_site(debug=False):
root_dir = os.path.join(tests_datadir, "test_site")
r = static.File(root_dir)
r.putChild("redirect", util.Redirect("/redirected"))
r.putChild("redirected", static.Data("Redirected here", "text/plain"))
port = reactor.listenTCP(0, server.Site(r), interface="127.0.0.1")
if debug:
print "Test server running at http://localhost:%d/ - hit Ctrl-C to finish." \
% port.getHost().port
return port
class CrawlerRun(object):
"""A class to run the crawler and keep track of events occurred"""
def __init__(self):
self.spider = None
self.respplug = []
self.reqplug = []
self.itemresp = []
self.signals_catched = {}
def run(self):
self.port = start_test_site()
self.portno = self.port.getHost().port
start_urls = [self.geturl("/"), self.geturl("/redirect")]
self.spider = TestSpider(start_urls=start_urls)
for name, signal in vars(signals).items():
if not name.startswith('_'):
dispatcher.connect(self.record_signal, signal)
self.crawler = get_crawler()
self.crawler.install()
self.crawler.configure()
self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
self.crawler.signals.connect(self.response_downloaded, signals.response_downloaded)
self.crawler.crawl(self.spider)
self.crawler.start()
self.deferred = defer.Deferred()
dispatcher.connect(self.stop, signals.engine_stopped)
return self.deferred
def stop(self):
self.port.stopListening()
for name, signal in vars(signals).items():
if not name.startswith('_'):
disconnect_all(signal)
self.crawler.uninstall()
self.deferred.callback(None)
def geturl(self, path):
return "http://localhost:%s%s" % (self.portno, path)
def getpath(self, url):
u = urlparse.urlparse(url)
return u.path
def item_scraped(self, item, spider, response):
self.itemresp.append((item, response))
def request_scheduled(self, request, spider):
self.reqplug.append((request, spider))
def response_downloaded(self, response, spider):
self.respplug.append((response, spider))
def record_signal(self, *args, **kwargs):
"""Record a signal and its parameters"""
signalargs = kwargs.copy()
sig = signalargs.pop('signal')
signalargs.pop('sender', None)
self.signals_catched[sig] = signalargs
class EngineTest(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler(self):
self.run = CrawlerRun()
yield self.run.run()
self._assert_visited_urls()
self._assert_scheduled_requests()
self._assert_downloaded_responses()
self._assert_scraped_items()
self._assert_signals_catched()
def _assert_visited_urls(self):
must_be_visited = ["/", "/redirect", "/redirected",
"/item1.html", "/item2.html", "/item999.html"]
urls_visited = set([rp[0].url for rp in self.run.respplug])
urls_expected = set([self.run.geturl(p) for p in must_be_visited])
assert urls_expected <= urls_visited, "URLs not visited: %s" % list(urls_expected - urls_visited)
def _assert_scheduled_requests(self):
self.assertEqual(6, len(self.run.reqplug))
paths_expected = ['/item999.html', '/item2.html', '/item1.html']
urls_requested = set([rq[0].url for rq in self.run.reqplug])
urls_expected = set([self.run.geturl(p) for p in paths_expected])
assert urls_expected <= urls_requested
def _assert_downloaded_responses(self):
# response tests
self.assertEqual(6, len(self.run.respplug))
for response, _ in self.run.respplug:
if self.run.getpath(response.url) == '/item999.html':
self.assertEqual(404, response.status)
if self.run.getpath(response.url) == '/redirect':
self.assertEqual(302, response.status)
def _assert_scraped_items(self):
self.assertEqual(2, len(self.run.itemresp))
for item, response in self.run.itemresp:
self.assertEqual(item['url'], response.url)
if 'item1.html' in item['url']:
self.assertEqual('Item 1 name', item['name'])
self.assertEqual('100', item['price'])
if 'item2.html' in item['url']:
self.assertEqual('Item 2 name', item['name'])
self.assertEqual('200', item['price'])
def _assert_signals_catched(self):
assert signals.engine_started in self.run.signals_catched
assert signals.engine_stopped in self.run.signals_catched
assert signals.spider_opened in self.run.signals_catched
assert signals.spider_idle in self.run.signals_catched
assert signals.spider_closed in self.run.signals_catched
self.assertEqual({'spider': self.run.spider},
self.run.signals_catched[signals.spider_opened])
self.assertEqual({'spider': self.run.spider},
self.run.signals_catched[signals.spider_idle])
self.run.signals_catched[signals.spider_closed].pop('spider_stats', None) # XXX: remove for scrapy 0.17
self.assertEqual({'spider': self.run.spider, 'reason': 'finished'},
self.run.signals_catched[signals.spider_closed])
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
start_test_site(debug=True)
reactor.run()
| mit | 6,183,092,245,584,551,000 | 35.26087 | 111 | 0.62963 | false |
mbareta/edx-platform-ft | lms/djangoapps/affiliates/urls.py | 1 | 1342 | """
URLs for the Affiliate Feature.
"""
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', 'affiliates.views.index', name='index'),
url(r'^admin$', 'affiliates.views.admin', name='admin'),
url(r'^csv_admin$', 'affiliates.views.csv_admin', name='csv_admin'),
url(r'^csv_export$', 'affiliates.views.csv_export', name='csv_export'),
url(r'^payment$', 'affiliates.views.payment', name='payment'),
url(r'^new$', 'affiliates.views.new', name='new'),
url(r'^create$', 'affiliates.views.create', name='create'),
url(r'^login_as_user$', 'affiliates.views.login_as_user', name='login_as_user'),
url(r'^(?P<slug>[^/]*)$', 'affiliates.views.show', name='show'),
url(r'^edit/(?P<slug>[^/]*)$', 'affiliates.views.edit', name='edit'),
url(r'^delete/(?P<slug>[^/]*)$', 'affiliates.views.delete', name='delete'),
url(r'^edit/(?P<slug>[^/]*)/add_member$', 'affiliates.views.add_member', name='add_member'),
url(r'^edit/(?P<slug>[^/]*)/remove_member/(?P<member_id>\d+)$', 'affiliates.views.remove_member', name='remove_member'),
url(r'^edit/(?P<slug>[^/]*)/remove_invite/(?P<invite_id>\d+)$', 'affiliates.views.remove_invite', name='remove_invite'),
url(r'^toggle_active_status/(?P<slug>[^/]*)', 'affiliates.views.toggle_active_status', name='toggle_active_status'),
)
| agpl-3.0 | 5,093,399,337,685,562,000 | 57.347826 | 124 | 0.625931 | false |
pulsar-chem/Pulsar-Core | lib/systems/citric_acid.py | 1 | 1187 | import pulsar as psr
def load_ref_system():
""" Returns citric_acid as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
H 2.1308 0.2221 0.9663
C 1.7122 0.3441 -0.0545
H 2.0997 1.3175 -0.4232
C 0.1676 0.3915 0.0369
O -0.4569 0.3566 -1.2330
H 0.0995 0.8199 -1.8461
C -0.2168 1.7105 0.7689
O 0.4067 2.2339 1.6687
O -1.3020 2.4321 0.4397
H -1.8587 1.9564 -0.1662
C -0.3519 -0.8162 0.8480
H -0.2111 -0.6189 1.9309
H 0.2675 -1.7100 0.6187
C -1.7968 -1.1766 0.5983
O -2.2458 -2.2220 0.1705
O -2.7110 -0.2321 0.9295
H -3.5898 -0.5552 0.7564
C 2.2246 -0.7707 -0.9353
O 2.5655 -1.8941 -0.6193
O 2.3375 -0.4508 -2.2467
H 2.6587 -1.1970 -2.7435
""")
| bsd-3-clause | -1,024,794,622,188,936,300 | 41.392857 | 67 | 0.401011 | false |
storborg/typelift | typelift/dewoff.py | 1 | 4596 | import struct
import zlib
from cStringIO import StringIO
def read_woff_header(f):
"""
Read the WOFF header from a file-like object, consuming the file up to the
end of the header.
Reference here: http://www.w3.org/TR/WOFF/#WOFFHeader
"""
fmt = '>IIIHHIHHIIIII'
buf = f.read(struct.calcsize(fmt))
header = struct.unpack(fmt, buf)
return dict(signature=header[0],
flavor=header[1],
length=header[2],
numTables=header[3],
reserved=header[4],
totalSfontSize=header[5],
majorVersion=header[6],
minorVersion=header[7],
metaOffset=header[8],
metaLength=header[9],
metaOrigLength=header[10],
privOffset=header[11],
privLength=header[12])
def read_woff_table_directory(f, num_tables):
"""
    Read num_tables table directory entries from a file-like object,
    returning them as a list and consuming the file progressively.
"""
fmt = '>IIIII'
entries = []
for ii in range(num_tables):
buf = f.read(struct.calcsize(fmt))
entry = struct.unpack(fmt, buf)
entries.append(dict(tag=entry[0],
offset=entry[1],
compLength=entry[2],
origLength=entry[3],
origChecksum=entry[4]))
return entries
def read_font_table(f, entry):
f.seek(entry['offset'])
data = f.read(entry['compLength'])
if entry['compLength'] != entry['origLength']:
data = zlib.decompress(data)
return data
def read_woff(f):
"""
Parse a WOFF file from a file-like object.
"""
header = read_woff_header(f)
entries = read_woff_table_directory(f, header['numTables'])
font_tables = []
for entry in entries:
font_tables.append(read_font_table(f, entry))
return dict(header=header,
entries=entries,
font_tables=font_tables)
def write_otf_header(f, in_header):
"""
Write an OTF header to a file-like object, given data supplied from a WOFF
header.
"""
num_tables = in_header['numTables']
    # Per the sfnt header spec: searchRange is the largest power of two
    # that is <= numTables, times 16, and entrySelector is its log2.
    entrySelector = 0
    searchRange = 0
    for ii in range(64):
        power_of_two = 2 ** ii
        if power_of_two <= num_tables:
            entrySelector = ii
            searchRange = power_of_two * 16
rangeShift = (num_tables * 16) - searchRange
out_header = struct.pack('>IHHHH',
in_header['flavor'],
in_header['numTables'],
searchRange,
entrySelector,
rangeShift)
f.write(out_header)
def write_otf_table_directory_entry(f, entry, offset):
"""
Write an OTF font table directory entry, specifying an offset for the font
table.
Return the length of this entry's font table, padded to a word boundary as
per OTF spec.
"""
l = entry['origLength']
f.write(struct.pack('>IIII',
entry['tag'],
entry['origChecksum'],
offset,
l))
if (l % 4) != 0:
l += 4 - (l % 4)
return l
def write_otf_font_table(f, entry, font_table, offset):
"""
Write an OTF font table.
"""
f.seek(offset)
f.write(font_table)
offset += entry['origLength']
if (offset % 4) != 0:
f.write('\0' * (4 - (offset % 4)))
def write_otf(f, data):
"""
Write an OTF file to a file-like object, using data as supplied from
read_woff().
"""
write_otf_header(f, data['header'])
offset = f.tell() + (16 * len(data['entries']))
table_offsets = []
for entry in data['entries']:
table_offsets.append(offset)
l = write_otf_table_directory_entry(f, entry, offset)
offset += l
for entry, font_table, offset in zip(data['entries'],
data['font_tables'],
table_offsets):
write_otf_font_table(f, entry, font_table, offset)
def woff_to_otf(font_data):
"""
Translate a string containing WOFF data to a string containing OTF data.
"""
inf = StringIO(font_data)
outf = StringIO()
write_otf(outf, read_woff(inf))
return outf.getvalue()
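# Example usage of the in-memory converter above (file names are hypothetical):
#     with open('font.woff', 'rb') as woff_file:
#         otf_bytes = woff_to_otf(woff_file.read())
#     with open('font.otf', 'wb') as otf_file:
#         otf_file.write(otf_bytes)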
if __name__ == '__main__':
import sys
_, a, b = sys.argv
with open(a, 'rb') as inf:
data = read_woff(inf)
with open(b, 'wb') as outf:
write_otf(outf, data)
| mit | 1,986,326,004,806,239,200 | 27.196319 | 78 | 0.534595 | false |
eng-tools/sfsimodels | sfsimodels/functions.py | 1 | 10123 | from collections import OrderedDict
import numpy as np
import sfsimodels.exceptions
#
# def convert_stress_to_mass(q, width, length, gravity):
# """
# Converts a foundation stress to an equivalent mass.
#
# :param q: applied stress [Pa]
# :param width: foundation width [m]
# :param length: foundation length [m]
# :param gravity: applied gravitational acceleration [m/s2]
# :return:
# """
# mass = q * width * length / gravity
# return mass
def clean_float(value):
"""Converts a value to a float or returns None"""
if value is None or value == "":
return None
return float(value)
def collect_serial_value(value, export_none=False):
"""
Introspective function that returns a serialisable value
The function converts objects to dictionaries
"""
if isinstance(value, str):
return value
elif isinstance(value, int):
return value
elif isinstance(value, np.int64):
return int(value)
elif hasattr(value, "to_dict"):
return value.to_dict(export_none=export_none)
elif hasattr(value, "__len__"):
tolist = getattr(value, "tolist", None)
if callable(tolist):
value = value.tolist()
return value
else:
if hasattr(value, "to_dict"):
value = value.to_dict(export_none=export_none)
return value
else:
values = []
for item in value:
values.append(collect_serial_value(item, export_none=export_none))
return values
else:
return value
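# Illustrative examples of the conversion above (np is numpy, imported above):
#     collect_serial_value(np.int64(3)) -> 3
#     collect_serial_value([1, np.array([2.0])]) -> [1, [2.0]]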
def get_key_value(value, objs, key=None):
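    # Resolves serialised references: a key ending in '_id' is swapped for the
    # matching object looked up in `objs`; lists, dicts and OrderedDicts are
    # walked recursively so nested references are resolved as well.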
if key is not None and "_id" == key[-3:]:
obj_base_type = key[:-3]
if value is not None:
try:
value = objs[obj_base_type][int(value)]
except KeyError:
raise KeyError(f'Cannot load type: {obj_base_type}, id: {int(value)}')
return obj_base_type, value
elif isinstance(value, list):
vals = []
for item in value:
ikey, val = get_key_value(item, objs)
vals.append(val)
# if isinstance(item, list) or isinstance(item, dict) or isinstance(item, OrderedDict):
return key, vals
elif isinstance(value, dict):
vals = {}
for item in value:
ikey, ivalue = get_key_value(value[item], objs, key=item)
vals[ikey] = ivalue
return key, vals
elif isinstance(value, OrderedDict):
vals = OrderedDict()
for item in value:
ikey, ivalue = get_key_value(value[item], objs, key=item)
vals[ikey] = ivalue
return key, vals
else:
return key, value
def add_to_obj(obj, dictionary, objs=None, exceptions=None, verbose=0):
"""
Cycles through a dictionary and adds the key-value pairs to an object.
Parameters
----------
obj: object
An object that parameters should be added to
dictionary: dict
        Keys are object parameter names, values are object parameter values
    objs: dict, optional
        Previously loaded objects keyed by base type, used to resolve keys
        that end in '_id'
    exceptions: list
        Parameters that should be excluded
    verbose: bool
        If true then show print statements
"""
if exceptions is None:
exceptions = []
# exceptions.append('unique_hash')
for item in dictionary:
if item == 'unique_hash':
obj._loaded_unique_hash = dictionary[item]
continue
if item in exceptions:
continue
if dictionary[item] is not None:
if verbose:
print("process: ", item, dictionary[item])
key, value = get_key_value(dictionary[item], objs, key=item)
if verbose:
print("assign: ", key, value)
if isinstance(value, dict) and len(value) == 2: # if is a dict to ref another object
keys = list(value.keys())
cleaned_keys = [val.replace('_unique_hash', '') for val in keys]
if cleaned_keys[0] == cleaned_keys[1]:
value = value[cleaned_keys[0]]
try:
setattr(obj, key, value)
except AttributeError:
if hasattr(obj, f'set_{key}'):
try:
getattr(obj, f'set_{key}')(value, two_way=False)
except AttributeError:
raise AttributeError("Can't set {0}={1} on object: {2}".format(key, value, obj))
else:
raise AttributeError("Can't set {0}={1} on object: {2}".format(key, value, obj))
except sfsimodels.exceptions.ModelError:
pass
def get_value_of_a_get_method(obj, method, extras=None):
"""
Can access exposed 'get' methods and pass in keyword arguments if required
Parameters
----------
obj: object
The Object that has the get method
method: str
The name of the get method
extras: dict
A Dictionary of possible required keyword arguments
Returns
-------
"""
if extras is None:
extras = {}
try:
value = getattr(obj, method)()
except TypeError as e:
if "required positional argument:" in str(e):
parameters = [str(e).split("argument: ")[-1]]
elif "required positional arguments:" in str(e):
p_str = str(e).split("arguments: ")[-1]
if ", and " in p_str: # if more than 2
partial = p_str.split(", and ")
parameters = partial[0].split(", ") + partial[-1:]
else: # if one
parameters = p_str.split(" and ")
else:
raise TypeError(e)
params = []
for parameter in parameters:
parameter = parameter[1:-1]
if parameter in extras:
params.append(extras[parameter])
else:
params.append(getattr(obj, parameter))
value = getattr(obj, method)(*params)
return value
def interp_left(x0, x, y=None, low=None):
"""
Interpolation takes the lower value
Parameters
----------
x0: array_like
Values to be interpolated on x-axis
x: array_like
Existing values on x-axis
y: array_like
Existing y-axis values
low: str or float or int
        What to do if x0 is less than x[0]: if 'min' then clip x0 to x[0];
        if a float or int then clip to that value; if None (default), raise an error
Returns
-------
"""
if y is None:
y = np.arange(len(x))
else:
y = np.array(y)
if low is None:
assert np.min(x0) >= x[0], (np.min(x0), x[0])
elif low == 'min':
x0 = np.clip(x0, x[0], None)
else:
x0 = np.clip(x0, low, None)
inds = np.searchsorted(x, x0, side='right') - 1
return y[inds]
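# Illustrative example: with x = [0, 1, 2, 3] and the default y = arange(len(x)),
# interp_left([0.5, 1.0, 2.5], [0, 1, 2, 3]) returns array([0, 1, 2]) -- each
# query maps to the nearest existing x-value at or below it.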
def interp2d(x, xf, f):
"""
Can interpolate a table to get an array of values in 2D
Parameters
----------
x: array_like
1d array of values to be interpolated
xf: array_like
1d array of values
f: array_like
2d array of function values size=(len(xf), n)
Returns
-------
returns size=(len(x), n)
Examples
--------
>>> f = np.array([[0, 0, 0],
>>> [0, 1, 4],
>>> [2, 6, 2],
>>> [10, 10, 10]
>>> ])
>>> xf = np.array([0, 1, 2, 3])
>>> x = np.array([0.5, 1, 2.2, 2.5])
>>> f_interp = interp2d(x, xf, f)
>>> print(f_interp[0][0])
0.0
>>> print(f_interp[0][1])
0.5
>>> print(f_interp[0][2])
2.0
"""
x = np.array(x)
xf = np.array(xf)
f = np.array(f)
ind = np.argmin(np.abs(x[:, np.newaxis] - xf), axis=1)
x_ind = xf[ind]
ind0 = np.where(x_ind > x, ind - 1, ind)
ind1 = np.where(x_ind > x, ind, ind + 1)
ind0 = np.clip(ind0, 0, None)
ind1 = np.clip(ind1, None, len(xf) - 1)
f0 = f[ind0]
f1 = f[ind1]
a0 = xf[ind0]
a1 = xf[ind1]
denom = (a1 - a0)
denom_adj = np.clip(denom, 1e-10, None) # to avoid divide by zero warning
s0 = np.where(denom > 0, (x - a0) / denom_adj, 1) # if denom less than 0, then out of bounds
s1 = 1 - s0
return s1[:, np.newaxis] * f0 + s0[:, np.newaxis] * f1
def interp3d(x, y, xs, ys_at_xs, f):
"""
Can interpolate a table to get an array of values in 2D
Parameters
----------
x: array_like
1d array of values to be interpolated
y: array_like
1d array of values to be interpolated
xs: array_like
1d array of x-positions where points are known
ys_at_xs: list of array_like
        list of 1d arrays of y-positions where points are known, len=len(xs)
    f: list of array_like
        list of 1d arrays of function values, size=(len(xs), len(ys_at_xs[j]))
Returns
-------
returns size=(len(x), len(y))
Examples
--------
"""
x_ind0 = interp_left(x, xs)
x_ind1 = np.clip(x_ind0 + 1, None, len(xs) - 1)
y_ind_x0y0 = interp_left(y, ys_at_xs[x_ind0])
y_ind_x0y1 = np.clip(y_ind_x0y0 + 1, None, len(ys_at_xs[x_ind0]) - 1)
y_ind_x1y0 = interp_left(y, ys_at_xs[x_ind1])
y_ind_x1y1 = np.clip(y_ind_x1y0 + 1, None, len(ys_at_xs[x_ind1]) - 1)
x0 = xs[x_ind0]
x1 = xs[x_ind1]
y0_at_x0 = ys_at_xs[x_ind0][y_ind_x0y0]
y1_at_x0 = ys_at_xs[x_ind0][y_ind_x0y1]
y0_at_x1 = ys_at_xs[x_ind1][y_ind_x1y0]
y1_at_x1 = ys_at_xs[x_ind1][y_ind_x1y1]
fx0y0 = f[x_ind0][y_ind_x0y0]
fx0y1 = f[x_ind0][y_ind_x0y1]
fx1y0 = f[x_ind1][y_ind_x1y0]
f1y1 = f[x_ind1][y_ind_x1y1]
x_w = (x - x0) / ((x1 - x0) + 1e-16 * x1)
y_w_x0 = (y - y0_at_x0) / ((y1_at_x0 - y0_at_x0) + 1e-16 * y1_at_x0)
y_w_x1 = (y - y0_at_x1) / ((y1_at_x1 - y0_at_x1) + 1e-16 * y1_at_x1)
fvs = (fx0y0 * (1 - x_w) * (1 - y_w_x0)) + (fx0y1 * (1 - x_w) * y_w_x0) \
+ (fx1y0 * x_w * (1 - y_w_x1)) + (f1y1 * x_w * y_w_x1)
return fvs
#
# if __name__ == '__main__':
# xs = np.array([0, 2])
# ys_at_xs = np.array([[0, 5, 10], [0, 2, 5, 8, 10]]) | mit | 693,612,150,368,522,200 | 30.150769 | 104 | 0.53581 | false |
mzbenami/pyeapi | test/unit/test_eapilib.py | 1 | 5296 | import unittest
import json
from mock import Mock, patch
import pyeapi.eapilib
class TestEapiConnection(unittest.TestCase):
def test_execute_valid_response(self):
response_dict = dict(jsonrpc='2.0', result=[], id=id(self))
mock_send = Mock(name='send')
mock_send.return_value = json.dumps(response_dict)
instance = pyeapi.eapilib.EapiConnection()
instance.send = mock_send
result = instance.execute(['command'])
self.assertEqual(json.loads(result), response_dict)
def test_execute_raises_type_error(self):
instance = pyeapi.eapilib.EapiConnection()
with self.assertRaises(TypeError):
instance.execute(None, encoding='invalid')
def test_execute_raises_connection_error(self):
mock_send = Mock(name='send')
mock_send.side_effect = pyeapi.eapilib.ConnectionError('test', 'test')
instance = pyeapi.eapilib.EapiConnection()
instance.send = mock_send
with self.assertRaises(pyeapi.eapilib.ConnectionError):
instance.execute('test')
def test_execute_raises_command_error(self):
mock_send = Mock(name='send')
mock_send.side_effect = pyeapi.eapilib.CommandError('1000', 'test')
instance = pyeapi.eapilib.EapiConnection()
instance.send = mock_send
with self.assertRaises(pyeapi.eapilib.CommandError):
instance.execute('test')
def test_create_socket_connection(self):
instance = pyeapi.eapilib.SocketEapiConnection()
self.assertIsInstance(instance, pyeapi.eapilib.EapiConnection)
self.assertIsNotNone(str(instance.transport))
@patch('pyeapi.eapilib.socket')
def test_socket_connection_create(self, mock_socket):
instance = pyeapi.eapilib.SocketConnection('/path/to/sock')
instance.connect()
mock_socket.socket.return_value.connect.assert_called_with('/path/to/sock')
def test_create_http_local_connection(self):
instance = pyeapi.eapilib.HttpLocalEapiConnection()
self.assertIsInstance(instance, pyeapi.eapilib.EapiConnection)
self.assertIsNotNone(str(instance.transport))
def test_create_http_connection(self):
instance = pyeapi.eapilib.HttpEapiConnection('localhost')
self.assertIsInstance(instance, pyeapi.eapilib.EapiConnection)
self.assertIsNotNone(str(instance.transport))
def test_create_https_connection(self):
instance = pyeapi.eapilib.HttpsEapiConnection('localhost')
self.assertIsInstance(instance, pyeapi.eapilib.EapiConnection)
self.assertIsNotNone(str(instance.transport))
def test_send(self):
response_dict = dict(jsonrpc='2.0', result=[{}], id=id(self))
response_json = json.dumps(response_dict)
mock_transport = Mock(name='transport')
mockcfg = {'getresponse.return_value.read.return_value': response_json}
mock_transport.configure_mock(**mockcfg)
instance = pyeapi.eapilib.EapiConnection()
instance.transport = mock_transport
instance.send('test')
self.assertTrue(mock_transport.close.called)
def test_send_with_authentication(self):
response_dict = dict(jsonrpc='2.0', result=[{}], id=id(self))
response_json = json.dumps(response_dict)
mock_transport = Mock(name='transport')
mockcfg = {'getresponse.return_value.read.return_value': response_json}
mock_transport.configure_mock(**mockcfg)
instance = pyeapi.eapilib.EapiConnection()
instance.authentication('username', 'password')
instance.transport = mock_transport
instance.send('test')
self.assertTrue(mock_transport.close.called)
def test_send_raises_connection_error(self):
mock_transport = Mock(name='transport')
mockcfg = {'getresponse.return_value.read.side_effect': ValueError}
mock_transport.configure_mock(**mockcfg)
instance = pyeapi.eapilib.EapiConnection()
instance.transport = mock_transport
with self.assertRaises(pyeapi.eapilib.ConnectionError):
instance.send('test')
def test_send_raises_command_error(self):
error = dict(code=9999, message='test', data=[{'errors': ['test']}])
response_dict = dict(jsonrpc='2.0', error=error, id=id(self))
response_json = json.dumps(response_dict)
mock_transport = Mock(name='transport')
mockcfg = {'getresponse.return_value.read.return_value': response_json}
mock_transport.configure_mock(**mockcfg)
instance = pyeapi.eapilib.EapiConnection()
instance.transport = mock_transport
with self.assertRaises(pyeapi.eapilib.CommandError):
instance.send('test')
class TestCommandError(unittest.TestCase):
def test_create_command_error(self):
result = pyeapi.eapilib.CommandError(9999, 'test')
self.assertIsInstance(result, pyeapi.eapilib.EapiError)
def test_command_error_trace(self):
commands = ['test command', 'test command', 'test command']
output = [{}, 'test output']
result = pyeapi.eapilib.CommandError(9999, 'test', commands=commands,
output=output)
self.assertIsNotNone(result.trace)
| bsd-3-clause | -7,965,464,696,578,998,000 | 36.828571 | 83 | 0.670128 | false |
sergey-lebedev/concor | bots/algorithms.py | 1 | 17788 | import copy
import random
import time
DEBUG = False
inf = float("infinity")
def adjacency_list_generator():
#adjacency_list
adjacency_list = {}
ij_list = [(i, j) for i in range(width) for j in range(height)]
for (i, j) in ij_list:
link_list = []
#link_list = {}
for direction in DIRECTIONS:
(dx, dy) = DIRECTIONS[direction]
dyj = dy + j
dxi = dx + i
if (0 <= dyj < height) and (0 <= dxi < width):
link_list.append((dxi, dyj))
#link_list[(dxi, dyj)] = True
adjacency_list[(i, j)] = set(link_list)
#adjacency_list[(i, j)] = link_list
return adjacency_list
def available_positions_generator(loc, wall_list, player_list, adjacency_list):
# calculate available positions
available_positions = {}
for position in adjacency_list:
#print positions
available_positions[position] = adjacency_list[position].copy()
#available_positions = adjacency_list.copy()
for wall in wall_list:
(col, row) = wall['location']
left_top = (col - 1, row - 1)
right_top = (col, row - 1)
left_bottom = (col - 1, row)
right_bottom = (col, row)
#print left_top, right_top,left_bottom, right_bottom
if wall['type'] == 'horizontal':
available_positions[left_top].difference_update(set([left_bottom]))
available_positions[left_bottom].difference_update(set([left_top]))
available_positions[right_top].difference_update(set([right_bottom]))
available_positions[right_bottom].difference_update(set([right_top]))
elif wall['type'] == 'vertical':
available_positions[left_top].difference_update(set([right_top]))
available_positions[left_bottom].difference_update(set([right_bottom]))
available_positions[right_top].difference_update(set([left_top]))
available_positions[right_bottom].difference_update(set([left_bottom]))
#occupied cells
(col, row) = loc
set_loc = set([loc])
player_locations = []
for player in player_list:
player_locations.append(player['location'])
for direction in DIRECTIONS:
(dx, dy) = DIRECTIONS[direction]
for a_loc in player_locations:
if (a_loc == (col + dx, row + dy) and
a_loc in available_positions[loc]):
#print a_loc
(a_col, a_row) = a_loc
for neighbors in available_positions[a_loc]:
available_positions[neighbors].difference_update(set([a_loc]))
b_loc = (a_col + dx, a_row + dy)
if (b_loc in available_positions[a_loc] and
b_loc not in player_locations):
available_positions[b_loc].update(set_loc)
available_positions[loc].update(set([b_loc]))
else:
#sideway jump
(ldx, ldy) = DIRECTIONS[LEFT[direction]]
c_loc = (a_col + ldx, a_row + ldy)
if (c_loc in available_positions[a_loc] and
c_loc not in player_locations):
available_positions[c_loc].update(set_loc)
available_positions[loc].update(set([c_loc]))
(rdx, rdy) = DIRECTIONS[RIGHT[direction]]
d_loc = (a_col + rdx, a_row + rdy)
if (d_loc in available_positions[a_loc] and
d_loc not in player_locations):
available_positions[d_loc].update(set_loc)
available_positions[loc].update(set([d_loc]))
available_positions[a_loc] = set([])
#print available_positions[loc]
return available_positions
def iapg(player, wall_list, player_list, adjacency_list):
# calculate available positions
available_positions = {}
for position in adjacency_list:
#print positions
available_positions[position] = adjacency_list[position].copy()
#available_positions = adjacency_list.copy()
#available_positions = dict([pair for pair in zip(adjacency_list.keys(), adjacency_list.values())])
for wall in wall_list:
(col, row) = wall['location']
left_top = (col - 1, row - 1)
right_top = (col, row - 1)
left_bottom = (col - 1, row)
right_bottom = (col, row)
#print left_top, right_top,left_bottom, right_bottom
if wall['type'] == 'horizontal':
available_positions[left_top].difference_update(set([left_bottom]))
available_positions[left_bottom].difference_update(set([left_top]))
available_positions[right_top].difference_update(set([right_bottom]))
available_positions[right_bottom].difference_update(set([right_top]))
elif wall['type'] == 'vertical':
available_positions[left_top].difference_update(set([right_top]))
available_positions[left_bottom].difference_update(set([right_bottom]))
available_positions[right_top].difference_update(set([left_top]))
available_positions[right_bottom].difference_update(set([left_bottom]))
if player:
# occupied cells
loc = player['location']
(col, row) = loc
set_loc = set([loc])
# opponent's locations
opponent_locations = [item['location'] for item in player_list if item != player]
for direction in DIRECTIONS:
(dx, dy) = DIRECTIONS[direction]
for a_loc in opponent_locations:
if (a_loc == (col + dx, row + dy) and
a_loc in available_positions[loc]):
#print a_loc
(a_col, a_row) = a_loc
for neighbors in available_positions[a_loc]:
available_positions[neighbors].difference_update(set([a_loc]))
b_loc = (a_col + dx, a_row + dy)
if (b_loc in available_positions[a_loc] and
b_loc not in opponent_locations):
available_positions[b_loc].update(set_loc)
available_positions[loc].update(set([b_loc]))
else:
#sideway jump
(ldx, ldy) = DIRECTIONS[LEFT[direction]]
c_loc = (a_col + ldx, a_row + ldy)
if (c_loc in available_positions[a_loc] and
c_loc not in opponent_locations):
available_positions[c_loc].update(set_loc)
available_positions[loc].update(set([c_loc]))
(rdx, rdy) = DIRECTIONS[RIGHT[direction]]
d_loc = (a_col + rdx, a_row + rdy)
if (d_loc in available_positions[a_loc] and
d_loc not in opponent_locations):
available_positions[d_loc].update(set_loc)
available_positions[loc].update(set([d_loc]))
available_positions[a_loc] = set([])
#print available_positions[loc]
return available_positions
def w2p(wall_list):
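    # Maps the placed walls to the remaining legal placements: every crossing
    # starts with both orientations available; a placed wall clears its own
    # crossing and removes the parallel orientation at the adjacent crossings
    # it overlaps.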
#print wall_list
p = dict(((ij, ['horizontal', 'vertical']) for ij in ij_list_for_p))
p_has_key = p.has_key
#set_vertical = set(['vertical'])
#set_horizontal = set(['horizontal'])
for wall in wall_list:
(x, y) = wall['location']
#p[(x, y)] = set([])
p[(x, y)] = []
if wall['type'] == 'horizontal':
for direction in ('w', 'e'):
(dx, dy) = DIRECTIONS[direction]
location = (x + dx, y + dy)
if p_has_key(location):
#p[location].difference_update(set_horizontal)
if 'horizontal' in p[location]: p[location].remove('horizontal')
#if p[location].has_key('horizontal'): del p[location]['horizontal']
elif wall['type'] == 'vertical':
for direction in ('n', 's'):
(dx, dy) = DIRECTIONS[direction]
location = (x + dx, y + dy)
if p_has_key(location):
#p[location].difference_update(set_vertical)
if 'vertical' in p[location]: p[location].remove('vertical')
#if p[location].has_key('vertical'): del p[location]['vertical']
#print p
return p
def bfs(loc, available_positions, target_loc):
# breadth-first search
neighbor = loc
queue = [loc]
visited = {}
visited[loc] = True
is_break = False
path = {}
while queue and not is_break:
node = queue.pop(0)
for neighbor in available_positions[node]:
if not visited.has_key(neighbor):
path[neighbor] = node
visited[neighbor] = True
if target_loc.has_key(neighbor):
is_break = True
#print neighbor
queue.append(neighbor)
if is_break:
break
if not is_break:
step = None
else:
step = 0
node = neighbor
backtrace = [node]
while (node != loc) and is_break:
step += 1
neighbor = node
node = path[neighbor]
backtrace.append(node)
backtrace.reverse()
return step, backtrace
def bfs_light(loc, available_positions, target_loc):
# breadth-first search
neighbor = loc
queue = [loc]
visited = {}
visited[loc] = True
is_break = False
path = {}
while queue and not is_break:
node = queue.pop(0)
for neighbor in available_positions[node]:
if not visited.has_key(neighbor):
path[neighbor] = node
visited[neighbor] = True
if target_loc.has_key(neighbor):
is_break = True
#print neighbor
queue.append(neighbor)
if is_break:
break
if not is_break:
step = None
else:
step = 0
node = neighbor
while node != loc:
step += 1
neighbor = node
node = path[neighbor]
return step
def spwi(loc, available_positions, target_loc):
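    # Level-order breadth-first search: expands one ring of cells per pass and
    # returns the number of steps to the first cell found in target_loc
    # (0 when loc is already a target, inf when no target is reachable).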
# breadth-first search
target_loc_has_key = target_loc.has_key
if target_loc_has_key(loc): return 0
queue = [loc]
visited = visited_template.copy()
visited[loc] = True
is_break = False
step = 0
while queue and not is_break:
step += 1
subqueue = []
subqueue_append = subqueue.append
for node in queue:
for neighbor in available_positions[node]:
if not visited[neighbor]:
visited[neighbor] = True
subqueue_append(neighbor)
if target_loc_has_key(neighbor):
is_break = True
break
if is_break: break
queue = subqueue
if not is_break: step = inf
return step
def improved_dijkstra(loc, available_positions, target_loc):
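    # The board graph is unweighted (every edge costs 1), so this behaves like
    # a level-order BFS with explicit distance bookkeeping; returns the number
    # of edges to the nearest target cell, or inf when unreachable.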
# dijkstra algorithm
target_loc_has_key = target_loc.has_key
if target_loc_has_key(loc): return 0
distances = distances_template.copy()
distances_has_key = distances.has_key
queue = [(0, loc)]
visited = visited_template.copy()
visited[loc] = True
is_break = False
step = 0
while queue and not is_break:
step += 1
subqueue = []
subqueue_append = subqueue.append
for (dummy, node) in queue:
for neighbor in available_positions[node]:
if not visited[neighbor]:
visited[neighbor] = True
if distances_has_key(neighbor):
distance = min(distances[neighbor], distances[node] + 1)
else:
distance = distances[node] + 1
distances[neighbor] = distance
estimation = (distance, neighbor)
subqueue.append(estimation)
if target_loc_has_key(neighbor):
is_break = True
break
if is_break: break
queue = subqueue
queue = sorted(queue)
if not is_break: step = inf
return step
def bfs_side(loc, available_positions, player):
# player-oriented
axis = player['axis']
line = player['line']
# breadth-first search
neighbor = loc
queue = [loc]
visited = {}
visited[loc] = True
is_break = False
path = {}
while queue and not is_break:
node = queue.pop(0)
for neighbor in available_positions[node]:
if not visited.has_key(neighbor):
path[neighbor] = node
visited[neighbor] = True
if neighbor[axis] == line:
is_break = True
#print neighbor
queue.append(neighbor)
if is_break:
break
if not is_break:
step = None
else:
step = 0
node = neighbor
while node != loc:
step += 1
neighbor = node
node = path[neighbor]
return step
def dijkstra(loc, available_positions, target_loc):
# dijkstra algorithm
neighbor = loc
distances = {loc: 0}
queue = [(0, loc)]
visited = {}
is_break = False
while queue and not is_break:
(dummy, node) = queue.pop(0)
for neighbor in available_positions[node]:
if not visited.has_key(neighbor):
visited[neighbor] = True
if distances.has_key(neighbor):
distance = min(distances[neighbor], distances[node] + 1)
else:
distance = distances[node] + 1
distances[neighbor] = distance
estimation = (distance, neighbor)
queue.append(estimation)
if neighbor in target_loc:
is_break = True
break
queue = sorted(queue)
if is_break:
        step = distances[neighbor]  # distance to the matched target cell itself
else:
step = None
return step
def trace2places(trace):
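    # Converts a path of board cells into the wall anchor points that touch it:
    # each cell contributes the four crossings at its corners, clipped to the
    # interior of the board and de-duplicated.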
places = []
offsets = ((0, 0), (1, 0), (0, 1), (1, 1))
for location in trace:
(col, row) = location
for offset in offsets:
(offset_col, offset_row) = offset
(place_col, place_row) = (col + offset_col, row + offset_row)
place = (place_col, place_row)
if ((0 < place_col < width) and
(0 < place_row < height) and
place not in places):
places.append(place)
return places
def alpha_beta_pruning(alpha, beta, value, owner):
pruning = False
if DEBUG:
print "alpha-beta pruning"
print "alpha:", alpha
print "beta:", beta
print "value:", value
if owner == 'max' and alpha != None:
if value >= alpha:
if DEBUG:
print "alpha pruning node"
print "alpha:", alpha
print "value:", value
pruning = True
if owner == 'min' and beta != None:
if -value < beta:
if DEBUG:
print "beta pruning node"
print "beta:", beta
print "value:", value
pruning = True
return pruning
def action_choice(action_list):
# action select
maximal_cost = None
equal_actions_list = []
for actions in action_list:
if actions['cost'] > maximal_cost:
equal_actions_list = []
maximal_cost = actions['cost']
action = actions
equal_actions_list.append(action)
elif actions['cost'] == maximal_cost:
action = actions
equal_actions_list.append(action)
variants = len(equal_actions_list)
if variants != 0:
action = random.choice(equal_actions_list)
else:
action = {'action_type': None}
#print action
return action
def action_choice_greedy(action_list):
# action select
maximal_movement_cost = None
maximal_building_cost = None
equal_movement_actions_list = []
equal_building_actions_list = []
for action in action_list:
if action['action_type'] == 'movement':
if action['cost'] > maximal_movement_cost:
equal_movement_actions_list = []
maximal_movement_cost = action['cost']
equal_movement_actions_list.append(action)
elif action['cost'] == maximal_movement_cost:
equal_movement_actions_list.append(action)
elif action['action_type'] == 'building':
if action['cost'] > maximal_building_cost:
equal_building_actions_list = []
maximal_building_cost = action['cost']
equal_building_actions_list.append(action)
elif action['cost'] == maximal_building_cost:
equal_building_actions_list.append(action)
#print maximal_movement_cost
#print maximal_building_cost
if maximal_movement_cost >= maximal_building_cost:
variants = len(equal_movement_actions_list)
if variants != 0:
action = random.choice(equal_movement_actions_list)
else:
action = {'action_type': None}
else:
variants = len(equal_building_actions_list)
if variants != 0:
action = random.choice(equal_building_actions_list)
else:
action = {'action_type': None}
#print action
return action
| mit | -1,127,524,987,407,583,500 | 34.647295 | 103 | 0.534461 | false |
waynechu/PythonProject | dns/rdtypes/ANY/CSYNC.py | 2 | 4721 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
import dns.name
from dns._compat import xrange
class CSYNC(dns.rdata.Rdata):
"""CSYNC record
@ivar serial: the SOA serial number
@type serial: int
@ivar flags: the CSYNC flags
@type flags: int
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['serial', 'flags', 'windows']
def __init__(self, rdclass, rdtype, serial, flags, windows):
super(CSYNC, self).__init__(rdclass, rdtype)
self.serial = serial
self.flags = flags
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
text = ''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = bitmap[i]
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 +
i * 8 + j))
text += (' ' + ' '.join(bits))
return '%d %d%s' % (self.serial, self.flags, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
serial = tok.get_uint32()
flags = tok.get_uint16()
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("CSYNC with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("CSYNC with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
window = 0
octets = 0
prior_rdtype = 0
bitmap = bytearray(b'\0' * 32)
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
windows.append((window, bitmap[0:octets]))
bitmap = bytearray(b'\0' * 32)
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = bitmap[byte] | (0x80 >> bit)
windows.append((window, bitmap[0:octets]))
return cls(rdclass, rdtype, serial, flags, windows)
def to_wire(self, file, compress=None, origin=None):
file.write(struct.pack('!IH', self.serial, self.flags))
for (window, bitmap) in self.windows:
file.write(struct.pack('!BB', window, len(bitmap)))
file.write(bitmap)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
if rdlen < 6:
raise dns.exception.FormError("CSYNC too short")
(serial, flags) = struct.unpack("!IH", wire[current: current + 6])
current += 6
rdlen -= 6
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("CSYNC too short")
window = wire[current]
octets = wire[current + 1]
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad CSYNC octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad CSYNC bitmap length")
bitmap = bytearray(wire[current: current + octets].unwrap())
current += octets
rdlen -= octets
windows.append((window, bitmap))
return cls(rdclass, rdtype, serial, flags, windows)
| mit | -6,451,602,005,815,375,000 | 36.468254 | 75 | 0.57509 | false |
webbpinner/OpenVDMv2 | server/workers/lowering_directory.py | 1 | 18277 | #!/usr/bin/env python3
"""
FILE: lowering_directory.py
DESCRIPTION: Gearman worker the handles the tasks of creating a new lowering
data directory and updating the lowering directory structure when additional
subdirectories must be added.
BUGS:
NOTES:
AUTHOR: Webb Pinner
COMPANY: Capable Solutions
VERSION: 2.4
CREATED: 2015-01-01
REVISION: 2020-12-29
LICENSE INFO: Open Vessel Data Management v2.5 (OpenVDMv2)
Copyright (C) OceanDataRat 2021
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/gpl-3.0.html>.
"""
import argparse
import os
import sys
import errno
import json
import time
import signal
import logging
from os.path import dirname, realpath
import python3_gearman
sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from server.lib.set_owner_group_permissions import set_owner_group_permissions
from server.lib.openvdm import OpenVDM
CUSTOM_TASKS = [
{
"taskID": "0",
"name": "createLoweringDirectory",
"longName": "Creating Lowering Directory",
},
{
"taskID": "0",
"name": "setLoweringDataDirectoryPermissions",
"longName": "Setting Lowering Data Directory Permissions",
}
]
def build_dest_dir(gearman_worker, dest_dir):
"""
Replace any wildcards in the provided directory
"""
return_dest_dir = dest_dir.replace('{loweringID}', gearman_worker.lowering_id)
return_dest_dir = return_dest_dir.replace('{loweringDataBaseDir}', gearman_worker.shipboard_data_warehouse_config['loweringDataBaseDir'],)
return_dest_dir = return_dest_dir.replace('{cruiseID}', gearman_worker.cruise_id)
return return_dest_dir
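# Illustrative example (hypothetical values): with lowering_id 'LO123' and
# loweringDataBaseDir 'Lowerings', a destDir template of
# '{loweringDataBaseDir}/{loweringID}/sensors' expands to 'Lowerings/LO123/sensors'.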
def build_directorylist(gearman_worker):
"""
build the list of directories to be created as part of creating the new
lowering
"""
return_directories = [ gearman_worker.lowering_dir ]
collection_system_transfers = gearman_worker.ovdm.get_active_collection_system_transfers(cruise=False)
return_directories.extend([ os.path.join(gearman_worker.lowering_dir, build_dest_dir(gearman_worker, collection_system_transfer['destDir'])) for collection_system_transfer in collection_system_transfers ])
return return_directories
def create_directories(directorylist):
"""
Create the directories in the provide directory list
"""
reasons = []
for directory in directorylist:
try:
os.makedirs(directory)
except OSError as exception:
if exception.errno != errno.EEXIST:
logging.error("Unable to create directory: %s", directory)
                reasons.append("Unable to create directory: %s" % directory)
if len(reasons) > 0:
return {'verdict': False, 'reason': '\n'.join(reasons)}
return {'verdict': True}
def lockdown_directory(base_dir, exempt_dir):
"""
Lockdown permissions on the base directory, skip the exempt directory if present
"""
dir_contents = [ os.path.join(base_dir,f) for f in os.listdir(base_dir)]
files = filter(os.path.isfile, dir_contents)
for file in files:
os.chmod(file, 0o600)
directories = filter(os.path.isdir, dir_contents)
for directory in directories:
if not directory == exempt_dir:
os.chmod(directory, 0o700)
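# Minimal usage sketch (hypothetical paths): lock down everything directly
# under a cruise directory except the lowering data tree, e.g.
#
#     lockdown_directory('/data/CS456', '/data/CS456/Lowerings')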
class OVDMGearmanWorker(python3_gearman.GearmanWorker): # pylint: disable=too-many-instance-attributes
"""
Class for the current Gearman worker
"""
def __init__(self):
self.stop = False
self.ovdm = OpenVDM()
self.task = None
self.cruise_id = self.ovdm.get_cruise_id()
self.lowering_id = self.ovdm.get_lowering_id()
self.lowering_start_date = self.ovdm.get_lowering_start_date()
self.shipboard_data_warehouse_config = self.ovdm.get_shipboard_data_warehouse_config()
self.lowering_dir = os.path.join(self.shipboard_data_warehouse_config['shipboardDataWarehouseBaseDir'], self.cruise_id, self.shipboard_data_warehouse_config['loweringDataBaseDir'], self.lowering_id)
super().__init__(host_list=[self.ovdm.get_gearman_server()])
@staticmethod
def _get_custom_task(current_job):
"""
Fetch task metadata
"""
task = list(filter(lambda task: task['name'] == current_job.task, CUSTOM_TASKS))
return task[0] if len(task) > 0 else None
def on_job_execute(self, current_job):
"""
Function run whenever a new job arrives
"""
logging.debug("current_job: %s", current_job)
payload_obj = json.loads(current_job.data)
self.task = self._get_custom_task(current_job) if self._get_custom_task(current_job) is not None else self.ovdm.get_task_by_name(current_job.task)
logging.debug("task: %s", self.task)
if int(self.task['taskID']) > 0:
self.ovdm.set_running_task(self.task['taskID'], os.getpid(), current_job.handle)
# else:
# self.ovdm.track_gearman_job(taskLookup[current_job.task], os.getpid(), current_job.handle)
logging.info("Job: %s (%s) started at: %s", self.task['longName'], current_job.handle, time.strftime("%D %T", time.gmtime()))
self.cruise_id = payload_obj['cruiseID'] if 'cruiseID' in payload_obj else self.ovdm.get_cruise_id()
self.lowering_id = payload_obj['loweringID'] if 'loweringID' in payload_obj else self.ovdm.get_lowering_id()
self.lowering_start_date = payload_obj['loweringStartDate'] if 'loweringStartDate' in payload_obj else self.ovdm.get_lowering_start_date()
self.shipboard_data_warehouse_config = self.ovdm.get_shipboard_data_warehouse_config()
self.lowering_dir = os.path.join(self.shipboard_data_warehouse_config['shipboardDataWarehouseBaseDir'], self.cruise_id, self.shipboard_data_warehouse_config['loweringDataBaseDir'], self.lowering_id)
return super().on_job_execute(current_job)
def on_job_exception(self, current_job, exc_info):
"""
Function run whenever the current job has an exception
"""
logging.error("Job: %s (%s) failed at: %s", self.task['longName'], current_job.handle, time.strftime("%D %T", time.gmtime()))
self.send_job_data(current_job, json.dumps([{"partName": "Worker crashed", "result": "Fail", "reason": "Unknown"}]))
if int(self.task['taskID']) > 0:
self.ovdm.set_error_task(self.task['taskID'], "Worker crashed")
else:
self.ovdm.send_msg(self.task['longName'] + ' failed', 'Worker crashed')
exc_type, _, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        logging.error("%s in %s at line %d", exc_type, fname, exc_tb.tb_lineno)
return super().on_job_exception(current_job, exc_info)
def on_job_complete(self, current_job, job_result):
"""
Function run whenever the current job completes
"""
results_obj = json.loads(job_result)
if len(results_obj['parts']) > 0:
if results_obj['parts'][-1]['result'] == "Fail": # Final Verdict
if int(self.task['taskID']) > 0:
self.ovdm.set_error_task(self.task['taskID'], results_obj['parts'][-1]['reason'])
else:
self.ovdm.send_msg(self.task['longName'] + ' failed', results_obj['parts'][-1]['reason'])
else:
if int(self.task['taskID']) > 0:
self.ovdm.set_idle_task(self.task['taskID'])
else:
if int(self.task['taskID']) > 0:
self.ovdm.set_idle_task(self.task['taskID'])
logging.debug("Job Results: %s", json.dumps(results_obj, indent=2))
logging.info("Job: %s (%s) completed at: %s", self.task['longName'], current_job.handle, time.strftime("%D %T", time.gmtime()))
return super().send_job_complete(current_job, job_result)
def stop_task(self):
"""
Function to stop the current job
"""
self.stop = True
logging.warning("Stopping current task...")
def quit_worker(self):
"""
Function to quit the worker
"""
self.stop = True
logging.warning("Quitting worker...")
self.shutdown()
def task_create_lowering_directory(gearman_worker, gearman_job):
"""
Setup the lowering directory for the specified lowering ID
"""
job_results = {'parts':[]}
payload_obj = json.loads(gearman_job.data)
logging.debug("Payload: %s", json.dumps(payload_obj, indent=2))
gearman_worker.send_job_status(gearman_job, 1, 10)
cruise_dir = os.path.join(gearman_worker.shipboard_data_warehouse_config['shipboardDataWarehouseBaseDir'], gearman_worker.cruise_id)
lowering_data_base_dir = os.path.join(cruise_dir, gearman_worker.shipboard_data_warehouse_config['loweringDataBaseDir'])
if os.path.exists(cruise_dir):
job_results['parts'].append({"partName": "Verify Cruise Directory exists", "result": "Pass"})
else:
logging.error("Failed to find cruise directory: %s", cruise_dir)
job_results['parts'].append({"partName": "Verify Cruise Directory exists", "result": "Fail", "reason": "Unable to find cruise directory: " + cruise_dir})
return json.dumps(job_results)
if os.path.exists(lowering_data_base_dir):
job_results['parts'].append({"partName": "Verify Lowering Data Directory exists", "result": "Pass"})
else:
logging.error("Lowering Data Directory doesn not exist: %s", lowering_data_base_dir)
job_results['parts'].append({"partName": "Verify Lowering Data Directory exists", "result": "Fail", "reason": "Unable to find lowering data base directory: " + lowering_data_base_dir})
return json.dumps(job_results)
if not os.path.exists(gearman_worker.lowering_dir):
job_results['parts'].append({"partName": "Verify Lowering Directory does not exists", "result": "Pass"})
else:
logging.error("Lowering directory already exists: %s", gearman_worker.lowering_dir)
job_results['parts'].append({"partName": "Verify Lowering Directory does not exists", "result": "Fail", "reason": "Lowering directory " + gearman_worker.lowering_dir + " already exists"})
return json.dumps(job_results)
gearman_worker.send_job_status(gearman_job, 2, 10)
directorylist = build_directorylist(gearman_worker)
logging.debug("Directory List: %s", json.dumps(directorylist, indent=2))
if len(directorylist) > 0:
job_results['parts'].append({"partName": "Build Directory List", "result": "Pass"})
else:
logging.warning("Directory list is empty")
job_results['parts'].append({"partName": "Build Directory List", "result": "Fail", "reason": "Empty list of directories to create"})
return json.dumps(job_results)
gearman_worker.send_job_status(gearman_job, 5, 10)
output_results = create_directories(directorylist)
if output_results['verdict']:
job_results['parts'].append({"partName": "Create Directories", "result": "Pass"})
else:
logging.error("Failed to create any/all of the lowering data directory structure")
job_results['parts'].append({"partName": "Create Directories", "result": "Fail", "reason": output_results['reason']})
gearman_worker.send_job_status(gearman_job, 8, 10)
output_results = set_owner_group_permissions(gearman_worker.shipboard_data_warehouse_config['shipboardDataWarehouseUsername'], gearman_worker.lowering_dir)
if output_results['verdict']:
job_results['parts'].append({"partName": "Set cruise directory ownership/permissions", "result": "Pass"})
else:
job_results['parts'].append({"partName": "Set cruise directory ownership/permissions", "result": "Fail", "reason": output_results['reason']})
return json.dumps(job_results)
gearman_worker.send_job_status(gearman_job, 10, 10)
return json.dumps(job_results)
def task_set_lowering_data_directory_permissions(gearman_worker, gearman_job):
"""
Set the permissions for the specified lowering ID
"""
job_results = {'parts':[]}
payload_obj = json.loads(gearman_job.data)
logging.debug("Payload: %s", json.dumps(payload_obj, indent=2))
gearman_worker.send_job_status(gearman_job, 5, 10)
if os.path.isdir(gearman_worker.lowering_dir):
logging.info("Clear read permissions")
set_owner_group_permissions(gearman_worker.shipboard_data_warehouse_config['shipboardDataWarehouseUsername'], gearman_worker.lowering_dir)
job_results['parts'].append({"partName": "Set Directory Permissions for current lowering", "result": "Pass"})
job_results['parts'].append({"partName": "Set LoweringData Directory Permissions", "result": "Pass"})
gearman_worker.send_job_status(gearman_job, 10, 10)
return json.dumps(job_results)
def task_rebuild_lowering_directory(gearman_worker, gearman_job):
"""
Verify and create if necessary all the lowering sub-directories
"""
job_results = {'parts':[]}
payload_obj = json.loads(gearman_job.data)
logging.debug("Payload: %s", json.dumps(payload_obj, indent=2))
gearman_worker.send_job_status(gearman_job, 1, 10)
if not os.path.exists(gearman_worker.lowering_dir):
logging.error("Lowering directory not found")
job_results['parts'].append({"partName": "Verify Lowering Directory exists", "result": "Fail", "reason": "Unable to find lowering directory: " + gearman_worker.lowering_dir})
return json.dumps(job_results)
job_results['parts'].append({"partName": "Verify Lowering Directory exists", "result": "Pass"})
gearman_worker.send_job_status(gearman_job, 2, 10)
logging.info("Build directory list")
directorylist = build_directorylist(gearman_worker)
logging.debug("Directory List: %s", json.dumps(directorylist, indent=2))
if len(directorylist) == 0:
logging.error("Directory list is empty")
job_results['parts'].append({"partName": "Build Directory List", "result": "Fail", "reason": "Empty list of directories to create"})
return json.dumps(job_results)
job_results['parts'].append({"partName": "Build Directory List", "result": "Pass"})
gearman_worker.send_job_status(gearman_job, 5, 10)
logging.info("Create directories")
output_results = create_directories(directorylist)
if output_results['verdict']:
job_results['parts'].append({"partName": "Create Directories", "result": "Pass"})
else:
logging.error("Unable to create any/all of the lowering data directory structure")
job_results['parts'].append({"partName": "Create Directories", "result": "Fail", "reason": output_results['reason']})
gearman_worker.send_job_status(gearman_job, 7, 10)
logging.info("Set directory ownership/permissions")
output_results = set_owner_group_permissions(gearman_worker.shipboard_data_warehouse_config['shipboardDataWarehouseUsername'], gearman_worker.lowering_dir)
if output_results['verdict']:
job_results['parts'].append({"partName": "Set Directory ownership/permissions", "result": "Pass"})
else:
logging.error("Failed to set directory ownership")
job_results['parts'].append({"partName": "Set Directory ownership/permissions", "result": "Fail", "reason": output_results['reason']})
gearman_worker.send_job_status(gearman_job, 10, 10)
return json.dumps(job_results)
# -------------------------------------------------------------------------------------
# Required python code for running the script as a stand-alone utility
# -------------------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Handle Lowering data directory related tasks')
parser.add_argument('-v', '--verbosity', dest='verbosity',
default=0, action='count',
help='Increase output verbosity')
parsed_args = parser.parse_args()
############################
# Set up logging before we do any other argument parsing (so that we
# can log problems with argument parsing).
LOGGING_FORMAT = '%(asctime)-15s %(levelname)s - %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
LOG_LEVELS = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
parsed_args.verbosity = min(parsed_args.verbosity, max(LOG_LEVELS))
logging.getLogger().setLevel(LOG_LEVELS[parsed_args.verbosity])
logging.debug("Creating Worker...")
new_worker = OVDMGearmanWorker()
new_worker.set_client_id(__file__)
logging.debug("Defining Signal Handlers...")
def sigquit_handler(_signo, _stack_frame):
"""
Signal Handler for QUIT
"""
logging.warning("QUIT Signal Received")
new_worker.stop_task()
def sigint_handler(_signo, _stack_frame):
"""
Signal Handler for INT
"""
logging.warning("INT Signal Received")
new_worker.quit_worker()
signal.signal(signal.SIGQUIT, sigquit_handler)
signal.signal(signal.SIGINT, sigint_handler)
logging.info("Registering worker tasks...")
logging.info("\tTask: createLoweringDirectory")
new_worker.register_task("createLoweringDirectory", task_create_lowering_directory)
logging.info("\tTask: setLoweringDataDirectoryPermissions")
new_worker.register_task("setLoweringDataDirectoryPermissions", task_set_lowering_data_directory_permissions)
logging.info("\tTask: rebuildLoweringDirectory")
new_worker.register_task("rebuildLoweringDirectory", task_rebuild_lowering_directory)
logging.info("Waiting for jobs...")
new_worker.work()
| gpl-3.0 | -6,683,112,757,770,289,000 | 38.993435 | 209 | 0.663621 | false |
openemr/demo-data-generator | demodata/scripts/patient.py | 1 | 4202 | """Generate patient data"""
import barnum
from demodata.util import *
__copyright__ = "Copyright (C) 2017 Robert Down"
__author__ = "Robert Down <[email protected]>"
__license__ = "GNU GPL3"
def generate_patients(count=1):
random.seed()
patients = []
for x in range(0, count):
sex = 'Male' if random.randint(0, 1) % 2 else 'Female'
fname, lname = barnum.create_name(gender=sex)
mname = barnum.create_name(False, sex) if random_truth(0.27) == 1 else ''
street, city, state, postal_code = generate_address()
dob = barnum.create_birthday(1, 100)
patient = {
'title': generate_title(sex),
'language': '',
'financial': '',
'fname': fname,
'lname': lname,
'mname': mname,
'DOB': dob.strftime("%Y-%m-%d"),
'street': street,
'postal_code': postal_code,
'city': city,
'state': state,
'country_code': 'US',
'drivers_license': random_drivers_license(lname[0], int(dob.strftime("%y"))),
'ss': random.randint(100000000, 999999999),
'occupation': barnum.create_job_title(),
'phone_home': barnum.create_phone(postal_code),
'phone_biz': barnum.create_phone(postal_code),
'phone_contact': barnum.create_phone(postal_code),
'phone_cell': barnum.create_phone(postal_code),
'pharmacy_id': 1,
'status': '',
'contact_relationship': '',
'date': barnum.create_date(past=True).strftime("%Y-%m-%d"),
'sex': sex,
'referrer': '',
'referrerID': '',
'providerID': 0,
'ref_providerID': 0,
'email': barnum.create_email(name=(fname, lname)),
'email_direct': '',
'ethnoracial': '',
'race': '',
'ethnicity': '',
'religion': '',
'interpretter': '',
'migrantseasonal': '',
'family_size': random.randint(1, 8),
'monthly_income': '',
'billing_note': '',
'homeless': '',
'financial_review': barnum.create_date(past=True).strftime("%Y-%m-%d"),
'pubpid': '',
'pid': str(random.randint(1, 9999999999)),
'hipaa_mail': 'yes' if random_truth(0.90) == 1 else 'no',
'hipaa_voice': 'yes' if random_truth(0.75) == 1 else 'no',
'hipaa_notice': 'yes' if random_truth(0.93) == 1 else 'no',
'hipaa_message': 'yes' if random_truth(0.90) == 1 else 'no',
'hipaa_allowsms': 'yes' if random_truth(0.50) == 1 else 'no',
'hipaa_allowemail': 'yes' if random_truth(0.70) == 1 else 'no',
}
patients.append(patient)
return patients
def random_drivers_license(initial_letter=None, year=None):
if not initial_letter:
letters = (
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
'V', 'W', 'X', 'Y', 'Z')
initial_letter = random.choice(letters)
if not year:
year = random.randint(10, 99)
    triplicate = [random.randint(0, 999), random.randint(100, 999), random.randint(100, 999)]
return "%s%i-%i-%i-%i" % (initial_letter, triplicate[0], triplicate[1], year, triplicate[2])
def generate_title(gender=None):
"""
Randomly select a title based on given gender
Example: "Mr."
"""
random.seed()
if gender is None:
return ''
if gender == 'Male':
titles = {'': 47, 'Mr.': 49, 'Dr.': 3}
if gender == 'Female':
titles = {'': 47, 'Mrs.': 30, 'Ms.': 23}
weighted_titles = []
for t, w in titles.items():
weighted_titles.extend(repeat(t, w))
r = random.randint(0, 99)
return weighted_titles[r - 1 if r > 0 else 0]
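# Implementation note: the weights above are expanded into a list with one
# entry per percentage point and sampled by index; the male weights sum to 99
# rather than 100, which slightly skews the distribution but, together with
# the r-1 indexing, keeps every lookup in range.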
def generate_pharmacy():
name = barnum.create_company_name()
    street, city, state, postal_code = generate_address()
    email = barnum.create_email()
    phone = barnum.create_phone(postal_code)
    fax = barnum.create_phone(postal_code)
    return name, street, city, state, postal_code, email, phone, fax
| gpl-3.0 | -1,416,938,328,447,985,700 | 33.727273 | 116 | 0.519039 | false |
geographika/OWSLib | owslib/wmts.py | 1 | 31765 | # -*- coding: UTF-8 -*-
# =============================================================================
# Copyright (C) 2012 Brad Hards <[email protected]>
#
# Based on wms.py, which has the following copyright statement:
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : Sean Gillies <[email protected]>
# Julien Anguenot <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
"""
Abstract
--------
The wmts module of the OWSlib package provides client-side functionality
for fetching tiles from an OGC Web Map Tile Service (WMTS)
Disclaimer
----------
PLEASE NOTE: the owslib wmts module should be considered in early-beta
state: it has been tested against only one WMTS server (NASA EODSIS).
More extensive testing is needed and feedback (to [email protected])
would be appreciated.
"""
from __future__ import (absolute_import, division, print_function)
from random import randint
import warnings
import six
from six.moves import filter
try: # Python 3
from urllib.parse import (urlencode, urlparse, urlunparse, parse_qs,
ParseResult)
except ImportError: # Python 2
from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qs, ParseResult
from .etree import etree
from .util import openURL, testXMLValue, getXMLInteger
from .fgdc import Metadata
from .iso import MD_Metadata
from .ows import ServiceProvider, ServiceIdentification, OperationsMetadata
_OWS_NS = '{http://www.opengis.net/ows/1.1}'
_WMTS_NS = '{http://www.opengis.net/wmts/1.0}'
_XLINK_NS = '{http://www.w3.org/1999/xlink}'
_ABSTRACT_TAG = _OWS_NS + 'Abstract'
_IDENTIFIER_TAG = _OWS_NS + 'Identifier'
_LOWER_CORNER_TAG = _OWS_NS + 'LowerCorner'
_OPERATIONS_METADATA_TAG = _OWS_NS + 'OperationsMetadata'
_SERVICE_IDENTIFICATION_TAG = _OWS_NS + 'ServiceIdentification'
_SERVICE_PROVIDER_TAG = _OWS_NS + 'ServiceProvider'
_SUPPORTED_CRS_TAG = _OWS_NS + 'SupportedCRS'
_TITLE_TAG = _OWS_NS + 'Title'
_UPPER_CORNER_TAG = _OWS_NS + 'UpperCorner'
_WGS84_BOUNDING_BOX_TAG = _OWS_NS + 'WGS84BoundingBox'
_CONTENTS_TAG = _WMTS_NS + 'Contents'
_FORMAT_TAG = _WMTS_NS + 'Format'
_INFO_FORMAT_TAG = _WMTS_NS + 'InfoFormat'
_LAYER_TAG = _WMTS_NS + 'Layer'
_LAYER_REF_TAG = _WMTS_NS + 'LayerRef'
_MATRIX_HEIGHT_TAG = _WMTS_NS + 'MatrixHeight'
_MATRIX_WIDTH_TAG = _WMTS_NS + 'MatrixWidth'
_MAX_TILE_COL_TAG = _WMTS_NS + 'MaxTileCol'
_MAX_TILE_ROW_TAG = _WMTS_NS + 'MaxTileRow'
_MIN_TILE_COL_TAG = _WMTS_NS + 'MinTileCol'
_MIN_TILE_ROW_TAG = _WMTS_NS + 'MinTileRow'
_RESOURCE_URL_TAG = _WMTS_NS + 'ResourceURL'
_SCALE_DENOMINATOR_TAG = _WMTS_NS + 'ScaleDenominator'
_SERVICE_METADATA_URL_TAG = _WMTS_NS + 'ServiceMetadataURL'
_STYLE_TAG = _WMTS_NS + 'Style'
_THEME_TAG = _WMTS_NS + 'Theme'
_THEMES_TAG = _WMTS_NS + 'Themes'
_TILE_HEIGHT_TAG = _WMTS_NS + 'TileHeight'
_TILE_MATRIX_SET_LINK_TAG = _WMTS_NS + 'TileMatrixSetLink'
_TILE_MATRIX_SET_TAG = _WMTS_NS + 'TileMatrixSet'
_TILE_MATRIX_SET_LIMITS_TAG = _WMTS_NS + 'TileMatrixSetLimits'
_TILE_MATRIX_LIMITS_TAG = _WMTS_NS + 'TileMatrixLimits'
_TILE_MATRIX_TAG = _WMTS_NS + 'TileMatrix'
_TILE_WIDTH_TAG = _WMTS_NS + 'TileWidth'
_TOP_LEFT_CORNER_TAG = _WMTS_NS + 'TopLeftCorner'
_KEYWORDS_TAG = _OWS_NS + 'Keywords'
_KEYWORD_TAG = _OWS_NS + 'Keyword'
_HREF_TAG = _XLINK_NS + 'href'
class ServiceException(Exception):
"""WMTS ServiceException
Attributes:
message -- short error message
xml -- full xml error message from server
"""
def __init__(self, message, xml):
self.message = message
self.xml = xml
def __str__(self):
return repr(self.message)
class CapabilitiesError(Exception):
pass
class WebMapTileService(object):
"""Abstraction for OGC Web Map Tile Service (WMTS).
Implements IWebMapService.
"""
def __getitem__(self, name):
'''Check contents dictionary to allow dict like access to
service layers'''
if name in self.__getattribute__('contents'):
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, version='1.0.0', xml=None, username=None,
password=None, parse_remote_metadata=False,
vendor_kwargs=None):
"""Initialize.
Parameters
----------
url : string
Base URL for the WMTS service.
version : string
Optional WMTS version. Defaults to '1.0.0'.
xml : string
Optional XML content to use as the content for the initial
GetCapabilities request. Typically only used for testing.
username : string
Optional user name for authentication.
password : string
Optional password for authentication.
        parse_remote_metadata : bool
Currently unused.
vendor_kwargs : dict
Optional vendor-specific parameters to be included in all
requests.
"""
self.url = url
self.username = username
self.password = password
self.version = version
self.vendor_kwargs = vendor_kwargs
self._capabilities = None
# Authentication handled by Reader
reader = WMTSCapabilitiesReader(self.version, url=self.url,
un=self.username, pw=self.password)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url, self.vendor_kwargs)
# Avoid building capabilities metadata if the response is a
# ServiceExceptionReport.
# TODO: check if this needs a namespace
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _getcapproperty(self):
if not self._capabilities:
reader = WMTSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
xml = reader.read(self.url, self.vendor_kwargs)
self._capabilities = ServiceMetadata(xml)
return self._capabilities
def _buildMetadata(self, parse_remote_metadata=False):
''' set up capabilities metadata objects '''
self.updateSequence = self._capabilities.attrib.get('updateSequence')
# serviceIdentification metadata
serviceident = self._capabilities.find(_SERVICE_IDENTIFICATION_TAG)
self.identification = ServiceIdentification(serviceident)
# serviceProvider metadata
serviceprov = self._capabilities.find(_SERVICE_PROVIDER_TAG)
if serviceprov is not None:
self.provider = ServiceProvider(serviceprov)
# serviceOperations metadata
self.operations = []
serviceop = self._capabilities.find(_OPERATIONS_METADATA_TAG)
# REST only WMTS does not have any Operations
if serviceop is not None:
for elem in serviceop[:]:
self.operations.append(OperationsMetadata(elem))
# serviceContents metadata: our assumption is that services use
# a top-level layer as a metadata organizer, nothing more.
self.contents = {}
caps = self._capabilities.find(_CONTENTS_TAG)
def gather_layers(parent_elem, parent_metadata):
for index, elem in enumerate(parent_elem.findall(_LAYER_TAG)):
cm = ContentMetadata(
elem, parent=parent_metadata, index=index+1,
parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
raise KeyError('Content metadata for layer "%s" '
'already exists' % cm.id)
self.contents[cm.id] = cm
gather_layers(elem, cm)
gather_layers(caps, None)
self.tilematrixsets = {}
for elem in caps.findall(_TILE_MATRIX_SET_TAG):
tms = TileMatrixSet(elem)
if tms.identifier:
if tms.identifier in self.tilematrixsets:
raise KeyError('TileMatrixSet with identifier "%s" '
'already exists' % tms.identifier)
self.tilematrixsets[tms.identifier] = tms
self.themes = {}
for elem in self._capabilities.findall(_THEMES_TAG + '/' + _THEME_TAG):
theme = Theme(elem)
if theme.identifier:
if theme.identifier in self.themes:
raise KeyError('Theme with identifier "%s" already exists'
% theme.identifier)
self.themes[theme.identifier] = theme
serviceMetadataURL = self._capabilities.find(_SERVICE_METADATA_URL_TAG)
if serviceMetadataURL is not None:
self.serviceMetadataURL = serviceMetadataURL.attrib[_HREF_TAG]
else:
self.serviceMetadataURL = None
def items(self):
'''supports dict-like items() access'''
items = []
for item in self.contents:
items.append((item, self.contents[item]))
return items
def buildTileRequest(self, layer=None, style=None, format=None,
tilematrixset=None, tilematrix=None, row=None,
column=None, **kwargs):
"""Return the URL-encoded parameters for a GetTile request.
Parameters
----------
layer : string
Content layer name.
style : string
Optional style name. Defaults to the first style defined for
the relevant layer in the GetCapabilities response.
format : string
Optional output image format, such as 'image/jpeg'.
Defaults to the first format defined for the relevant layer
in the GetCapabilities response.
tilematrixset : string
Optional name of tile matrix set to use.
Defaults to the first tile matrix set defined for the
relevant layer in the GetCapabilities response.
tilematrix : string
Name of the tile matrix to use.
row : integer
Row index of tile to request.
column : integer
Column index of tile to request.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
>>> wmts = WebMapTileService(url)
>>> wmts.buildTileRequest(layer='VIIRS_CityLights_2012',
... tilematrixset='EPSG4326_500m',
... tilematrix='6',
... row=4, column=4)
'SERVICE=WMTS&REQUEST=GetTile&VERSION=1.0.0&\
LAYER=VIIRS_CityLights_2012&STYLE=default&TILEMATRIXSET=EPSG4326_500m&\
TILEMATRIX=6&TILEROW=4&TILECOL=4&FORMAT=image%2Fjpeg'
"""
if (layer is None):
raise ValueError("layer is mandatory (cannot be None)")
if style is None:
style = list(self[layer].styles.keys())[0]
if format is None:
format = self[layer].formats[0]
if tilematrixset is None:
tilematrixset = sorted(self[layer].tilematrixsetlinks.keys())[0]
if tilematrix is None:
msg = 'tilematrix (zoom level) is mandatory (cannot be None)'
raise ValueError(msg)
if row is None:
raise ValueError("row is mandatory (cannot be None)")
if column is None:
raise ValueError("column is mandatory (cannot be None)")
request = list()
request.append(('SERVICE', 'WMTS'))
request.append(('REQUEST', 'GetTile'))
request.append(('VERSION', '1.0.0'))
request.append(('LAYER', layer))
request.append(('STYLE', style))
request.append(('TILEMATRIXSET', tilematrixset))
request.append(('TILEMATRIX', tilematrix))
request.append(('TILEROW', str(row)))
request.append(('TILECOL', str(column)))
request.append(('FORMAT', format))
for key, value in six.iteritems(kwargs):
request.append((key, value))
data = urlencode(request, True)
return data
def buildTileResource(self, layer=None, style=None, format=None,
tilematrixset=None, tilematrix=None, row=None,
column=None, **kwargs):
tileresourceurls = []
for resourceURL in self[layer].resourceURLs:
if resourceURL['resourceType'] == 'tile':
tileresourceurls.append(resourceURL)
numres = len(tileresourceurls)
if numres > 0:
# choose random ResourceURL if more than one available
resindex = randint(0, numres - 1)
resurl = tileresourceurls[resindex]['template']
if tilematrixset:
resurl = resurl.replace('{TileMatrixSet}', tilematrixset)
resurl = resurl.replace('{TileMatrix}', tilematrix)
resurl = resurl.replace('{TileRow}', row)
resurl = resurl.replace('{TileCol}', column)
if style:
resurl = resurl.replace('{Style}', style)
return resurl
return None
@property
def restonly(self):
# if OperationsMetadata is missing completely --> use REST
if len(self.operations) == 0:
return True
# check if KVP or RESTful are available
restenc = False
kvpenc = False
for operation in self.operations:
if operation.name == 'GetTile':
for method in operation.methods:
if 'kvp' in str(method['constraints']).lower():
kvpenc = True
if 'rest' in str(method['constraints']).lower():
restenc = True
# if KVP is available --> use KVP
if kvpenc:
return False
# if the operation has no constraint --> use KVP
if not kvpenc and not restenc:
return False
return restenc
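    # Informative summary of the decision above: no advertised operations at
    # all -> assume REST-only; a GetTile method constrained to KVP -> use KVP;
    # a GetTile method with no encoding constraint at all -> default to KVP;
    # otherwise honour the REST flag.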
def gettile(self, base_url=None, layer=None, style=None, format=None,
tilematrixset=None, tilematrix=None, row=None, column=None,
**kwargs):
"""Return a tile from the WMTS.
Returns the tile image as a file-like object.
Parameters
----------
base_url : string
Optional URL for request submission. Defaults to the URL of
the GetTile operation as declared in the GetCapabilities
response.
layer : string
Content layer name.
style : string
Optional style name. Defaults to the first style defined for
the relevant layer in the GetCapabilities response.
format : string
Optional output image format, such as 'image/jpeg'.
Defaults to the first format defined for the relevant layer
in the GetCapabilities response.
tilematrixset : string
Optional name of tile matrix set to use.
Defaults to the first tile matrix set defined for the
relevant layer in the GetCapabilities response.
tilematrix : string
Name of the tile matrix to use.
row : integer
Row index of tile to request.
column : integer
Column index of tile to request.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
>>> wmts = WebMapTileService(url)
>>> img = wmts.gettile(layer='VIIRS_CityLights_2012',\
tilematrixset='EPSG4326_500m',\
tilematrix='6',\
row=4, column=4)
>>> out = open('tile.jpg', 'wb')
>>> bytes_written = out.write(img.read())
>>> out.close()
"""
vendor_kwargs = self.vendor_kwargs or {}
vendor_kwargs.update(kwargs)
# REST only WMTS
if self.restonly:
resurl = self.buildTileResource(
layer, style, format, tilematrixset, tilematrix,
row, column, **vendor_kwargs)
u = openURL(resurl, username=self.username, password=self.password)
return u
        # KVP implementation
data = self.buildTileRequest(layer, style, format, tilematrixset,
tilematrix, row, column, **vendor_kwargs)
if base_url is None:
base_url = self.url
try:
methods = self.getOperationByName('GetTile').methods
get_verbs = [x for x in methods
if x.get('type').lower() == 'get']
if len(get_verbs) > 1:
# Filter by constraints
base_url = next(
x for x in filter(
list,
([pv.get('url')
for const in pv.get('constraints')
if 'kvp' in [x.lower() for x in const.values]]
for pv in get_verbs if pv.get('constraints'))))[0]
elif len(get_verbs) == 1:
base_url = get_verbs[0].get('url')
except StopIteration:
pass
u = openURL(base_url, data, username=self.username,
password=self.password)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = six.text_type(se_tree.find('ServiceException').text)
raise ServiceException(err_message.strip(), se_xml)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getfeatureinfo(self):
raise NotImplementedError
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class TileMatrixSet(object):
'''Holds one TileMatrixSet'''
def __init__(self, elem):
if elem.tag != _TILE_MATRIX_SET_TAG:
raise ValueError('%s should be a TileMatrixSet' % (elem,))
self.identifier = testXMLValue(elem.find(_IDENTIFIER_TAG)).strip()
self.crs = testXMLValue(elem.find(_SUPPORTED_CRS_TAG)).strip()
if self.crs is None or self.identifier is None:
raise ValueError('%s incomplete TileMatrixSet' % (elem,))
self.tilematrix = {}
for tilematrix in elem.findall(_TILE_MATRIX_TAG):
tm = TileMatrix(tilematrix)
if tm.identifier:
if tm.identifier in self.tilematrix:
raise KeyError('TileMatrix with identifier "%s" '
'already exists' % tm.identifier)
self.tilematrix[tm.identifier] = tm
class TileMatrix(object):
'''Holds one TileMatrix'''
def __init__(self, elem):
if elem.tag != _TILE_MATRIX_TAG:
raise ValueError('%s should be a TileMatrix' % (elem,))
self.identifier = testXMLValue(elem.find(_IDENTIFIER_TAG)).strip()
sd = testXMLValue(elem.find(_SCALE_DENOMINATOR_TAG))
if sd is None:
raise ValueError('%s is missing ScaleDenominator' % (elem,))
self.scaledenominator = float(sd)
tl = testXMLValue(elem.find(_TOP_LEFT_CORNER_TAG))
if tl is None:
raise ValueError('%s is missing TopLeftCorner' % (elem,))
(lon, lat) = tl.split(" ")
self.topleftcorner = (float(lon), float(lat))
width = testXMLValue(elem.find(_TILE_WIDTH_TAG))
height = testXMLValue(elem.find(_TILE_HEIGHT_TAG))
if (width is None) or (height is None):
msg = '%s is missing TileWidth and/or TileHeight' % (elem,)
raise ValueError(msg)
self.tilewidth = int(width)
self.tileheight = int(height)
mw = testXMLValue(elem.find(_MATRIX_WIDTH_TAG))
mh = testXMLValue(elem.find(_MATRIX_HEIGHT_TAG))
if (mw is None) or (mh is None):
msg = '%s is missing MatrixWidth and/or MatrixHeight' % (elem,)
raise ValueError(msg)
self.matrixwidth = int(mw)
self.matrixheight = int(mh)
class Theme:
"""
Abstraction for a WMTS theme
"""
def __init__(self, elem):
if elem.tag != _THEME_TAG:
raise ValueError('%s should be a Theme' % (elem,))
self.identifier = testXMLValue(elem.find(_IDENTIFIER_TAG)).strip()
title = testXMLValue(elem.find(_TITLE_TAG))
if title is not None:
self.title = title.strip()
else:
self.title = None
abstract = testXMLValue(elem.find(_ABSTRACT_TAG))
if abstract is not None:
self.abstract = abstract.strip()
else:
self.abstract = None
self.layerRefs = []
layerRefs = elem.findall(_LAYER_REF_TAG)
for layerRef in layerRefs:
if layerRef.text is not None:
self.layerRefs.append(layerRef.text)
class TileMatrixLimits(object):
"""
Represents a WMTS TileMatrixLimits element.
"""
def __init__(self, elem):
if elem.tag != _TILE_MATRIX_LIMITS_TAG:
raise ValueError('%s should be a TileMatrixLimits' % elem)
tm = elem.find(_TILE_MATRIX_TAG)
if tm is None:
raise ValueError('Missing TileMatrix in %s' % elem)
self.tilematrix = tm.text.strip()
self.mintilerow = getXMLInteger(elem, _MIN_TILE_ROW_TAG)
self.maxtilerow = getXMLInteger(elem, _MAX_TILE_ROW_TAG)
self.mintilecol = getXMLInteger(elem, _MIN_TILE_COL_TAG)
self.maxtilecol = getXMLInteger(elem, _MAX_TILE_COL_TAG)
def __repr__(self):
fmt = ('<TileMatrixLimits: {self.tilematrix}'
', minRow={self.mintilerow}, maxRow={self.maxtilerow}'
', minCol={self.mintilecol}, maxCol={self.maxtilecol}>')
return fmt.format(self=self)
class TileMatrixSetLink(object):
"""
Represents a WMTS TileMatrixSetLink element.
"""
@staticmethod
def from_elements(link_elements):
"""
Return a list of TileMatrixSetLink instances derived from the
given list of <TileMatrixSetLink> XML elements.
"""
# NB. The WMTS spec is contradictory re. the multiplicity
# relationships between Layer and TileMatrixSetLink, and
# TileMatrixSetLink and tileMatrixSet (URI).
# Try to figure out which model has been used by the server.
links = []
for link_element in link_elements:
matrix_set_elements = link_element.findall(_TILE_MATRIX_SET_TAG)
if len(matrix_set_elements) == 0:
raise ValueError('Missing TileMatrixSet in %s' % link_element)
elif len(matrix_set_elements) > 1:
set_limits_elements = link_element.findall(
_TILE_MATRIX_SET_LIMITS_TAG)
if set_limits_elements:
raise ValueError('Multiple instances of TileMatrixSet'
' plus TileMatrixSetLimits in %s' %
link_element)
for matrix_set_element in matrix_set_elements:
uri = matrix_set_element.text.strip()
links.append(TileMatrixSetLink(uri))
else:
uri = matrix_set_elements[0].text.strip()
tilematrixlimits = {}
path = '%s/%s' % (_TILE_MATRIX_SET_LIMITS_TAG,
_TILE_MATRIX_LIMITS_TAG)
for limits_element in link_element.findall(path):
tml = TileMatrixLimits(limits_element)
if tml.tilematrix:
if tml.tilematrix in tilematrixlimits:
msg = ('TileMatrixLimits with tileMatrix "%s" '
'already exists' % tml.tilematrix)
raise KeyError(msg)
tilematrixlimits[tml.tilematrix] = tml
links.append(TileMatrixSetLink(uri, tilematrixlimits))
return links
def __init__(self, tilematrixset, tilematrixlimits=None):
self.tilematrixset = tilematrixset
if tilematrixlimits is None:
self.tilematrixlimits = {}
else:
self.tilematrixlimits = tilematrixlimits
def __repr__(self):
fmt = ('<TileMatrixSetLink: {self.tilematrixset}'
', tilematrixlimits={{...}}>')
return fmt.format(self=self)
class ContentMetadata:
"""
Abstraction for WMTS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, index=0,
parse_remote_metadata=False):
if elem.tag != _LAYER_TAG:
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self.id = self.name = testXMLValue(elem.find(_IDENTIFIER_TAG))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find(_TITLE_TAG))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find(_ABSTRACT_TAG))
# bboxes
b = elem.find(_WGS84_BOUNDING_BOX_TAG)
self.boundingBox = None
if b is not None:
lc = b.find(_LOWER_CORNER_TAG)
uc = b.find(_UPPER_CORNER_TAG)
ll = [float(s) for s in lc.text.split()]
ur = [float(s) for s in uc.text.split()]
self.boundingBoxWGS84 = (ll[0], ll[1], ur[0], ur[1])
# TODO: there is probably some more logic here, and it should
# probably be shared code
self._tilematrixsets = [f.text.strip() for f in
elem.findall(_TILE_MATRIX_SET_LINK_TAG + '/' +
_TILE_MATRIX_SET_TAG)]
link_elements = elem.findall(_TILE_MATRIX_SET_LINK_TAG)
tile_matrix_set_links = TileMatrixSetLink.from_elements(link_elements)
self.tilematrixsetlinks = {}
for tmsl in tile_matrix_set_links:
if tmsl.tilematrixset:
if tmsl.tilematrixset in self.tilematrixsetlinks:
raise KeyError('TileMatrixSetLink with tilematrixset "%s"'
' already exists' %
tmsl.tilematrixset)
self.tilematrixsetlinks[tmsl.tilematrixset] = tmsl
self.resourceURLs = []
for resourceURL in elem.findall(_RESOURCE_URL_TAG):
resource = {}
for attrib in ['format', 'resourceType', 'template']:
resource[attrib] = resourceURL.attrib[attrib]
self.resourceURLs.append(resource)
# Styles
self.styles = {}
for s in elem.findall(_STYLE_TAG):
style = {}
isdefaulttext = s.attrib.get('isDefault')
style['isDefault'] = (isdefaulttext == "true")
identifier = s.find(_IDENTIFIER_TAG)
if identifier is None:
raise ValueError('%s missing identifier' % (s,))
title = s.find(_TITLE_TAG)
if title is not None:
style['title'] = title.text
self.styles[identifier.text] = style
self.formats = [f.text for f in elem.findall(_FORMAT_TAG)]
self.keywords = [f.text for f in elem.findall(
_KEYWORDS_TAG+'/'+_KEYWORD_TAG)]
self.infoformats = [f.text for f in elem.findall(_INFO_FORMAT_TAG)]
self.layers = []
for child in elem.findall(_LAYER_TAG):
self.layers.append(ContentMetadata(child, self))
@property
def tilematrixsets(self):
        # NB. This attribute has been superseded by the
# `tilematrixsetlinks` attribute defined below, but is included
# for now to provide continuity.
warnings.warn("The 'tilematrixsets' attribute has been deprecated"
" and will be removed in a future version of OWSLib."
" Please use 'tilematrixsetlinks' instead.")
return self._tilematrixsets
def __str__(self):
return 'Layer Name: %s Title: %s' % (self.name, self.title)
class WMTSCapabilitiesReader:
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.0.0', url=None, un=None, pw=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
def capabilities_url(self, service_url, vendor_kwargs=None):
"""Return a capabilities url
"""
# Ensure the 'service', 'request', and 'version' parameters,
# and any vendor-specific parameters are included in the URL.
pieces = urlparse(service_url)
args = parse_qs(pieces.query)
if 'service' not in args:
args['service'] = 'WMTS'
if 'request' not in args:
args['request'] = 'GetCapabilities'
if 'version' not in args:
args['version'] = self.version
if vendor_kwargs:
args.update(vendor_kwargs)
query = urlencode(args, doseq=True)
pieces = ParseResult(pieces.scheme, pieces.netloc,
pieces.path, pieces.params,
query, pieces.fragment)
return urlunparse(pieces)
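    # Example (hypothetical endpoint):
    #     reader.capabilities_url('http://example.com/wmts')
    # yields roughly
    #     'http://example.com/wmts?service=WMTS&request=GetCapabilities&version=1.0.0'
    # (parameter order may vary, since the query string is rebuilt from a dict).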
def read(self, service_url, vendor_kwargs=None):
"""Get and parse a WMTS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters. Optional vendor-specific
parameters can also be supplied as a dict.
"""
getcaprequest = self.capabilities_url(service_url, vendor_kwargs)
# now split it up again to use the generic openURL function...
spliturl = getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get',
username=self.username, password=self.password)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WMTS capabilities document, returning an elementtree instance
string should be an XML capabilities document
"""
if not isinstance(st, str) and not isinstance(st, bytes):
msg = 'String must be of type string or bytes, not %s' % type(st)
raise ValueError(msg)
return etree.fromstring(st)
| bsd-3-clause | -384,863,083,138,570,700 | 37.690621 | 80 | 0.577459 | false |
stevenmirabito/GoalSentry | api/goalsentry/__init__.py | 1 | 5717 | """
Goal Sentry API
Main Application
"""
from flask import Flask, request
from database import session as db
from json import dumps as jsonify
from utilities import row2dict
import authentication
import models
app = Flask(__name__)
@app.route("/")
def hello():
return "<h1>Goal Sentry API</h1>"
"""
Standard Return Format Helpers
"""
def return_user(user):
user_dict = row2dict(user)
scores_list = []
scores = models.Score.query.filter_by(user_id=user.id)
for score in scores:
scores_list.append(row2dict(score))
user_dict["scores"] = scores_list
return user_dict
def return_game(game):
game_dict = row2dict(game)
game_dict["completed"] = (not not game.time_completed)
scores_list = []
    for score in game.scores:
        scores_list.append(row2dict(score))
    game_dict["scores"] = scores_list
    return game_dict
def return_table(table):
table_dict = row2dict(table)
last_game = models.Game.query.filter_by(table_id=table.id).order_by(models.Game.time_started.desc()).first()
if not last_game.time_completed:
# Game is still in progress
table_dict["in_use"] = True
table_dict["game"] = row2dict(last_game)
else:
table_dict["in_use"] = False
return table_dict
def return_success():
return {
"status": {
"success": True
}
}
def return_error(e):
return {
"status": {
"success": False,
"message": e.message
}
}
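# Envelope sketch: once serialized by the route handlers, return_success()
# produces {"status": {"success": true}} and return_error(e) produces
# {"status": {"success": false, "message": "..."}}.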
"""
User Routes
"""
@app.route("/users", methods=["GET"])
def get_all_users():
users = []
for user in models.User.query.all():
user_dict = row2dict(user)
users.append(user_dict)
return jsonify(users)
@app.route("/user/<user_id>", methods=["GET"])
def get_user_by_id(user_id):
try:
user = models.User.query.filter_by(id=user_id).first()
response = return_user(user)
except Exception as e:
response = return_error(e)
return jsonify(response)
@app.route("/users", methods=["POST"])
def register_user(user_data=None):
if not user_data:
request.get_json(force=True)["user"]
try:
user = models.User(username=user_data["username"], name=user_data["name"], email=user_data["email"])
db.add(user)
db.commit()
response = return_success()
except Exception as e:
response = return_error(e)
return jsonify(response)
"""
Game Routes
"""
@app.route("/games", methods=["GET"])
def get_all_games():
games = []
for game in models.Game.query.all():
game_dict = row2dict(game)
games.append(game_dict)
return jsonify(games)
@app.route("/game/<game_id>", methods=["GET"])
def get_game_by_id(game_id):
try:
game = models.Game.query.filter_by(id=game_id).first()
response = row2dict(game)
except Exception as e:
response = return_error(e)
return jsonify(response)
@app.route("/games", methods=["POST"])
def new_game():
data = request.get_json(force=True)
try:
game = models.Game(table_id=data["game"]["table_id"])
db.add(game)
db.commit()
response = return_success()
except Exception as e:
response = return_error(e)
return jsonify(response)
@app.route("/game/<game_id>/authenticate", methods=["POST"])
def authenticate_to_game(game_id):
data = request.get_json(force=True)
try:
auth = authentication.Authentication()
user_data = auth.user_from_identifier(data["authenticate"]["identifier"])
users = models.User.query.filter_by(username=user_data["username"])
        if users.count() > 0:
# User is already registered
user_id = users.first().id
else:
# User is not registered, register them
register_user(user_data=user_data)
user_id = models.User.query.filter_by(username=user_data["username"]).first().id
game = models.Game.query.filter_by(id=game_id).first()
score = models.Score(user_id=user_id, game_id=game.id)
db.add(score)
db.commit()
response = return_success()
except Exception as e:
response = return_error(e)
return jsonify(response)
@app.route("/game/<game_id>", methods=["DELETE"])
def delete_game(game_id):
try:
game = models.Game.query.filter_by(id=game_id).first()
db.delete(game)
for score in models.Score.query.filter_by(game_id=game_id):
db.delete(score)
db.commit()
response = {
"status": {
"success": True
}
}
except Exception as e:
response = return_error(e)
return jsonify(response)
"""
Table Routes
"""
@app.route("/tables", methods=["GET"])
def get_all_tables():
tables = []
for table in models.Table.query.all():
tables.append(return_table(table))
return jsonify(tables)
@app.route("/tables", methods=["POST"])
def new_table():
data = request.get_json(force=True)
try:
table = models.Table(name=data["table"]["name"])
db.add(table)
db.commit()
response = return_success()
except Exception as e:
response = return_error(e)
return jsonify(response)
@app.route("/table/<table_id>", methods=["GET"])
def get_table_by_id(table_id):
try:
table = models.Table.query.filter_by(id=table_id).first()
response = return_table(table)
except Exception as e:
response = return_error(e)
return jsonify(response)
@app.teardown_appcontext
def shutdown_session(exception=None):
db.remove()
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| mit | 2,658,503,353,929,986,000 | 21.245136 | 112 | 0.603463 | false |
recurser/trac-gantt-calendar | ganttcalendar/ticketcalendar.py | 1 | 5253 | import re, calendar, time
from datetime import datetime, date, timedelta
from genshi.builder import tag
from trac.core import *
from trac.web import IRequestHandler
from trac.web.chrome import INavigationContributor, ITemplateProvider
from trac.util.datefmt import to_datetime, utc
class TicketCalendarPlugin(Component):
implements(INavigationContributor, IRequestHandler, ITemplateProvider)
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'ticketcalendar'
def get_navigation_items(self, req):
if req.perm.has_permission('TICKET_VIEW'):
yield ('mainnav', 'ticketcalendar',
tag.a('Calendar', href=req.href.ticketcalendar()))
# IRequestHandler methods
def match_request(self, req):
return re.match(r'/ticketcalendar(?:_trac)?(?:/.*)?$', req.path_info)
def calendarRange(self, y, m):
w,mdays = calendar.monthrange(y,m)
w = (w + 1) % 7
firstDay = date(y,m,1)-timedelta(days=w)
lastDay = date(y,m,mdays)
w = (lastDay.weekday()+1)%7
lastDay = lastDay + timedelta(days=(6-w))
return firstDay, lastDay
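        # Worked example: calendarRange(2024, 2) pads February 2024 to whole
        # Sunday-to-Saturday weeks, returning (date(2024,1,28), date(2024,3,2)).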
def dateToString(self, dt):
m = dt.month
if m < 10:
m = '0'+str(m)
d = dt.day
if d < 10:
d = '0'+str(d)
return str(dt.year)+"/"+str(m)+"/"+str(d)
def process_request(self, req):
ymonth = req.args.get('month')
yyear = req.args.get('year')
show_my_ticket = req.args.get('show_my_ticket')
selected_milestone = req.args.get('selected_milestone')
cday = date.today()
        if ymonth and yyear:
cday = date(int(yyear),int(ymonth),1)
# cal next month
nm = cday.month + 1
ny = cday.year
if nm > 12:
ny = ny + 1
nm = 1
nmonth = datetime(ny,nm,1)
# cal previous month
pm = cday.month - 1
py = cday.year
if pm < 1:
py = py -1
pm = 12
pmonth = date(py,pm,1)
first,last = self.calendarRange(cday.year, cday.month)
# process ticket
db = self.env.get_db_cnx()
cursor = db.cursor();
my_ticket_sql = ""
self.log.debug("myticket")
self.log.debug(show_my_ticket)
if show_my_ticket=="on":
my_ticket_sql = "AND owner = '" + req.authname + "'"
selected_milestone_sql = ""
if selected_milestone != None and selected_milestone != "":
selected_milestone_sql = "AND milestone = '" + selected_milestone + "'"
sql = ("SELECT id, type, summary, owner, description, status, a.value, c.value from ticket t "
"JOIN ticket_custom a ON a.ticket = t.id AND a.name = 'due_assign' "
"JOIN ticket_custom c ON c.ticket = t.id AND c.name = 'due_close' "
"WHERE ((a.value > '%s' AND a.value < '%s' ) "
" OR (c.value > '%s' AND c.value < '%s')) %s %s" %
(self.dateToString(first),
self.dateToString(last),
self.dateToString(first),
self.dateToString(last),
my_ticket_sql,
selected_milestone_sql))
self.log.debug(sql)
cursor.execute(sql)
tickets=[]
for id, type, summary, owner, description, status, due_assign, due_close in cursor:
due_assign_date = None
due_close_date = None
try:
t = time.strptime(due_assign,"%Y/%m/%d")
due_assign_date = date(t[0],t[1],t[2])
            except (ValueError, TypeError):
                pass
try:
t = time.strptime(due_close,"%Y/%m/%d")
due_close_date = date(t[0],t[1],t[2])
            except (ValueError, TypeError):
                pass
ticket = {'id':id, 'type':type, 'summary':summary, 'owner':owner, 'description': description, 'status':status, 'due_assign':due_assign_date, 'due_close':due_close_date}
tickets.append(ticket)
# get roadmap
sql = ("SELECT name, due, completed, description from milestone")
self.log.debug(sql)
cursor.execute(sql)
milestones = [""]
for name, due, completed, description in cursor:
if due!=0:
due_time = to_datetime(due, utc)
due_date = date(due_time.year, due_time.month, due_time.day)
milestone = {'name':name, 'due':due_date, 'completed':completed != 0,'description':description}
milestones.append(milestone)
data = {'current':cday, 'prev':pmonth, 'next':nmonth, 'first':first, 'last':last, 'tickets':tickets, 'milestones':milestones,
'show_my_ticket': show_my_ticket, 'selected_milestone': selected_milestone}
return 'calendar.html', data, None
def get_templates_dirs(self):
from pkg_resources import resource_filename
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
from pkg_resources import resource_filename
return [('tc', resource_filename(__name__, 'htdocs'))]
| bsd-3-clause | 5,199,380,876,074,111,000 | 37.065217 | 179 | 0.555111 | false |
fschulze/pytest-warnings | tests/test_warnings.py | 1 | 2488 | import pytest
import warnings
from pytest_warnings import _setoption
from helper_test_a import deprecated_a
from helper_test_b import user_warning_b
def test_warnings():
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
def test_warnings1():
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
warnings.warn("Foo", DeprecationWarning)
def test_warn():
with pytest.warns(DeprecationWarning):
warnings.warn("Bar", DeprecationWarning)
# This section test the ability to filter selectively warnings using regular
# expressions on messages.
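# The filter strings passed to _setoption below use the standard warnings
# filter syntax, action:message:category:module:lineno, e.g.
# 'ignore:.*message_a.*' ignores any warning whose message matches the regex.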
def test_filters_setoption():
"A alone works"
with pytest.warns(DeprecationWarning):
deprecated_a()
def test_filters_setoption_2():
"B alone works"
with pytest.warns(UserWarning) as record:
user_warning_b()
assert len(record) == 1
def test_filters_setoption_3():
"A and B works"
with pytest.warns(None) as record:
user_warning_b()
deprecated_a()
assert len(record) == 2
def test_filters_setoption_4():
"A works, B is filtered"
with pytest.warns(None) as record:
_setoption(warnings, 'ignore:.*message_a.*')
deprecated_a()
user_warning_b()
assert len(record) == 1, "Only `A` should be filtered out"
def test_filters_setoption_4b():
"A works, B is filtered"
with pytest.warns(None) as record:
_setoption(warnings, 'ignore:.*message_b.*')
_setoption(warnings, 'ignore:.*message_a.*')
_setoption(warnings, 'always:::.*helper_test_a.*')
deprecated_a()
user_warning_b()
assert len(record) == 1, "`A` and `B` should be visible, second filter reenable A"
def test_filters_setoption_5():
"B works, A is filtered"
with pytest.warns(None) as records:
_setoption(warnings, 'always:::.*helper_test_a.*')
_setoption(warnings, 'ignore::UserWarning')
deprecated_a()
user_warning_b()
assert len(records) == 1, "Only `B` should be filtered out"
| mit | -2,010,615,504,786,199,800 | 25.189474 | 86 | 0.668006 | false |
popazerty/openhdf-enigma2 | lib/python/Screens/MultiBootStartup.py | 1 | 16626 | from Screens.InfoBar import InfoBar
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components import Harddisk
from os import path, listdir, system
from boxbranding import getMachineBuild
class MultiBootStartup(ConfigListScreen, Screen):
skin = """
<screen name="MultiBootStartupOPT" position="center,center" size="600,250" flags="wfNoBorder" title="MultiBoot STARTUP Selector" backgroundColor="transparent">
<eLabel name="b" position="0,0" size="600,250" backgroundColor="#00ffffff" zPosition="-2" />
<eLabel name="a" position="1,1" size="598,248" backgroundColor="#00000000" zPosition="-1" />
<widget source="Title" render="Label" position="10,10" foregroundColor="#00ffffff" size="580,50" halign="center" font="Regular; 35" backgroundColor="#00000000" />
<eLabel name="line" position="1,69" size="598,1" backgroundColor="#00ffffff" zPosition="1" />
<widget source="config" render="Label" position="10,90" size="580,50" halign="center" font="Regular; 30" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget source="options" render="Label" position="10,132" size="580,35" halign="center" font="Regular; 24" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget name="description" position="10,170" size="580,26" font="Regular; 19" foregroundColor="#00ffffff" halign="center" backgroundColor="#00000000" valign="center" />
<ePixmap position="555,217" size="35,25" zPosition="2" pixmap="/usr/share/enigma2/skin_default/buttons/key_info.png" alphatest="blend" />
<widget source="key_red" render="Label" position="35,212" size="170,30" noWrap="1" zPosition="1" valign="center" font="Regular; 20" halign="left" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget source="key_green" render="Label" position="228,212" size="170,30" noWrap="1" zPosition="1" valign="center" font="Regular; 20" halign="left" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<widget source="key_yellow" render="Label" position="421,212" size="170,30" noWrap="1" zPosition="1" valign="center" font="Regular; 20" halign="left" backgroundColor="#00000000" foregroundColor="#00ffffff" />
<eLabel position="25,209" size="6,40" backgroundColor="#00e61700" />
<eLabel position="216,209" size="6,40" backgroundColor="#0061e500" />
<eLabel position="407,209" size="6,40" backgroundColor="#00e5b243" />
</screen>
"""
def __init__(self, session):
Screen.__init__(self, session)
self.title = _("MultiBoot Selector")
self.skinName = ["MultiBootStartupOPT"]
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] = StaticText(_("Rename"))
self["config"] = StaticText()
self["options"] = StaticText()
self["description"] = Label()
self["actions"] = ActionMap(["WizardActions", "SetupActions", "ColorActions"],
{
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down,
"green": self.save,
"red": self.cancel,
"yellow": self.rename,
"cancel": self.cancel,
"ok": self.save,
"info": self.info,
}, -2)
self.getCurrent()
self.onLayoutFinish.append(self.layoutFinished)
def info(self):
message = (
#message 0
_("*** boxmode=1 (Standard) ***\n\n" +
"+++ Features +++\n" +
"3840x2160p60 10-bit HEVC, 3840x2160p60 8-bit VP9, 1920x1080p60 8-bit AVC,\nMAIN only (no PIP), Limited display usages, UHD only (no SD),\nNo multi-PIP, No transcoding\n\n" +
"--- Restrictions ---\n" +
"Decoder 0: 3840x2160p60 10-bit HEVC, 3840x2160p60 8-bit VP9, 1920x1080p60 8-bit AVC\n" +
"OSD Grafic 0: 1080p60 32 bit ARGB\n" +
"Display 0 Encode Restrictions: 3840x2160p60 12-bit 4:2:0 (HDMI),\n3840x2160p60 12-bit 4:2:2 (HDMI), 3840x2160p60 8-bit 4:4:4 (HDMI),\n1920x1080p60 (component), Only one display format at a time\n\n" +
"If you want 1080p60 component, HDMI also needs to be 1080p60."),
#message 1
_("*** boxmode=12 (Experimental) ***\n\n" +
"+++ Features +++\n" +
"3840x2160p50 10-bit decode for MAIN, 1080p25/50i PIP support, HDMI input (if available),\n UHD display only, No SD display, No transcoding\n\n" +
"--- Restrictions ---\n" +
"Decoder 0: 3840x2160p50 10-bit HEVC, 3840x2160p50 8-bit VP9,\n1920x1080p50 8-bit AVC/MPEG\n" +
"Decoder 1: 1920x1080p25/50i 10-bit HEVC, 1920x1080p25/50i 8-bit VP9/AVC/MPEG2,\nHDMI In (if available), 3840x2160p50\n" +
"OSD Graphic 0 (UHD): 1080p50 32-bit ARGB\n" +
"Window 0 (MAIN/UHD): Limited display capabilities, 1080i50 10-bit de-interlacing\n" +
"Multi-PIP mode (3x): Enigma2 supported no multi-PIP\n" +
"Window 1 (PIP/UHD) (Enigma2 PIP Mode): Up to 1/2 x 1/2 screen display, 576i50 de-interlacing\n" +
"Display 0 (UHD) Encode Restrictions: 3840x2160p50"),
#message 2
_("placeholder message 2"),
)
if not self.option_enabled:
idx = 0
blv = ''
for x in self.bootloaderList:
if idx: blv += ', '
blv += x
idx += 1
			message = (_("Your box needs bootloader version(s)\n\n%s\n\nto be compatible with boot options!") % blv,)
self.session.open(MessageBox, message[self.option], MessageBox.TYPE_INFO)
def rename(self):
self.oldname = self.list[self.selection]
if self.oldname:
self.session.openWithCallback(self.renameCB, VirtualKeyBoard, title=_("Please enter new name:"), text=self.oldname)
def renameCB(self, newname):
if newname and newname != 'bootname' and newname != self.oldname:
if not path.exists('/boot/%s' %newname) and path.isfile('/boot/%s' %self.oldname):
ret = system("mv -fn '/boot/%s' '/boot/%s'" %(self.oldname,newname))
if ret:
self.session.open(MessageBox, _('Rename failed!'), MessageBox.TYPE_ERROR)
else:
bootname = self.readlineFile('/boot/bootname').split('=')
if len(bootname) == 2 and bootname[1] == self.oldname:
self.writeFile('/boot/bootname', '%s=%s' %(bootname[0],newname))
self.getCurrent()
return
elif self.bootname == self.oldname:
self.getCurrent()
return
self.list[self.selection] = newname
self["config"].setText(_("Select Image: %s") %newname)
else:
if not path.exists('/boot/%s' %self.oldname):
self.getCurrent()
txt = _("File not found - rename failed!")
else:
txt = _("Name already exists - rename failed!")
self.session.open(MessageBox, txt, MessageBox.TYPE_ERROR)
def writeFile(self, FILE, DATA):
try:
f = open(FILE, 'w')
f.write(DATA)
f.close()
return True
except IOError:
print "[MultiBootStartup] write error file: %s" %FILE
return False
def readlineFile(self, FILE):
data = ''
if path.isfile(FILE):
f = open(FILE, 'r')
data = f.readline().replace('\n', '')
f.close()
return data
def getCurrent(self):
'''
#default
Image 1: boot emmcflash0.kernel1 'root=/dev/mmcblk0p3 rw rootwait'
Image 2: boot emmcflash0.kernel2 'root=/dev/mmcblk0p5 rw rootwait'
Image 3: boot emmcflash0.kernel3 'root=/dev/mmcblk0p7 rw rootwait'
Image 4: boot emmcflash0.kernel4 'root=/dev/mmcblk0p9 rw rootwait'
#options
Standard: hd51_4.boxmode=1 (or no option)
Experimental: hd51_4.boxmode=12
#example
boot emmcflash0.kernel1 'root=/dev/mmcblk0p3 rw rootwait hd51_4.boxmode=12'
'''
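		# Illustrative parse of one entry: in "boot emmcflash0.kernelN 'root=...'"
		# the image number N sits at string index 22, which is why boot[22:23]
		# is used for comparisons below.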
self.optionsList = (('boxmode=1', _('2160p60 without PiP (Standard)')), ('boxmode=12', _('2160p50 with PiP (Experimental)')))
self.bootloaderList = ('v1.07-r19',)
		# For compatibility with old or other images, set 'self.enable_bootnamefile = False'.
		# If False and more than one file with the same kernel exists, an exact
		# match may not be found (affects display only).
self.enable_bootnamefile = False
if not self.enable_bootnamefile and path.isfile('/boot/bootname'):
system("rm -f /boot/bootname")
self.list = self.list_files("/boot")
self.option_enabled = self.readlineFile('/sys/firmware/devicetree/base/bolt/tag').replace('\x00', '') in self.bootloaderList
boot = self.readlineFile('/boot/STARTUP')
bootname = self.readlineFile('/boot/bootname').split('=')
self.selection = None
self.option = 0
#read name from bootname file
if len(bootname) == 2:
idx = 0
for x in self.list:
if x == bootname[1]:
self.selection = idx
bootname = x
break
idx += 1
if self.selection is None:
idx = 0
for x in self.list:
if x == bootname[0]:
self.selection = idx
bootname = x
break
idx += 1
#verify bootname
if bootname in self.list:
line = self.readlineFile('/boot/%s' %bootname)
if line[22:23] != boot[22:23]:
self.selection = None
else:
self.selection = None
#bootname searching ...
if self.selection is None:
idx = 0
for x in self.list:
line = self.readlineFile('/boot/%s' %x)
if line[22:23] == boot[22:23]:
bootname = x
self.selection = idx
break
idx += 1
#bootname not found
if self.selection is None:
bootname = _('unknown')
self.selection = 0
self.bootname = bootname
#read current boxmode
try:
bootmode = boot.split('rootwait',1)[1].split('boxmode',1)[1].split("'",1)[0].split('=',1)[1].replace(' ','')
except IndexError:
bootmode = ""
#find and verify current boxmode
if self.option_enabled:
idx = 0
for x in self.optionsList:
if bootmode and bootmode == x[0].split('=')[1]:
self.option = idx
break
elif x[0] + "'" in boot or x[0] + " " in boot:
self.option = idx
break
idx += 1
if bootmode and bootmode != self.optionsList[self.option][0].split('=')[1]:
bootoption = ', boxmode=' + bootmode + _(" (unknown mode)")
elif self.option_enabled:
bootoption = ', ' + self.optionsList[self.option][0]
else:
bootoption = ''
try:
image = 'Image %s' %(int(boot[22:23]))
except:
image = _("Unable to read image number")
self.startup()
self.startup_option()
self["description"].setText(_("Current Bootsettings: %s (%s)%s") %(bootname,image,bootoption))
def layoutFinished(self):
self.setTitle(self.title)
def startup_option(self):
if self.option_enabled:
self["options"].setText(_("Select Bootoption: %s") %self.optionsList[self.option][1])
elif 'up' in self["actions"].actions:
self["options"].setText(_("Select Bootoption: not supported - see info"))
del self["actions"].actions['up']
del self["actions"].actions['down']
def startup(self):
if len(self.list):
self["config"].setText(_("Select Image: %s") %self.list[self.selection])
elif 'left' in self["actions"].actions:
self["config"].setText(_("Select Image: %s") %_("no image found"))
del self["actions"].actions['left']
del self["actions"].actions['right']
del self["actions"].actions['green']
del self["actions"].actions['yellow']
del self["actions"].actions['ok']
def checkBootEntry(self, ENTRY):
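		# Validates entries of the shape shown in getCurrent's docstring, e.g.:
		#   boot emmcflash0.kernel1 'root=/dev/mmcblk0p3 rw rootwait'
		#   boot emmcflash0.kernel2 'root=/dev/mmcblk0p5 rw rootwait hd51_4.boxmode=12'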
try:
ret = False
temp = ENTRY.split(' ')
#read kernel, root as number and device name
kernel = int(temp[1].split("emmcflash0.kernel")[1])
root = int(temp[2].split("'root=/dev/mmcblk0p")[1])
device = temp[2].split("=")[1]
#read boxmode and new boxmode settings
cmdx = 5
cmd4 = "rootwait'"
bootmode = '1'
if 'boxmode' in ENTRY:
cmdx = 6
cmd4 = "rootwait"
				bootmode = temp[5].split("%s_4.boxmode=" % getMachineBuild())[1].replace("'", '')
setmode = self.optionsList[self.option][0].split('=')[1]
#verify entries
if cmdx != len(temp) or 'boot' != temp[0] or 'rw' != temp[3] or cmd4 != temp[4] or kernel != root-kernel-1 or "'" != ENTRY[-1:]:
print "[MultiBootStartup] Command line in '/boot/STARTUP' - problem with not matching entries!"
ret = True
#verify length
elif ('boxmode' not in ENTRY and len(ENTRY) > 58) or ('boxmode' in ENTRY and len(ENTRY) > 76):
print "[MultiBootStartup] Command line in '/boot/STARTUP' - problem with line length!"
ret = True
#verify boxmode
elif bootmode != setmode and not self.option_enabled:
print "[MultiBootStartup] Command line in '/boot/STARTUP' - problem with unsupported boxmode!"
ret = True
#verify device
elif not device in Harddisk.getextdevices("ext4"):
print "[MultiBootStartup] Command line in '/boot/STARTUP' - boot device not exist!"
ret = True
except:
print "[MultiBootStartup] Command line in '/boot/STARTUP' - unknown problem!"
ret = True
return ret
def save(self):
print "[MultiBootStartup] select new startup: ", self.list[self.selection]
ret = system("cp -f '/boot/%s' /boot/STARTUP" %self.list[self.selection])
if ret:
self.session.open(MessageBox, _("File '/boot/%s' copy to '/boot/STARTUP' failed!") %self.list[self.selection], MessageBox.TYPE_ERROR)
self.getCurrent()
return
writeoption = already = failboot = False
newboot = boot = self.readlineFile('/boot/STARTUP')
if self.checkBootEntry(boot):
failboot = True
elif self.option_enabled:
for x in self.optionsList:
if (x[0] + "'" in boot or x[0] + " " in boot) and x[0] != self.optionsList[self.option][0]:
newboot = boot.replace(x[0],self.optionsList[self.option][0])
writeoption = True
break
elif (x[0] + "'" in boot or x[0] + " " in boot) and x[0] == self.optionsList[self.option][0]:
already = True
break
if not (writeoption or already):
if "boxmode" in boot:
failboot = True
elif self.option:
newboot = boot.replace("rootwait", "rootwait %s_4.%s" %(getMachineBuild(), self.optionsList[self.option][0]))
writeoption = True
if self.enable_bootnamefile:
if failboot:
self.writeFile('/boot/bootname', 'STARTUP_1=STARTUP_1')
else:
				self.writeFile('/boot/bootname', '%s=%s' % ('STARTUP_%s' % boot[22:23], self.list[self.selection]))
message = _("Do you want to reboot now with selected image?")
if failboot:
print "[MultiBootStartup] wrong bootsettings: " + boot
if '/dev/mmcblk0p3' in Harddisk.getextdevices("ext4"):
if self.writeFile('/boot/STARTUP', "boot emmcflash0.kernel1 'root=/dev/mmcblk0p3 rw rootwait'"):
txt = _("Next boot will start from Image 1.")
else:
					txt = _("Cannot repair file %s") % ("'/boot/STARTUP'") + "\n" + _("Caution, the next boot will start with these settings!") + "\n"
else:
				txt = _("Alternative Image 1 partition for boot repair not found.") + "\n" + _("Caution, the next boot will start with these settings!") + "\n"
message = _("Wrong Bootsettings detected!") + "\n\n%s\n\n%s\n" %(boot, txt) + _("Do you want to reboot now?")
elif writeoption:
if not self.writeFile('/boot/STARTUP', newboot):
				txt = _("Cannot write file %s") % ("'/boot/STARTUP'") + "\n" + _("Caution, the next boot will start with these settings!") + "\n"
message = _("Write error!") + "\n\n%s\n\n%s\n" %(boot, txt) + _("Do you want to reboot now?")
#verify boot
if failboot or writeoption:
boot = self.readlineFile('/boot/STARTUP')
if self.checkBootEntry(boot):
				txt = _("Error in file %s") % ("'/boot/STARTUP'") + "\n" + _("Caution, the next boot will start with these settings!") + "\n"
message = _("Command line error!") + "\n\n%s\n\n%s\n" %(boot, txt) + _("Do you want to reboot now?")
self.session.openWithCallback(self.restartBOX,MessageBox, message, MessageBox.TYPE_YESNO)
def cancel(self):
self.close()
def up(self):
self.option = self.option - 1
if self.option == -1:
self.option = len(self.optionsList) - 1
self.startup_option()
def down(self):
self.option = self.option + 1
if self.option == len(self.optionsList):
self.option = 0
self.startup_option()
def left(self):
self.selection = self.selection - 1
if self.selection == -1:
self.selection = len(self.list) - 1
self.startup()
def right(self):
self.selection = self.selection + 1
if self.selection == len(self.list):
self.selection = 0
self.startup()
def read_startup(self, FILE):
self.file = FILE
		with open(FILE, 'r') as myfile:
			data = myfile.read().replace('\n', '')
return data
def list_files(self, PATH):
files = []
for name in listdir(PATH):
if path.isfile(path.join(PATH, name)):
try:
cmdline = self.read_startup("/boot/" + name).split("=",1)[1].split(" ",1)[0]
except IndexError:
continue
if cmdline in Harddisk.getextdevices("ext4") and not name == "STARTUP":
files.append(name)
return files
def restartBOX(self, answer):
if answer is True:
self.session.open(TryQuitMainloop, 2)
else:
self.close()
| gpl-2.0 | 6,802,009,735,831,980,000 | 38.212264 | 210 | 0.662456 | false |
alien3211/lom-web | posts/models.py | 1 | 2992 | from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.contrib.auth.models import User
from django.db.models import Q
from django.urls import reverse
from django.utils.text import slugify, mark_safe
from django.db.models.signals import pre_save
from django.utils import timezone
from markdown_deux import markdown
from taggit.managers import TaggableManager
from comments.models import Comment
from .utils import get_read_time
from .categoryModels import Category
# Create your models here.
class PostPublishedManager(models.Manager):
def published(self):
return super(PostPublishedManager, self).get_queryset().filter(status='published').filter(publish__lte=timezone.now())
def draft(self):
return super(PostPublishedManager, self).get_queryset().filter(Q(status='draft') | Q(publish__gte=timezone.now()))
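# Usage sketch (illustrative):
#   Post.objects.published()  # status='published' and publish date not in the future
#   Post.objects.draft()      # drafts, plus posts scheduled to publish later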
class Post(models.Model):
STATUS_CHOICES = (
('draft', 'Roboczy'),
('published', 'Opublikowany'),
)
title = models.CharField(max_length=250)
slug = models.SlugField(unique=True, max_length=191)
author = models.ForeignKey(User, related_name='blog_posts')
content = models.TextField()
publish = models.DateTimeField(default=timezone.now)
read_time = models.IntegerField(default=0)
updated = models.DateTimeField(auto_now=True, auto_now_add=False)
timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default='draft')
category = models.ForeignKey('posts.Category')
tags = TaggableManager()
objects = PostPublishedManager()
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('posts:detail',
args=[self.slug])
def get_markdown(self):
content = self.content
return mark_safe(markdown(content))
@property
def comments(self):
instance = self
qs = Comment.objects.filter_by_instance(instance)
return qs
@property
def get_content_type(self):
instance = self
content_type = ContentType.objects.get_for_model(instance.__class__)
return content_type
def create_slug(instance, new_slug=None):
slug = slugify(instance.title)
if new_slug is not None:
slug = new_slug
qs = Post.objects.filter(slug=slug).order_by("-id")
exists = qs.exists()
if exists:
new_slug = "%s-%s" % (slug, qs.first().id)
return create_slug(instance, new_slug=new_slug)
return slug
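# Illustrative behaviour: slugify("My Post") -> "my-post"; if that slug is
# already taken, the id of the newest conflicting post is appended, e.g.
# "my-post-7", recursing until the slug is unique.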
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = create_slug(instance)
if instance.content:
html_string = instance.get_markdown()
read_time = get_read_time(html_string)
instance.read_time = read_time
pre_save.connect(pre_save_post_receiver, sender=Post)
| mit | -1,002,450,773,182,722,300 | 29.845361 | 126 | 0.684158 | false |
votervoice/openstates | openstates/il/bills.py | 1 | 27275 | # -*- coding: utf-8 -*-
import re
import os
import datetime
import pytz
import scrapelib
import lxml.html
from pupa.scrape import Scraper, Bill, VoteEvent
from pupa.utils import convert_pdf
from ._utils import canonicalize_url
session_details = {
'100th-special': {
'speaker': 'Madigan',
'president': 'Cullerton',
'params': {'GA': '100', 'SessionID': '92', 'SpecSess': '1'}},
'100th': {
'speaker': 'Madigan',
'president': 'Cullerton',
'params': {'GA': '100', 'SessionId': '91'},
},
'99th': {
'speaker': 'Madigan',
'president': 'Cullerton',
'params': {'GA': '99', 'SessionId': '88'},
},
'98th': {
'speaker': 'Madigan',
'president': 'Cullerton',
'params': {'GA': '98', 'SessionId': '85'},
},
'97th': {
'params': {'GA': '97', 'SessionId': '84'},
'speaker': 'Madigan',
'president': 'Cullerton',
},
'96th': {
'params': {'GA': '96', 'SessionId': '76'},
'speaker': 'Madigan',
'president': 'Cullerton',
},
'96th-special': {
'params': {'GA': '96', 'SessionId': '82', 'SpecSess': '1'},
'speaker': 'Madigan',
'president': 'Cullerton',
},
'95th': {
'params': {'GA': '95', 'SessionId': '51'},
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'95th-special': {
'params': {'GA': '95', 'SessionId': '52', 'SpecSess': '1'},
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'94th': {
'params': {'GA': '94', 'SessionId': '50'},
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'93rd': {
'params': {'GA': '93', 'SessionId': '3'},
'speaker': 'Madigan',
'president': 'Jones, E.',
},
'93rd-special': {
'params': {'GA': '93', 'SessionID': '14', 'SpecSess': '1'},
'speaker': 'Madigan',
'president': 'Jones, E.'
}
}
TITLE_REMOVING_PATTERN = re.compile(".*(Rep|Sen). (.+)$")
SPONSOR_REFINE_PATTERN = re.compile(r'^Added (?P<spontype>.+) (?P<title>Rep|Sen)\. (?P<name>.+)')
SPONSOR_TYPE_REFINEMENTS = {
'Chief Co-Sponsor': 'cosponsor',
'as Chief Co-Sponsor': 'cosponsor',
'Alternate Chief Co-Sponsor': 'cosponsor',
'as Alternate Chief Co-Sponsor': 'cosponsor',
'as Co-Sponsor': 'cosponsor',
'Alternate Co-Sponsor': 'cosponsor',
'as Alternate Co-Sponsor': 'cosponsor',
'Co-Sponsor': 'cosponsor',
}
VERSION_TYPES = ('Introduced', 'Engrossed', 'Enrolled', 'Re-Enrolled')
FULLTEXT_DOCUMENT_TYPES = ('Public Act', "Governor's Message", )
# not as common, but maybe should just be added to FULLTEXT_DOCUMENT_TYPES?
# Amendatory Veto Motion \d{3}
# Conference Committee Report \d{3}
DOC_TYPES = {
'B': 'bill',
'R': 'resolution',
'JR': 'joint resolution',
'JRCA': 'constitutional amendment',
}
# see http://openstates.org/categorization/
_action_classifiers = (
(re.compile(r'Amendment No. \d+ Filed'), ['amendment-introduction']),
(re.compile(r'Amendment No. \d+ Tabled'), ['amendment-failure']),
(re.compile(r'Amendment No. \d+ Adopted'), ['amendment-passage']),
(re.compile(r'(Pref|F)iled with'), ['filing']),
(re.compile(r'Arrived? in'), ['introduction']),
(re.compile(r'First Reading'), ['reading-1']),
(re.compile(r'(Recalled to )?Second Reading'), ['reading-2']),
(re.compile(r'(Re-r|R)eferred to'), ['referral-committee']),
(re.compile(r'(Re-a|A)ssigned to'), ['referral-committee']),
(re.compile(r'Sent to the Governor'), ['executive-receipt']),
(re.compile(r'Governor Approved'), ['executive-signature']),
(re.compile(r'Governor Vetoed'), ['executive-veto']),
(re.compile(r'Governor Item'), ['executive-veto-line-item']),
(re.compile(r'Governor Amendatory Veto'), ['executive-veto']),
(re.compile(
r'^(?:Recommends )?Do Pass(?: as Amended)?(?: / Short Debate)?(?: / Standard Debate)?'),
['committee-passage']
),
(re.compile(r'Amendment.+Concur'), []),
(re.compile(r'Motion Do Pass(?: as Amended)?(?: - Lost)?'), ['committee-failure']),
(re.compile(r'Motion Do Pass(?: as Amended)?'), ['committee-passage']),
(re.compile(r'.*Be Adopted(?: as Amended)?'), ['committee-passage-favorable']),
(re.compile(r'Third Reading .+? Passed'), ['reading-3', 'passage']),
(re.compile(r'Third Reading .+? Lost'), ['reading-3', 'failure']),
(re.compile(r'Third Reading'), ['reading-3']),
(re.compile(r'Resolution Adopted'), ['passage']),
(re.compile(r'Resolution Lost'), ['failure']),
(re.compile(r'Session Sine Die',), ['failure']),
(re.compile(r'Tabled'), ['withdrawal']),
(re.compile(r'Motion To Adopt'), ['passage']),
)
OTHER_FREQUENT_ACTION_PATTERNS_WHICH_ARE_CURRENTLY_UNCLASSIFIED = [
r'Accept Amendatory Veto - (House|Senate) (Passed|Lost) \d+-\d+\d+.?',
r'Amendatory Veto Motion - (.+)',
r'Balanced Budget Note (.+)',
r'Effective Date(\s+.+ \d{4})?(;.+)?',
r'To .*Subcommittee',
r'Note Requested',
r'Note Filed',
r'^Public Act',
r'Appeal Ruling of Chair',
r'Added .*Sponsor',
r'Remove(d)? .*Sponsor',
r'Sponsor Removed',
r'Sponsor Changed',
r'^Chief .*Sponsor',
r'^Co-Sponsor',
r'Deadline Extended.+9\(b\)',
r'Amendment.+Approved for Consideration',
r'Approved for Consideration',
r'Amendment.+Do Adopt',
r'Amendment.+Concurs',
r'Amendment.+Lost',
r'Amendment.+Withdrawn',
r'Amendment.+Motion.+Concur',
r'Amendment.+Motion.+Table',
r'Amendment.+Rules Refers',
r'Amendment.+Motion to Concur Recommends be Adopted',
r'Amendment.+Assignments Refers',
r'Amendment.+Assignments Refers',
r'Amendment.+Held',
r'Motion.+Suspend Rule 25',
r'Motion.+Reconsider Vote',
r'Placed on Calendar',
r'Amendment.+Postponed - (?P<committee>.+)',
r'Postponed - (?P<committee>.+)',
r"Secretary's Desk",
r'Rule 2-10 Committee Deadline Established',
r'^Held in (?P<committee>.+)'
]
VOTE_VALUES = ['NV', 'Y', 'N', 'E', 'A', 'P', '-']
COMMITTEE_CORRECTIONS = {
'Elementary & Secondary Education: School Curriculum & Policies':
'Elem Sec Ed: School Curric Policies',
'Elementary & Secondary Education: Licensing, Administration & Oversight':
'Elem Sec Ed: Licensing, Admin.',
'Elementary & Secondary Education: Charter School Policy':
'Elem Sec Ed: Charter School Policy',
'Transportation: Regulation, Roads & Bridges': 'Transportation: Regulation, Roads',
'Business Incentives for Local Communities': 'Business Incentives for Local Comm.',
'Museums, Arts, & Cultural Enhancement': 'Museums, Arts, & Cultural Enhanceme',
'Health Care Availability & Accessibility': 'Health Care Availability & Access',
'Construction Industry & Code Enforcement': 'Construction Industry & Code Enforc',
'Appropriations-Elementary & Secondary Education': 'Approp-Elementary & Secondary Educ',
'Tourism, Hospitality & Craft Industries': 'Tourism, Hospitality & Craft Ind.',
'Government Consolidation & Modernization': 'Government Consolidation & Modern',
'Community College Access & Affordability': 'Community College Access & Afford.'}
DUPE_VOTES = {'http://ilga.gov/legislation/votehistory/100/house/committeevotes/'
'10000HB2457_16401.pdf'}
def group(lst, n):
# from http://code.activestate.com/recipes/303060-group-a-list-into-sequential-n-tuples/
for i in range(0, len(lst), n):
val = lst[i:i+n]
if len(val) == n:
yield tuple(val)
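# e.g. list(group([1, 2, 3, 4, 5], 2)) == [(1, 2), (3, 4)]; trailing partial
# groups are dropped, which suits the 3-column action table parsed below.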
def _categorize_action(action):
related_orgs = []
for pattern, atype in _action_classifiers:
if pattern.findall(action):
if "referral-committee" in atype:
related_orgs = [pattern.sub("", action).strip()]
for each in atype:
if each.startswith("committee"):
org = pattern.sub("", action).split(';')[0].strip()
org = re.sub(' *Committee *$', '', org)
if org in COMMITTEE_CORRECTIONS:
org = COMMITTEE_CORRECTIONS[org]
related_orgs = [org]
return atype, related_orgs
return None, related_orgs
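# e.g. _categorize_action("Referred to Rules Committee")
#   -> (['referral-committee'], ['Rules Committee'])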
def chamber_slug(chamber):
if chamber == 'lower':
return 'H'
return 'S'
class IlBillScraper(Scraper):
LEGISLATION_URL = 'http://ilga.gov/legislation/grplist.asp'
localize = pytz.timezone('America/Chicago').localize
def get_bill_urls(self, chamber, session, doc_type):
params = session_details[session]['params']
params['num1'] = '1'
params['num2'] = '10000'
params['DocTypeID'] = doc_type
html = self.get(self.LEGISLATION_URL, params=params).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(self.LEGISLATION_URL)
for bill_url in doc.xpath('//li/a/@href'):
yield bill_url
def scrape(self, session=None):
if session is not None:
session_id = session
else:
session_id = self.latest_session()
for chamber in ('lower', 'upper'):
for doc_type in [chamber_slug(chamber) + doc_type for doc_type in DOC_TYPES]:
for bill_url in self.get_bill_urls(chamber, session_id, doc_type):
bill, votes = self.scrape_bill(chamber, session_id, doc_type, bill_url)
yield bill
yield from votes
# special non-chamber cases
for bill_url in self.get_bill_urls(chamber, session_id, 'AM'):
bill, votes = self.scrape_bill(chamber, session_id, 'AM', bill_url, 'appointment')
yield bill
yield from votes
# TODO: get joint session resolution added to python-opencivicdata
# for bill_url in self.get_bill_urls(chamber, session_id, 'JSR'):
# bill, votes = self.scrape_bill(chamber, session_id, 'JSR', bill_url,
# 'joint session resolution')
# yield bill
# yield from votes
def scrape_bill(self, chamber, session, doc_type, url, bill_type=None):
try:
html = self.get(url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
except scrapelib.HTTPError as e:
assert '500' in e.args[0], "Unexpected error when accessing page: {}".format(e)
self.warning("500 error for bill page; skipping bill")
return
# bill id, title, summary
        bill_num = re.findall(r'DocNum=(\d+)', url)[0]
bill_type = bill_type or DOC_TYPES[doc_type[1:]]
bill_id = doc_type + bill_num
title = doc.xpath('//span[text()="Short Description:"]/following-sibling::span[1]/'
'text()')[0].strip()
summary = doc.xpath('//span[text()="Synopsis As Introduced"]/following-sibling::span[1]/'
'text()')[0].strip()
bill = Bill(identifier=bill_id,
legislative_session=session,
title=title,
classification=bill_type,
chamber=chamber)
bill.add_abstract(summary, note='')
bill.add_source(url)
# sponsors
sponsor_list = build_sponsor_list(doc.xpath('//a[@class="content"]'))
# don't add just yet; we can make them better using action data
committee_actors = {}
# actions
action_tds = doc.xpath('//a[@name="actions"]/following-sibling::table[1]/td')
for date, actor, action_elem in group(action_tds, 3):
date = datetime.datetime.strptime(date.text_content().strip(),
"%m/%d/%Y")
date = self.localize(date).date()
actor = actor.text_content()
if actor == 'House':
actor_id = {'classification': 'lower'}
elif actor == 'Senate':
actor_id = {'classification': 'upper'}
action = action_elem.text_content()
classification, related_orgs = _categorize_action(action)
if (related_orgs and any(c.startswith('committee') for c in classification)):
(name, source), = [(a.text, a.get('href')) for a in
action_elem.xpath('a')
if 'committee' in a.get('href')]
source = canonicalize_url(source)
actor_id = {'sources__url': source,
'classification': 'committee'}
committee_actors[source] = name
bill.add_action(action, date,
organization=actor_id,
classification=classification,
related_entities=related_orgs)
if action.lower().find('sponsor') != -1:
self.refine_sponsor_list(actor, action, sponsor_list, bill_id)
# now add sponsors
for spontype, sponsor, chamber, official_type in sponsor_list:
if official_type == 'primary':
primary = True
else:
primary = False
if chamber:
bill.add_sponsorship(sponsor, spontype, 'person',
primary=primary,
chamber=chamber)
else:
                bill.add_sponsorship(sponsor, spontype, 'person',
                                     primary=primary)
# versions
version_url = doc.xpath('//a[text()="Full Text"]/@href')[0]
self.scrape_documents(bill, version_url)
votes_url = doc.xpath('//a[text()="Votes"]/@href')[0]
votes = self.scrape_votes(session, bill, votes_url, committee_actors)
return bill, votes
def scrape_documents(self, bill, version_url):
html = self.get(version_url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(version_url)
for link in doc.xpath('//a[contains(@href, "fulltext")]'):
name = link.text
url = link.get('href')
if name in VERSION_TYPES:
bill.add_version_link(name, url + '&print=true',
media_type='text/html')
elif 'Amendment' in name or name in FULLTEXT_DOCUMENT_TYPES:
bill.add_document_link(name, url)
elif 'Printer-Friendly' in name:
pass
else:
self.warning('unknown document type %s - adding as document' % name)
bill.add_document_link(name, url)
def scrape_votes(self, session, bill, votes_url, committee_actors):
html = self.get(votes_url).text
doc = lxml.html.fromstring(html)
doc.make_links_absolute(votes_url)
for link in doc.xpath('//a[contains(@href, "votehistory")]'):
if link.get('href') in DUPE_VOTES:
continue
pieces = link.text.split(' - ')
date = pieces[-1]
vote_type = link.xpath('../ancestor::table[1]//td[1]/text()')[0]
if vote_type == 'Committee Hearing Votes':
name = re.sub(' *Committee *$', '', pieces[1])
chamber = link.xpath('../following-sibling::td/text()')[0].lower()
first_word = name.split()[0]
try:
source, = [url for url, committee
in committee_actors.items()
if committee.startswith(first_word) and
chamber in url]
actor = {'sources__url': source,
'classification': 'committee'}
except ValueError:
self.warning("Can't resolve voting body for %s" %
link.get('href'))
continue
# depends on bill type
motion = 'Do Pass'
if pieces[0].startswith(('SCA', 'HCA')):
amendment_num = int(re.split(r'SCA|HCA', pieces[0])[-1])
amendment = ', Amendment %s' % amendment_num
motion += amendment
else:
if len(pieces) == 3:
motion = pieces[1].strip()
else:
motion = 'Third Reading'
if pieces[0].startswith(('SFA', 'HFA')):
amendment_num = int(re.split(r'SFA|HFA', pieces[0])[-1])
amendment = ', Amendment %s' % amendment_num
motion += amendment
actor = link.xpath('../following-sibling::td/text()')[0]
if actor == 'HOUSE':
actor = {'classification': 'lower'}
elif actor == 'SENATE':
actor = {'classification': 'upper'}
else:
self.warning('unknown actor %s' % actor)
classification, _ = _categorize_action(motion)
for date_format in ["%b %d, %Y", "%A, %B %d, %Y"]:
try:
date = self.localize(datetime.datetime.strptime(date, date_format)).date()
break
except ValueError:
continue
else:
raise AssertionError(
"Date '{}' does not follow a format".format(date))
# manual fix for bad bill. TODO: better error catching here
vote = self.scrape_pdf_for_votes(session, actor, date, motion.strip(),
link.get('href'))
if vote:
vote.set_bill(bill)
yield vote
def fetch_pdf_lines(self, href):
# download the file
try:
fname, resp = self.urlretrieve(href)
pdflines = [line.decode('utf-8') for line in convert_pdf(fname, 'text').splitlines()]
os.remove(fname)
return pdflines
except scrapelib.HTTPError as e:
assert '404' in e.args[0], "File not found: {}".format(e)
self.warning("404 error for vote; skipping vote")
return False
def scrape_pdf_for_votes(self, session, actor, date, motion, href):
warned = False
# vote indicator, a few spaces, a name, newline or multiple spaces
# VOTE_RE = re.compile('(Y|N|E|NV|A|P|-)\s{2,5}(\w.+?)(?:\n|\s{2})')
COUNT_RE = re.compile(
r'^(\d+)\s+YEAS?\s+(\d+)\s+NAYS?\s+(\d+)\s+PRESENT(?:\s+(\d+)\s+NOT\sVOTING)?\s*$'
)
PASS_FAIL_WORDS = {
'PASSED': 'pass',
'PREVAILED': 'fail',
'ADOPTED': 'pass',
'CONCURRED': 'pass',
'FAILED': 'fail',
'LOST': 'fail',
}
pdflines = self.fetch_pdf_lines(href)
if not pdflines:
return False
yes_count = no_count = present_count = 0
yes_votes = []
no_votes = []
present_votes = []
excused_votes = []
not_voting = []
absent_votes = []
passed = None
counts_found = False
vote_lines = []
for line in pdflines:
# consider pass/fail as a document property instead of a result of the vote count
# extract the vote count from the document instead of just using counts of names
if not line.strip():
continue
elif line.strip() in PASS_FAIL_WORDS:
if passed is not None:
raise Exception("Duplicate pass/fail matches in [%s]" % href)
passed = PASS_FAIL_WORDS[line.strip()]
elif COUNT_RE.match(line):
(yes_count, no_count, present_count,
not_voting_count) = COUNT_RE.match(line).groups()
yes_count = int(yes_count)
no_count = int(no_count)
present_count = int(present_count)
counts_found = True
elif counts_found:
for value in VOTE_VALUES:
if re.search(r'^\s*({})\s+\w'.format(value), line):
vote_lines.append(line)
break
votes = find_columns_and_parse(vote_lines)
for name, vcode in votes.items():
if name == 'Mr. Speaker':
name = session_details[session]['speaker']
elif name == 'Mr. President':
name = session_details[session]['president']
else:
# Converts "Davis,William" to "Davis, William".
name = re.sub(r'\,([a-zA-Z])', r', \1', name)
if vcode == 'Y':
yes_votes.append(name)
elif vcode == 'N':
no_votes.append(name)
elif vcode == 'P':
present_votes.append(name)
elif vcode == 'E':
excused_votes.append(name)
elif vcode == 'NV':
not_voting.append(name)
elif vcode == 'A':
absent_votes.append(name)
# fake the counts
if yes_count == 0 and no_count == 0 and present_count == 0:
yes_count = len(yes_votes)
no_count = len(no_votes)
else: # audit
if yes_count != len(yes_votes):
self.warning("Mismatched yes count [expect: %i] [have: %i]" %
(yes_count, len(yes_votes)))
warned = True
if no_count != len(no_votes):
self.warning("Mismatched no count [expect: %i] [have: %i]" %
(no_count, len(no_votes)))
warned = True
if passed is None:
if actor['classification'] == 'lower': # senate doesn't have these lines
self.warning("No pass/fail word found; fall back to comparing yes and no vote.")
warned = True
passed = 'pass' if yes_count > no_count else 'fail'
classification, _ = _categorize_action(motion)
vote_event = VoteEvent(legislative_session=session,
motion_text=motion,
classification=classification,
organization=actor,
start_date=date,
result=passed)
for name in yes_votes:
vote_event.yes(name)
for name in no_votes:
vote_event.no(name)
for name in present_votes:
vote_event.vote('other', name)
for name in excused_votes:
vote_event.vote('excused', name)
for name in not_voting:
vote_event.vote('not voting', name)
for name in absent_votes:
vote_event.vote('absent', name)
vote_event.set_count('yes', yes_count)
vote_event.set_count('no', no_count)
vote_event.set_count('other', present_count)
vote_event.set_count('excused', len(excused_votes))
vote_event.set_count('absent', len(absent_votes))
vote_event.set_count('not voting', len(not_voting))
vote_event.add_source(href)
# for distinguishing between votes with the same id and on same day
vote_event.pupa_id = href
if warned:
self.warning("Warnings were issued. Best to check %s" % href)
return vote_event
def refine_sponsor_list(self, chamber, action, sponsor_list, bill_id):
if action.lower().find('removed') != -1:
return
if action.startswith('Chief'):
self.debug("[%s] Assuming we already caught 'chief' for %s" % (bill_id, action))
return
match = SPONSOR_REFINE_PATTERN.match(action)
if match:
if match.groupdict()['title'] == 'Rep':
chamber = 'lower'
else:
chamber = 'upper'
for i, tup in enumerate(sponsor_list):
spontype, sponsor, this_chamber, otype = tup
if this_chamber == chamber and sponsor == match.groupdict()['name']:
try:
sponsor_list[i] = (
SPONSOR_TYPE_REFINEMENTS[match.groupdict()['spontype']],
sponsor,
this_chamber, match.groupdict()['spontype'].replace("as ", "")
)
except KeyError:
self.warning('[%s] Unknown sponsor refinement type [%s]' %
(bill_id, match.groupdict()['spontype']))
return
self.warning("[%s] Couldn't find sponsor [%s,%s] to refine" %
(bill_id, chamber, match.groupdict()['name']))
else:
self.debug("[%s] Don't know how to refine [%s]" % (bill_id, action))
def find_columns_and_parse(vote_lines):
columns = find_columns(vote_lines)
votes = {}
for line in vote_lines:
for idx in reversed(columns):
bit = line[idx:]
line = line[:idx]
if bit:
vote, name = bit.split(' ', 1)
votes[name.strip()] = vote
return votes
def _is_potential_column(line, i):
for val in VOTE_VALUES:
if re.search(r'^%s\s{2,10}(\w.).*' % val, line[i:]):
return True
return False
def find_columns(vote_lines):
potential_columns = []
for line in vote_lines:
pcols = set()
for i, x in enumerate(line):
if _is_potential_column(line, i):
pcols.add(i)
potential_columns.append(pcols)
starter = potential_columns[0]
for pc in potential_columns[1:-1]:
starter.intersection_update(pc)
last_row_cols = potential_columns[-1]
if not last_row_cols.issubset(starter):
raise Exception("Row's columns [%s] don't align with candidate final columns [%s]: %s" %
(last_row_cols, starter, line))
# we should now only have values that appeared in every line
return sorted(starter)
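# A character index qualifies as a column only if every vote line shows a vote
# code followed by a name at that offset; intersecting the candidates across
# lines (and subset-checking the shorter final row) recovers the fixed column
# layout of the roll-call PDF.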
def build_sponsor_list(sponsor_atags):
"""return a list of (spontype,sponsor,chamber,official_spontype) tuples"""
sponsors = []
house_chief = senate_chief = None
spontype = 'cosponsor'
for atag in sponsor_atags:
sponsor = atag.text
if 'house' in atag.attrib['href'].split('/'):
chamber = 'lower'
elif 'senate' in atag.attrib['href'].split('/'):
chamber = 'upper'
else:
chamber = None
if chamber == 'lower' and house_chief is None:
spontype = 'primary'
official_spontype = 'chief'
house_chief = sponsor
elif chamber == 'upper' and senate_chief is None:
spontype = 'primary'
official_spontype = 'chief'
senate_chief = sponsor
else:
spontype = 'cosponsor'
official_spontype = 'cosponsor' # until replaced
sponsors.append((spontype, sponsor, chamber, official_spontype))
return sponsors
| gpl-3.0 | -341,049,044,906,176,830 | 37.687943 | 97 | 0.531329 | false |
ptkool/spark | python/pyspark/sql/udf.py | 1 | 21057 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User-defined function related classes and functions
"""
import functools
import sys
from pyspark import SparkContext, since
from pyspark.rdd import _prepare_for_python_RDD, PythonEvalType, ignore_unicode_prefix
from pyspark.sql.column import Column, _to_java_column, _to_seq
from pyspark.sql.types import StringType, DataType, StructType, _parse_datatype_string
from pyspark.sql.pandas.types import to_arrow_type
from pyspark.util import _get_argspec
__all__ = ["UDFRegistration"]
def _wrap_function(sc, func, returnType):
command = (func, returnType)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
def _create_udf(f, returnType, evalType):
if evalType in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF):
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
argspec = _get_argspec(f)
if (evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF) and \
len(argspec.args) == 0 and \
argspec.varargs is None:
raise ValueError(
"Invalid function: 0-arg pandas_udfs are not supported. "
"Instead, create a 1-arg pandas_udf and ignore the arg in your function."
)
if evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF \
and len(argspec.args) not in (1, 2):
raise ValueError(
"Invalid function: pandas_udfs with function type GROUPED_MAP "
"must take either one argument (data) or two arguments (key, data).")
if evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF \
and len(argspec.args) not in (2, 3):
raise ValueError(
"Invalid function: pandas_udfs with function type COGROUPED_MAP "
"must take either two arguments (left, right) "
"or three arguments (key, left, right).")
# Set the name of the UserDefinedFunction object to be the name of function f
udf_obj = UserDefinedFunction(
f, returnType=returnType, name=None, evalType=evalType, deterministic=True)
return udf_obj._wrapped()
class UserDefinedFunction(object):
"""
User defined function in Python
.. versionadded:: 1.3
.. note:: The constructor of this class is not supposed to be directly called.
Use :meth:`pyspark.sql.functions.udf` or :meth:`pyspark.sql.functions.pandas_udf`
to create this instance.
"""
def __init__(self, func,
returnType=StringType(),
name=None,
evalType=PythonEvalType.SQL_BATCHED_UDF,
deterministic=True):
if not callable(func):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): "
"{0}".format(type(func)))
if not isinstance(returnType, (DataType, str)):
raise TypeError(
"Invalid returnType: returnType should be DataType or str "
"but is {}".format(returnType))
if not isinstance(evalType, int):
raise TypeError(
"Invalid evalType: evalType should be an int but is {}".format(evalType))
self.func = func
self._returnType = returnType
# Stores UserDefinedPythonFunctions jobj, once initialized
self._returnType_placeholder = None
self._judf_placeholder = None
self._name = name or (
func.__name__ if hasattr(func, '__name__')
else func.__class__.__name__)
self.evalType = evalType
self.deterministic = deterministic
@property
def returnType(self):
# This makes sure this is called after SparkContext is initialized.
# ``_parse_datatype_string`` accesses to JVM for parsing a DDL formatted string.
if self._returnType_placeholder is None:
if isinstance(self._returnType, DataType):
self._returnType_placeholder = self._returnType
else:
self._returnType_placeholder = _parse_datatype_string(self._returnType)
if self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_UDF or \
self.evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with scalar Pandas UDFs: %s is "
"not supported" % str(self._returnType_placeholder))
elif self.evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with grouped map Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid returnType for grouped map Pandas "
"UDFs: returnType must be a StructType.")
elif self.evalType == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with map iterator Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid returnType for map iterator Pandas "
"UDFs: returnType must be a StructType.")
elif self.evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
if isinstance(self._returnType_placeholder, StructType):
try:
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with cogrouped map Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
else:
raise TypeError("Invalid returnType for cogrouped map Pandas "
"UDFs: returnType must be a StructType.")
elif self.evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
try:
# StructType is not yet allowed as a return type, explicitly check here to fail fast
if isinstance(self._returnType_placeholder, StructType):
raise TypeError
to_arrow_type(self._returnType_placeholder)
except TypeError:
raise NotImplementedError(
"Invalid returnType with grouped aggregate Pandas UDFs: "
"%s is not supported" % str(self._returnType_placeholder))
return self._returnType_placeholder
@property
def _judf(self):
        # It is possible that concurrent access to a newly created UDF
        # will initialize multiple UserDefinedPythonFunctions.
        # This is unlikely, doesn't affect correctness,
        # and should have a minimal performance impact.
if self._judf_placeholder is None:
self._judf_placeholder = self._create_judf()
return self._judf_placeholder
def _create_judf(self):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
wrapped_func = _wrap_function(sc, self.func, self.returnType)
jdt = spark._jsparkSession.parseDataType(self.returnType.json())
judf = sc._jvm.org.apache.spark.sql.execution.python.UserDefinedPythonFunction(
self._name, wrapped_func, jdt, self.evalType, self.deterministic)
return judf
def __call__(self, *cols):
judf = self._judf
sc = SparkContext._active_spark_context
return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
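    # Usage sketch (illustrative; normally constructed via functions.udf):
    #   slen = udf(lambda s: len(s), IntegerType())
    #   df.select(slen(df["name"]))  # __call__ wraps the args into a Column expr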
# This function is for improving the online help system in the interactive interpreter.
# For example, the built-in help / pydoc.help. It wraps the UDF with the docstring and
# argument annotation. (See: SPARK-19161)
def _wrapped(self):
"""
Wrap this udf with a function and attach docstring from func
"""
# It is possible for a callable instance without __name__ attribute or/and
# __module__ attribute to be wrapped here. For example, functools.partial. In this case,
# we should avoid wrapping the attributes from the wrapped function to the wrapper
# function. So, we take out these attribute names from the default names to set and
# then manually assign it after being wrapped.
assignments = tuple(
a for a in functools.WRAPPER_ASSIGNMENTS if a != '__name__' and a != '__module__')
@functools.wraps(self.func, assigned=assignments)
def wrapper(*args):
return self(*args)
wrapper.__name__ = self._name
wrapper.__module__ = (self.func.__module__ if hasattr(self.func, '__module__')
else self.func.__class__.__module__)
wrapper.func = self.func
wrapper.returnType = self.returnType
wrapper.evalType = self.evalType
wrapper.deterministic = self.deterministic
wrapper.asNondeterministic = functools.wraps(
self.asNondeterministic)(lambda: self.asNondeterministic()._wrapped())
return wrapper
def asNondeterministic(self):
"""
Updates UserDefinedFunction to nondeterministic.
.. versionadded:: 2.3
"""
# Here, we explicitly clean the cache to create a JVM UDF instance
# with 'deterministic' updated. See SPARK-23233.
self._judf_placeholder = None
self.deterministic = False
return self
class UDFRegistration(object):
"""
Wrapper for user-defined function registration. This instance can be accessed by
:attr:`spark.udf` or :attr:`sqlContext.udf`.
.. versionadded:: 1.3.1
"""
def __init__(self, sparkSession):
self.sparkSession = sparkSession
@ignore_unicode_prefix
@since("1.3.1")
def register(self, name, f, returnType=None):
"""Register a Python function (including lambda function) or a user-defined function
as a SQL function.
:param name: name of the user-defined function in SQL statements.
:param f: a Python function, or a user-defined function. The user-defined function can
be either row-at-a-time or vectorized. See :meth:`pyspark.sql.functions.udf` and
:meth:`pyspark.sql.functions.pandas_udf`.
:param returnType: the return type of the registered user-defined function. The value can
be either a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:return: a user-defined function.
To register a nondeterministic Python function, users need to first build
a nondeterministic user-defined function for the Python function and then register it
as a SQL function.
`returnType` can be optionally specified when `f` is a Python function but not
when `f` is a user-defined function. Please see below.
1. When `f` is a Python function:
`returnType` defaults to string type and can be optionally specified. The produced
object must match the specified type. In this case, this API works as if
`register(name, f, returnType=StringType())`.
>>> strlen = spark.udf.register("stringLengthString", lambda x: len(x))
>>> spark.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> spark.sql("SELECT 'foo' AS text").select(strlen("text")).collect()
[Row(stringLengthString(text)=u'3')]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> _ = spark.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> spark.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
2. When `f` is a user-defined function:
Spark uses the return type of the given user-defined function as the return type of
the registered user-defined function. `returnType` should not be specified.
In this case, this API works as if `register(name, f)`.
>>> from pyspark.sql.types import IntegerType
>>> from pyspark.sql.functions import udf
>>> slen = udf(lambda s: len(s), IntegerType())
>>> _ = spark.udf.register("slen", slen)
>>> spark.sql("SELECT slen('test')").collect()
[Row(slen(test)=4)]
>>> import random
>>> from pyspark.sql.functions import udf
>>> from pyspark.sql.types import IntegerType
>>> random_udf = udf(lambda: random.randint(0, 100), IntegerType()).asNondeterministic()
>>> new_random_udf = spark.udf.register("random_udf", random_udf)
>>> spark.sql("SELECT random_udf()").collect() # doctest: +SKIP
[Row(random_udf()=82)]
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> _ = spark.udf.register("add_one", add_one) # doctest: +SKIP
>>> spark.sql("SELECT add_one(id) FROM range(3)").collect() # doctest: +SKIP
[Row(add_one(id)=1), Row(add_one(id)=2), Row(add_one(id)=3)]
>>> @pandas_udf("integer", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def sum_udf(v):
... return v.sum()
...
>>> _ = spark.udf.register("sum_udf", sum_udf) # doctest: +SKIP
>>> q = "SELECT sum_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
>>> spark.sql(q).collect() # doctest: +SKIP
[Row(sum_udf(v1)=1), Row(sum_udf(v1)=5)]
.. note:: Registration for a user-defined function (case 2.) was added from
Spark 2.3.0.
"""
# This is to check whether the input function is from a user-defined function or
# Python function.
if hasattr(f, 'asNondeterministic'):
if returnType is not None:
raise TypeError(
"Invalid returnType: data type can not be specified when f is"
"a user-defined function, but got %s." % returnType)
if f.evalType not in [PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF]:
raise ValueError(
"Invalid f: f must be SQL_BATCHED_UDF, SQL_SCALAR_PANDAS_UDF, "
"SQL_SCALAR_PANDAS_ITER_UDF, SQL_GROUPED_AGG_PANDAS_UDF or "
"SQL_MAP_PANDAS_ITER_UDF.")
register_udf = UserDefinedFunction(f.func, returnType=f.returnType, name=name,
evalType=f.evalType,
deterministic=f.deterministic)
return_udf = f
else:
if returnType is None:
returnType = StringType()
register_udf = UserDefinedFunction(f, returnType=returnType, name=name,
evalType=PythonEvalType.SQL_BATCHED_UDF)
return_udf = register_udf._wrapped()
self.sparkSession._jsparkSession.udf().registerPython(name, register_udf._judf)
return return_udf
@ignore_unicode_prefix
@since(2.3)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a Java user-defined function as a SQL function.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the user-defined function
:param javaClassName: fully qualified name of java class
:param returnType: the return type of the registered Java function. The value can be either
a :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> spark.udf.registerJavaFunction(
... "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> spark.sql("SELECT javaStringLength('test')").collect()
[Row(javaStringLength(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength2", "test.org.apache.spark.sql.JavaStringLength")
>>> spark.sql("SELECT javaStringLength2('test')").collect()
[Row(javaStringLength2(test)=4)]
>>> spark.udf.registerJavaFunction(
... "javaStringLength3", "test.org.apache.spark.sql.JavaStringLength", "integer")
>>> spark.sql("SELECT javaStringLength3('test')").collect()
[Row(javaStringLength3(test)=4)]
"""
jdt = None
if returnType is not None:
if not isinstance(returnType, DataType):
returnType = _parse_datatype_string(returnType)
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
@ignore_unicode_prefix
@since(2.3)
def registerJavaUDAF(self, name, javaClassName):
"""Register a Java user-defined aggregate function as a SQL function.
:param name: name of the user-defined aggregate function
:param javaClassName: fully qualified name of java class
>>> spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
>>> df = spark.createDataFrame([(1, "a"),(2, "b"), (3, "a")],["id", "name"])
>>> df.createOrReplaceTempView("df")
>>> spark.sql("SELECT name, javaUDAF(id) as avg from df group by name order by name desc") \
.collect()
[Row(name=u'b', avg=102.0), Row(name=u'a', avg=102.0)]
"""
self.sparkSession._jsparkSession.udf().registerJavaUDAF(name, javaClassName)
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.udf
globs = pyspark.sql.udf.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.udf tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.udf, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | 2,952,520,590,962,127,400 | 44.776087 | 100 | 0.609916 | false |
yamahigashi/SynopticGenerator | synopticgenerator/image/set_transparent_by_color.py | 1 | 1975 | """ coding: utf-8 """
import os
import tempfile
import shutil
from PIL import Image
import synopticgenerator.util as util
class Transparenter(object):
def __init__(self, config, environ):
self.config = config
self.environ = environ
self.color_table = environ.setdefault("color_table", None)
def execute(self, content):
input_file = self.config["input"]
color = util.color(self.config["color"], self.color_table)
res = transparent(input_file, color)
shutil.copyfile(res, self.config["output"])
shutil.rmtree(os.path.dirname(res))
return content
def transparent(f, color=util.color("255, 255, 255")):
""" 指定パス画像の指定カラー部分透明にする """
if isinstance(f, file):
file_path = f.name
elif isinstance(f, str):
file_path = f
img = Image.open(file_path)
img = img.convert("RGBA")
r = color.r
g = color.g
b = color.b
newData = []
for item in img.getdata():
if item[0] == r and item[1] == g and item[2] == b:
newData.append((r, g, b, 0))
else:
newData.append(item)
img.putdata(newData)
tmpdir = tempfile.mkdtemp()
f_name = os.path.basename(file_path)
output_path = os.path.join(tmpdir, os.path.splitext(f_name)[0]) + ".png"
img.save(output_path, "PNG")
return output_path
def create(config, environ):
return Transparenter(config, environ)
if __name__ == "__main__":
import argparse
recognizer = argparse.ArgumentParser()
recognizer.add_argument(
"-i", "--input", dest="input_file",
help="input file", metavar="FILE", type=file)
recognizer.add_argument(
"-c", "--color",
dest="color", default="255, 255, 255", type=util.color,
help="don't print status messages to stdout")
args = recognizer.parse_args()
path = transparent(args.input_file, args.color)
print path
| mit | 6,518,124,224,156,784,000 | 24.8 | 76 | 0.607235 | false |
n3011/deeprl | core/solver.py | 1 | 8788 | # -------------------------------------------------------------------#
# Released under the MIT license (https://opensource.org/licenses/MIT)
# Contact: [email protected]
# Copyright 2017, Mrinal Haloi
# -------------------------------------------------------------------#
import tensorflow as tf
import numpy as np
from tqdm import tqdm
import time
from models.custom_model import Model
from core.base import Base
from utils import utils
from core import logger
class Solver(Base):
def __init__(self, cfg, environment, sess, model_dir, double_q=True, **kwargs):
self.inputs = tf.placeholder('float32', [
None, cfg.screen_height, cfg.screen_width, cfg.history_length], name='inputs')
self.target_inputs = tf.placeholder('float32', [
None, cfg.screen_height, cfg.screen_width, cfg.history_length], name='target_inputs')
self.target_q_t = tf.placeholder('float32', [None], name='target_q_t')
self.action = tf.placeholder('int64', [None], name='action')
self.double_q = double_q
super(Solver, self).__init__(
cfg, environment, sess, model_dir, **kwargs)
def train(self):
start_time = time.time()
num_game, self.update_count, ep_reward = 0, 0, 0.
total_reward, self.total_loss, self.total_q = 0., 0., 0.
max_avg_ep_reward = 0
ep_rewards, actions = [], []
screen, reward, action, terminal = self.env.new_random_game()
self.optim, self.loss, self.end_points_q, self.end_points_target_q = self.tower_loss(
self.inputs, self.target_inputs)
self.targetops = self.update_target_graph(
tf.trainable_variables(), self.cfg.tau)
self.saver = tf.train.Saver(max_to_keep=None)
init = tf.global_variables_initializer()
self.sess.run(init)
start_step = self.step_op.eval()
for _ in range(self.cfg.history_length):
self.history.add(screen)
for self.step in tqdm(range(start_step, self.cfg.max_step), ncols=70, initial=start_step):
if self.step == self.cfg.learn_start:
num_game, self.update_count, ep_reward = 0, 0, 0.
total_reward, self.total_loss, self.total_q = 0., 0., 0.
ep_rewards, actions = [], []
self.updated_lr = self.lr_policy.initial_lr
ep = (self.cfg.ep_end + max(0., (self.cfg.ep_start - self.cfg.ep_end) *
(self.cfg.ep_end_t - max(0., self.step - self.cfg.learn_start)) / self.cfg.ep_end_t))
# 1. predict
action = self.predict(
self.end_points_q['pred_action'], self.history.get(), ep=ep)
# 2. act
screen, reward, terminal = self.env.act(action, is_training=True)
# 3. observe
self.observe(screen, reward, action, terminal)
if terminal:
screen, reward, action, terminal = self.env.new_random_game()
num_game += 1
ep_rewards.append(ep_reward)
ep_reward = 0.
else:
ep_reward += reward
logger.debug('Step: %d, Reward: %f' % (self.step, reward))
logger.debug('Step: %d, Expected Reward: %f' %
(self.step, ep_reward))
actions.append(action)
total_reward += reward
logger.debug('Step: %d, Total Reward: %f' %
(self.step, total_reward))
if self.step >= self.cfg.learn_start:
if self.step % self.cfg.test_step == self.cfg.test_step - 1:
avg_reward = total_reward / self.cfg.test_step
avg_loss = self.total_loss / self.update_count
avg_q = self.total_q / self.update_count
try:
max_ep_reward = np.max(ep_rewards)
min_ep_reward = np.min(ep_rewards)
avg_ep_reward = np.mean(ep_rewards)
except:
max_ep_reward, min_ep_reward, avg_ep_reward = 0, 0, 0
print '\navg_r: %.4f, avg_l: %.6f, avg_q: %3.6f, avg_ep_r: %.4f, max_ep_r: %.4f, min_ep_r: %.4f, # game: %d' \
% (avg_reward, avg_loss, avg_q, avg_ep_reward, max_ep_reward, min_ep_reward, num_game)
logger.debug('Step: %d, Avg_R: %.4f, Avg_L: %.6f, Avg_Q: %3.6f, Avg_EP_R: %.4f, Max_EP_R: %.4f, Min_EP_R: %.4f, # Game: %d' % (
self.step, avg_reward, avg_loss, avg_q, avg_ep_reward, max_ep_reward, min_ep_reward, num_game))
if max_avg_ep_reward * 0.9 <= avg_ep_reward:
self.step_assign_op.eval(
{self.step_input: self.step + 1})
utils.save_model(self.saver, self.sess,
self.model_dir, self.step + 1)
max_avg_ep_reward = max(
max_avg_ep_reward, avg_ep_reward)
num_game = 0
total_reward = 0.
self.total_loss = 0.
self.total_q = 0.
self.update_count = 0
ep_reward = 0.
ep_rewards = []
actions = []
end_time = time.time()
        print('Total training time %6.1fs' % (end_time - start_time))
def observe(self, screen, reward, action, terminal):
reward = max(self.cfg.min_reward, min(self.cfg.max_reward, reward))
self.history.add(screen)
self.memory.add(screen, reward, action, terminal)
if self.step > self.cfg.learn_start:
if self.step % self.cfg.train_frequency == 0:
self.train_mini_batch()
if self.step % self.cfg.target_q_update_step == self.cfg.target_q_update_step - 1:
self.update_target(self.targetops, self.sess)
def train_mini_batch(self):
if self.memory.count < self.cfg.history_length:
return
else:
s_t, action, reward, s_t_plus_1, terminal = self.memory.sample()
if self.double_q:
# Double Q-learning
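            # Sketch of the idea (double Q-learning, van Hasselt et al.): the
            # online network selects argmax_a Q(s_{t+1}, a) while the target
            # network evaluates that chosen action. Decoupling action selection
            # from evaluation reduces Q-value overestimation.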
pred_action = self.end_points_q[
'pred_action'].eval({self.inputs: s_t_plus_1})
q_t_plus_1_with_pred_action = self.end_points_target_q['target_q_with_idx'].eval({
self.target_inputs: s_t_plus_1,
self.end_points_target_q['target_q_idx']: [[idx, pred_a] for idx, pred_a in enumerate(pred_action)]})
target_q_t = (1. - terminal) * self.cfg.discount * \
q_t_plus_1_with_pred_action + reward
else:
q_t_plus_1 = self.end_points_target_q[
'q'].eval({self.target_inputs: s_t_plus_1})
terminal = np.array(terminal) + 0.
max_q_t_plus_1 = np.max(q_t_plus_1, axis=1)
target_q_t = (1. - terminal) * self.cfg.discount * \
max_q_t_plus_1 + reward
_, q_t, loss = self.sess.run([self.optim, self.end_points_q['q'], self.loss], {
self.target_q_t: target_q_t,
self.action: action,
self.inputs: s_t,
self.learning_rate: self.updated_lr})
# self.writer.add_summary(summary_str, self.step)
self.total_loss += loss
self.total_q += q_t.mean()
self.update_count += 1
def tower_loss(self, inputs, target_inputs):
model_q = Model()
model_target_q = Model(is_target_q=True)
end_points_q = model_q.model_def(inputs, self.env, name='main_q')
end_points_target_q = model_target_q.model_def(
target_inputs, self.env, name='target_q')
action_one_hot = tf.one_hot(
self.action, self.env.action_size, 1.0, 0.0, name='action_one_hot')
q_acted = tf.reduce_sum(
end_points_q['q'] * action_one_hot, reduction_indices=1, name='q_acted')
delta = self.target_q_t - q_acted
clipped_delta = tf.clip_by_value(
delta, self.cfg.min_delta, self.cfg.max_delta, name='clipped_delta')
loss = tf.reduce_mean(tf.square(clipped_delta), name='loss')
opt = self.optimizer(self.learning_rate, optname='rmsprop', decay=self.cfg.TRAIN.rmsprop_decay,
momentum=self.cfg.TRAIN.rmsprop_momentum, epsilon=self.cfg.TRAIN.rmsprop_epsilon, beta1=self.cfg.TRAIN.rmsprop_beta1, beta2=self.cfg.TRAIN.rmsprop_beta2)
self.grads_and_vars = opt.compute_gradients(loss)
optim = opt.apply_gradients(self.grads_and_vars)
return optim, loss, end_points_q, end_points_target_q
| mit | 6,081,689,562,235,361,000 | 44.533679 | 182 | 0.529358 | false |
klahnakoski/MoDataSubmission | pyLibrary/thread/threads.py | 1 | 26992 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
# THIS THREADING MODULE IS PERMEATED BY THE please_stop SIGNAL.
# THIS SIGNAL IS IMPORTANT FOR PROPER SIGNALLING WHICH ALLOWS
# FOR FAST AND PREDICTABLE SHUTDOWN AND CLEANUP OF THREADS
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from collections import deque
from copy import copy
from datetime import datetime, timedelta
import thread
import threading
import time
import sys
from pyLibrary import strings
from pyLibrary.dot import coalesce, Dict
from pyLibrary.times.dates import Date
from pyLibrary.times.durations import SECOND, MINUTE, Duration
_Log = None
_Except = None
DEBUG = True
MAX_DATETIME = datetime(2286, 11, 20, 17, 46, 39)
DEFAULT_WAIT_TIME = timedelta(minutes=5)
def _late_import():
global _Log
global _Except
from pyLibrary.debugs.logs import Log as _Log
from pyLibrary.debugs.exceptions import Except as _Except
_ = _Log
_ = _Except
class Lock(object):
"""
    SIMPLE LOCK (ACTUALLY, A PYTHON threading.Condition() WITH notify() BEFORE EVERY RELEASE)
"""
def __init__(self, name=""):
self.monitor = threading.Condition()
# if not name:
# if "extract_stack" not in globals():
# from pyLibrary.debugs.logs import extract_stack
#
# self.name = extract_stack(1)[0].method
def __enter__(self):
# with pyLibrary.times.timer.Timer("get lock"):
self.monitor.acquire()
return self
def __exit__(self, a, b, c):
self.monitor.notify()
self.monitor.release()
def wait(self, timeout=None, till=None):
if till:
timeout = (till - Date.now()).seconds
if timeout < 0:
return
if isinstance(timeout, Duration):
timeout = timeout.seconds
try:
            self.monitor.wait(timeout=float(timeout) if timeout != None else None)
except Exception, e:
_Log.error("logic error using timeout {{timeout}}", timeout=timeout, cause=e)
def notify_all(self):
self.monitor.notify_all()
class Queue(object):
"""
SIMPLE MESSAGE QUEUE, multiprocessing.Queue REQUIRES SERIALIZATION, WHICH
IS DIFFICULT TO USE JUST BETWEEN THREADS (SERIALIZATION REQUIRED)
"""
def __init__(self, name, max=None, silent=False, unique=False):
"""
max - LIMIT THE NUMBER IN THE QUEUE, IF TOO MANY add() AND extend() WILL BLOCK
silent - COMPLAIN IF THE READERS ARE TOO SLOW
unique - SET True IF YOU WANT ONLY ONE INSTANCE IN THE QUEUE AT A TIME
"""
self.name = name
self.max = coalesce(max, 2 ** 10)
self.silent = silent
self.unique = unique
self.keep_running = True
self.lock = Lock("lock for queue " + name)
self.queue = deque()
self.next_warning = Date.now() # FOR DEBUGGING
def __iter__(self):
while self.keep_running:
try:
value = self.pop()
if value is not Thread.STOP:
yield value
except Exception, e:
_Log.warning("Tell me about what happened here", e)
_Log.note("queue iterator is done")
def add(self, value, timeout=None):
with self.lock:
self._wait_for_queue_space(timeout=None)
if self.keep_running:
if self.unique:
if value not in self.queue:
self.queue.append(value)
else:
self.queue.append(value)
return self
def push(self, value):
"""
SNEAK value TO FRONT OF THE QUEUE
"""
with self.lock:
self._wait_for_queue_space()
if self.keep_running:
self.queue.appendleft(value)
return self
def extend(self, values):
with self.lock:
# ONCE THE queue IS BELOW LIMIT, ALLOW ADDING MORE
self._wait_for_queue_space()
if self.keep_running:
if self.unique:
for v in values:
if v not in self.queue:
self.queue.append(v)
else:
self.queue.extend(values)
return self
def _wait_for_queue_space(self, timeout=DEFAULT_WAIT_TIME):
"""
EXPECT THE self.lock TO BE HAD, WAITS FOR self.queue TO HAVE A LITTLE SPACE
"""
wait_time = 5 * SECOND
now = Date.now()
if timeout:
time_to_stop_waiting = now + timeout
else:
            time_to_stop_waiting = MAX_DATETIME
if self.next_warning < now:
self.next_warning = now + wait_time
while self.keep_running and len(self.queue) > self.max:
if now > time_to_stop_waiting:
if not _Log:
_late_import()
_Log.error(Thread.TIMEOUT)
if self.silent:
self.lock.wait()
else:
self.lock.wait(wait_time)
if len(self.queue) > self.max:
now = Date.now()
if self.next_warning < now:
self.next_warning = now + wait_time
_Log.alert(
"Queue by name of {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec",
name=self.name,
num=len(self.queue),
wait_time=wait_time
)
def __len__(self):
with self.lock:
return len(self.queue)
def __nonzero__(self):
with self.lock:
return any(r != Thread.STOP for r in self.queue)
def pop(self, till=None, timeout=None):
"""
WAIT FOR NEXT ITEM ON THE QUEUE
RETURN Thread.STOP IF QUEUE IS CLOSED
IF till IS PROVIDED, THEN pop() CAN TIMEOUT AND RETURN None
"""
if timeout:
till = Date.now() + timeout
with self.lock:
if till == None:
while self.keep_running:
if self.queue:
value = self.queue.popleft()
if value is Thread.STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
self.keep_running = False
return value
try:
self.lock.wait()
except Exception, e:
pass
else:
while self.keep_running:
if self.queue:
value = self.queue.popleft()
if value is Thread.STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
self.keep_running = False
return value
elif Date.now() > till:
break
try:
self.lock.wait(till=till)
except Exception, e:
pass
if self.keep_running:
return None
if DEBUG or not self.silent:
_Log.note(self.name + " queue stopped")
return Thread.STOP
def pop_all(self):
"""
NON-BLOCKING POP ALL IN QUEUE, IF ANY
"""
with self.lock:
if not self.keep_running:
return [Thread.STOP]
if not self.queue:
return []
for v in self.queue:
if v is Thread.STOP: # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
self.keep_running = False
output = list(self.queue)
self.queue.clear()
return output
def pop_one(self):
"""
NON-BLOCKING POP IN QUEUE, IF ANY
"""
with self.lock:
            if not self.keep_running:
                return Thread.STOP
            elif not self.queue:
                return None
            else:
                v = self.queue.popleft()  # FIFO, CONSISTENT WITH pop()
                if v is Thread.STOP:  # SENDING A STOP INTO THE QUEUE IS ALSO AN OPTION
                    self.keep_running = False
                return v
def close(self):
with self.lock:
self.keep_running = False
def commit(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class AllThread(object):
"""
RUN ALL ADDED FUNCTIONS IN PARALLEL, BE SURE TO HAVE JOINED BEFORE EXIT
"""
def __init__(self):
if not _Log:
_late_import()
self.threads = []
def __enter__(self):
return self
# WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
def __exit__(self, type, value, traceback):
self.join()
def join(self):
exceptions = []
try:
for t in self.threads:
response = t.join()
if "exception" in response:
exceptions.append(response["exception"])
except Exception, e:
_Log.warning("Problem joining", e)
if exceptions:
_Log.error("Problem in child threads", exceptions)
def add(self, target, *args, **kwargs):
"""
target IS THE FUNCTION TO EXECUTE IN THE THREAD
"""
t = Thread.run(target.__name__, target, *args, **kwargs)
self.threads.append(t)
class MainThread(object):
def __init__(self):
self.name = "Main Thread"
self.id = thread.get_ident()
self.children = []
def add_child(self, child):
self.children.append(child)
def remove_child(self, child):
try:
self.children.remove(child)
except Exception, _:
pass
def stop(self):
"""
BLOCKS UNTIL ALL THREADS HAVE STOPPED
"""
children = copy(self.children)
for c in reversed(children):
if c.name:
_Log.note("Stopping thread {{name|quote}}", name=c.name)
c.stop()
for c in children:
c.join()
MAIN_THREAD = MainThread()
ALL_LOCK = Lock("threads ALL_LOCK")
ALL = dict()
ALL[thread.get_ident()] = MAIN_THREAD
class Thread(object):
"""
join() ENHANCED TO ALLOW CAPTURE OF CTRL-C, AND RETURN POSSIBLE THREAD EXCEPTIONS
run() ENHANCED TO CAPTURE EXCEPTIONS
"""
num_threads = 0
STOP = "stop"
TIMEOUT = "TIMEOUT"
def __init__(self, name, target, *args, **kwargs):
if not _Log:
_late_import()
self.id = -1
self.name = name
self.target = target
self.end_of_thread = None
self.synch_lock = Lock("response synch lock")
self.args = args
# ENSURE THERE IS A SHARED please_stop SIGNAL
self.kwargs = copy(kwargs)
self.kwargs["please_stop"] = self.kwargs.get("please_stop", Signal())
self.please_stop = self.kwargs["please_stop"]
self.thread = None
self.stopped = Signal()
self.cprofiler = None
self.children = []
if "parent_thread" in kwargs:
del self.kwargs["parent_thread"]
self.parent = kwargs["parent_thread"]
else:
self.parent = Thread.current()
self.parent.add_child(self)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if isinstance(type, BaseException):
self.please_stop.go()
# TODO: AFTER A WHILE START KILLING THREAD
self.join()
self.args = None
self.kwargs = None
def start(self):
if not _Log:
_late_import()
try:
self.thread = thread.start_new_thread(Thread._run, (self, ))
return self
except Exception, e:
_Log.error("Can not start thread", e)
def stop(self):
for c in copy(self.children):
c.stop()
self.please_stop.go()
def add_child(self, child):
self.children.append(child)
def remove_child(self, child):
try:
self.children.remove(child)
except Exception, e:
_Log.error("not expected", e)
def _run(self):
if _Log.cprofiler:
import cProfile
_Log.note("starting cprofile for thread {{thread}}", thread=self.name)
self.cprofiler = cProfile.Profile()
self.cprofiler.enable()
self.id = thread.get_ident()
with ALL_LOCK:
ALL[self.id] = self
try:
if self.target is not None:
a, k, self.args, self.kwargs = self.args, self.kwargs, None, None
response = self.target(*a, **k)
with self.synch_lock:
self.end_of_thread = Dict(response=response)
except Exception, e:
with self.synch_lock:
self.end_of_thread = Dict(exception=_Except.wrap(e))
try:
_Log.fatal("Problem in thread {{name|quote}}", name=self.name, cause=e)
except Exception, f:
sys.stderr.write("ERROR in thread: " + str(self.name) + " " + str(e) + "\n")
finally:
children = copy(self.children)
for c in children:
try:
c.stop()
except Exception:
pass
for c in children:
try:
c.join()
except Exception, _:
pass
self.stopped.go()
del self.target, self.args, self.kwargs
with ALL_LOCK:
del ALL[self.id]
if self.cprofiler:
import pstats
if DEBUG:
_Log.note("Adding cprofile stats for thread {{thread|quote}}", thread=self.name)
self.cprofiler.disable()
_Log.cprofiler_stats.add(pstats.Stats(self.cprofiler))
del self.cprofiler
def is_alive(self):
return not self.stopped
def join(self, timeout=None, till=None):
"""
RETURN THE RESULT {"response":r, "exception":e} OF THE THREAD EXECUTION (INCLUDING EXCEPTION, IF EXISTS)
"""
if timeout is not None:
if till is None:
till = datetime.utcnow() + timedelta(seconds=timeout)
else:
_Log.error("Can not except both `timeout` and `till`")
children = copy(self.children)
for c in children:
c.join(till=till)
if till is None:
while True:
with self.synch_lock:
for i in range(10):
if self.stopped:
self.parent.remove_child(self)
if not self.end_of_thread.exception:
return self.end_of_thread.response
else:
_Log.error("Thread did not end well", cause=self.end_of_thread.exception)
self.synch_lock.wait(0.5)
if DEBUG:
_Log.note("Waiting on thread {{thread|json}}", thread=self.name)
else:
self.stopped.wait_for_go(till=till)
if self.stopped:
self.parent.remove_child(self)
if not self.end_of_thread.exception:
return self.end_of_thread.response
else:
_Log.error("Thread did not end well", cause=self.end_of_thread.exception)
else:
from pyLibrary.debugs.exceptions import Except
raise Except(type=Thread.TIMEOUT)
@staticmethod
def run(name, target, *args, **kwargs):
if not _Log:
_late_import()
# ENSURE target HAS please_stop ARGUMENT
if "please_stop" not in target.__code__.co_varnames:
_Log.error("function must have please_stop argument for signalling emergency shutdown")
Thread.num_threads += 1
output = Thread(name, target, *args, **kwargs)
output.start()
return output
@staticmethod
def sleep(seconds=None, till=None, timeout=None, please_stop=None):
if please_stop is not None or isinstance(till, Signal):
if isinstance(till, Signal):
please_stop = till
till = MAX_DATETIME
if seconds is not None:
till = datetime.utcnow() + timedelta(seconds=seconds)
elif timeout is not None:
till = datetime.utcnow() + timedelta(seconds=timeout.seconds)
elif till is None:
till = MAX_DATETIME
while not please_stop:
time.sleep(1)
if till < datetime.utcnow():
break
return
if seconds != None:
if isinstance(seconds, Duration):
time.sleep(seconds.total_seconds)
else:
time.sleep(seconds)
elif till != None:
if isinstance(till, datetime):
duration = (till - datetime.utcnow()).total_seconds()
else:
duration = (till - datetime.utcnow()).total_seconds
if duration > 0:
try:
time.sleep(duration)
except Exception, e:
raise e
else:
while True:
time.sleep(10)
@staticmethod
def wait_for_shutdown_signal(
please_stop=False, # ASSIGN SIGNAL TO STOP EARLY
allow_exit=False # ALLOW "exit" COMMAND ON CONSOLE TO ALSO STOP THE APP
):
"""
SLEEP UNTIL keyboard interrupt
"""
if not isinstance(please_stop, Signal):
please_stop = Signal()
please_stop.on_go(lambda: thread.start_new_thread(lambda: MAIN_THREAD.stop(), ()))
if Thread.current() != MAIN_THREAD:
if not _Log:
_late_import()
_Log.error("Only the main thread can sleep forever (waiting for KeyboardInterrupt)")
try:
if allow_exit:
_wait_for_exit(please_stop)
else:
_wait_for_interrupt(please_stop)
except (KeyboardInterrupt, SystemExit), _:
please_stop.go()
_Log.alert("SIGINT Detected! Stopping...")
MAIN_THREAD.stop()
@staticmethod
def current():
id = thread.get_ident()
with ALL_LOCK:
try:
return ALL[id]
except KeyError, e:
return MAIN_THREAD
class Signal(object):
"""
SINGLE-USE THREAD SAFE SIGNAL
go() - ACTIVATE SIGNAL (DOES NOTHING IF SIGNAL IS ALREADY ACTIVATED)
wait_for_go() - PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED
is_go() - TEST IF SIGNAL IS ACTIVATED, DO NOT WAIT (you can also check truthiness)
on_go() - METHOD FOR OTHER THREAD TO RUN WHEN ACTIVATING SIGNAL
"""
def __init__(self):
self.lock = Lock()
self._go = False
self.job_queue = []
def __str__(self):
return str(self._go)
def __bool__(self):
with self.lock:
return self._go
def __nonzero__(self):
with self.lock:
return self._go
def wait_for_go(self, timeout=None, till=None):
"""
PUT THREAD IN WAIT STATE UNTIL SIGNAL IS ACTIVATED
"""
with self.lock:
while not self._go:
self.lock.wait(timeout=timeout, till=till)
return True
def go(self):
"""
ACTIVATE SIGNAL (DOES NOTHING IF SIGNAL IS ALREADY ACTIVATED)
"""
with self.lock:
if self._go:
return
self._go = True
jobs = self.job_queue
self.job_queue = []
self.lock.notify_all()
for j in jobs:
try:
j()
except Exception, e:
_Log.warning("Trigger on Signal.go() failed!", cause=e)
def is_go(self):
"""
TEST IF SIGNAL IS ACTIVATED, DO NOT WAIT
"""
with self.lock:
return self._go
def on_go(self, target):
"""
RUN target WHEN SIGNALED
"""
if not target:
_Log.error("expecting target")
with self.lock:
if self._go:
target()
else:
self.job_queue.append(target)
class ThreadedQueue(Queue):
"""
TODO: Check that this queue is not dropping items at shutdown
DISPATCH TO ANOTHER (SLOWER) queue IN BATCHES OF GIVEN size
"""
def __init__(
self,
name,
queue, # THE SLOWER QUEUE
batch_size=None, # THE MAX SIZE OF BATCHES SENT TO THE SLOW QUEUE
max_size=None, # SET THE MAXIMUM SIZE OF THE QUEUE, WRITERS WILL BLOCK IF QUEUE IS OVER THIS LIMIT
period=None, # MAX TIME BETWEEN FLUSHES TO SLOWER QUEUE
silent=False # WRITES WILL COMPLAIN IF THEY ARE WAITING TOO LONG
):
if not _Log:
_late_import()
batch_size = coalesce(batch_size, int(coalesce(max_size, 0) / 2), 900)
max_size = coalesce(max_size, batch_size * 2) # REASONABLE DEFAULT
period = coalesce(period, SECOND)
bit_more_time = 5 * SECOND
Queue.__init__(self, name=name, max=max_size, silent=silent)
def worker_bee(please_stop):
def stopper():
self.add(Thread.STOP)
please_stop.on_go(stopper)
_buffer = []
            now = Date.now()  # BOUND BEFORE THE LOOP SO THE FLUSH CHECK BELOW NEVER SEES AN UNBOUND NAME
            next_time = now + period  # THE TIME WE SHOULD DO A PUSH
while not please_stop:
try:
if not _buffer:
item = self.pop()
now = Date.now()
if item is Thread.STOP:
queue.extend(_buffer)
please_stop.go()
break
elif item is not None:
_buffer.append(item)
next_time = now + period # NO NEED TO SEND TOO EARLY
continue
item = self.pop(till=next_time)
now = Date.now()
if item is Thread.STOP:
queue.extend(_buffer)
please_stop.go()
break
elif item is not None:
_buffer.append(item)
except Exception, e:
_Log.warning(
"Unexpected problem",
name=name,
cause=e
)
try:
if len(_buffer) >= batch_size or now > next_time:
next_time = now + period
if _buffer:
queue.extend(_buffer)
_buffer = []
# A LITTLE MORE TIME TO FILL THE NEXT BUFFER
now = Date.now()
if now > next_time:
next_time = now + bit_more_time
except Exception, e:
_Log.warning(
"Problem with {{name}} pushing {{num}} items to data sink",
name=name,
num=len(_buffer),
cause=e
)
if _buffer:
# ONE LAST PUSH, DO NOT HAVE TIME TO DEAL WITH ERRORS
queue.extend(_buffer)
self.thread = Thread.run("threaded queue for " + name, worker_bee, parent_thread=self)
def add(self, value, timeout=None):
with self.lock:
self._wait_for_queue_space(timeout=timeout)
if self.keep_running:
self.queue.append(value)
return self
def extend(self, values):
with self.lock:
# ONCE THE queue IS BELOW LIMIT, ALLOW ADDING MORE
self._wait_for_queue_space()
if self.keep_running:
self.queue.extend(values)
return self
def __enter__(self):
return self
def __exit__(self, a, b, c):
self.add(Thread.STOP)
if isinstance(b, BaseException):
self.thread.please_stop.go()
self.thread.join()
def stop(self):
self.add(Thread.STOP)
self.thread.join()
def _wait_for_exit(please_stop):
"""
/dev/null SPEWS INFINITE LINES, DO NOT POLL AS OFTEN
"""
cr_count = 0 # COUNT NUMBER OF BLANK LINES
while not please_stop:
# if DEBUG:
# Log.note("inside wait-for-shutdown loop")
if cr_count > 30:
Thread.sleep(seconds=3, please_stop=please_stop)
try:
line = sys.stdin.readline()
except Exception, e:
if "Bad file descriptor" in e:
_wait_for_interrupt(please_stop)
break
# if DEBUG:
# Log.note("read line {{line|quote}}, count={{count}}", line=line, count=cr_count)
if line == "":
cr_count += 1
else:
cr_count = -1000000 # NOT /dev/null
if strings.strip(line) == "exit":
_Log.alert("'exit' Detected! Stopping...")
return
def _wait_for_interrupt(please_stop):
while not please_stop:
if DEBUG:
_Log.note("inside wait-for-shutdown loop")
try:
Thread.sleep(please_stop=please_stop)
except Exception, _:
pass
class Till(Signal):
"""
MANAGE THE TIMEOUT LOGIC
"""
def __init__(self, till=None, timeout=None, seconds=None):
Signal.__init__(self)
timers = []
def go():
self.go()
for t in timers:
t.cancel()
if isinstance(till, Date):
t = threading.Timer((till - Date.now()).seconds, go)
t.start()
timers.append(t)
if timeout:
t = threading.Timer(timeout.seconds, go)
t.start()
timers.append(t)
if seconds:
t = threading.Timer(seconds, go)
t.start()
timers.append(t)
if isinstance(till, Signal):
till.on_go(go)
| mpl-2.0 | 7,233,190,474,011,335,000 | 28.957825 | 138 | 0.507928 | false |
vga101/gaiasky | assets/scripts/showcases/line-objects-update.py | 1 | 1773 | # This script showcases lines and parked runnables
#
# The script creates a line object between the positions of the Earth and the Moon. Then,
# it parks a runnable which updates the line points with the new positions of the
# objects, so that the line is always up to date, even when the objects move. Finally,
# time is started to showcase the line movement.
# Created by Toni Sagrista
from gaia.cu9.ari.gaiaorbit.script import EventScriptingInterface
from gaia.cu9.ari.gaiaorbit.scenegraph import Polyline
from java.lang import Runnable
class LineUpdaterRunnable(Runnable):
def __init__(self, polyline):
self.polyline = polyline
def run(self):
earthp = gs.getObjectPosition("Earth")
moonp = gs.getObjectPosition("Moon")
pl = self.polyline.getPolyline()
pl.x.set(0, earthp[0])
pl.y.set(0, earthp[1])
pl.z.set(0, earthp[2])
pl.x.set(1, moonp[0])
pl.y.set(1, moonp[1])
pl.z.set(1, moonp[2])
self.polyline.markForUpdate()
gs = EventScriptingInterface.instance()
gs.cameraStop()
gs.stopSimulationTime()
gs.setFov(49)
gs.goToObject("Earth", 91.38e-2)
print("We will now add a line between the Earth and Moon")
gs.sleep(2)
earthp = gs.getObjectPosition("Earth")
moonp = gs.getObjectPosition("Moon")
gs.addPolyline("line-em", earthp + moonp, [ 1., .2, .2, .8 ], 1 )
gs.sleep(1.0)
line_em = gs.getObject("line-em")
gs.parkRunnable("line-updater", LineUpdaterRunnable(line_em))
gs.setSimulationPace(65536.0)
gs.startSimulationTime()
gs.sleep(30)
gs.stopSimulationTime()
print("Cleaning up and ending")
gs.unparkRunnable("line-updater")
gs.removeModelObject("line-em")
gs.cameraStop()
gs.maximizeInterfaceWindow()
gs.enableInput()
| mpl-2.0 | -6,904,757,617,755,331,000 | 23.287671 | 89 | 0.692611 | false |
kuke/models | fluid/PaddleCV/rcnn/models/resnet.py | 1 | 6075 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Constant
from paddle.fluid.regularizer import L2Decay
from config import cfg
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
name=None):
conv1 = fluid.layers.conv2d(
input=input,
num_filters=ch_out,
filter_size=filter_size,
stride=stride,
padding=padding,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=ParamAttr(name=name + "_biases"),
name=name + '.conv2d.output.1')
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
return fluid.layers.batch_norm(
input=conv1,
act=act,
name=bn_name + '.output.1',
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance',
is_test=True)
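# NOTE (added sketch): conv_affine_layer below emulates a *frozen* batch norm.
# The pretrained BN scale/offset are loaded as constant per-channel parameters
# (learning_rate=0, stop_gradient=True) and applied with affine_channel --
# the usual trick for fine-tuning detection backbones with small batch sizes.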
def conv_affine_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=ch_out,
filter_size=filter_size,
stride=stride,
padding=padding,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
name=name + '.conv2d.output.1')
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
scale = fluid.layers.create_parameter(
shape=[conv.shape[1]],
dtype=conv.dtype,
attr=ParamAttr(
name=bn_name + '_scale', learning_rate=0.),
default_initializer=Constant(1.))
scale.stop_gradient = True
bias = fluid.layers.create_parameter(
shape=[conv.shape[1]],
dtype=conv.dtype,
attr=ParamAttr(
bn_name + '_offset', learning_rate=0.),
default_initializer=Constant(0.))
bias.stop_gradient = True
out = fluid.layers.affine_channel(x=conv, scale=scale, bias=bias)
if act == 'relu':
out = fluid.layers.relu(x=out)
return out
def shortcut(input, ch_out, stride, name):
ch_in = input.shape[1] # if args.data_format == 'NCHW' else input.shape[-1]
if ch_in != ch_out:
return conv_affine_layer(input, ch_out, 1, stride, 0, None, name=name)
else:
return input
def basicblock(input, ch_out, stride, name):
short = shortcut(input, ch_out, stride, name=name)
conv1 = conv_affine_layer(input, ch_out, 3, stride, 1, name=name)
conv2 = conv_affine_layer(conv1, ch_out, 3, 1, 1, act=None, name=name)
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu', name=name)
def bottleneck(input, ch_out, stride, name):
short = shortcut(input, ch_out * 4, stride, name=name + "_branch1")
conv1 = conv_affine_layer(
input, ch_out, 1, stride, 0, name=name + "_branch2a")
conv2 = conv_affine_layer(conv1, ch_out, 3, 1, 1, name=name + "_branch2b")
conv3 = conv_affine_layer(
conv2, ch_out * 4, 1, 1, 0, act=None, name=name + "_branch2c")
return fluid.layers.elementwise_add(
x=short, y=conv3, act='relu', name=name + ".add.output.5")
def layer_warp(block_func, input, ch_out, count, stride, name):
res_out = block_func(input, ch_out, stride, name=name + "a")
for i in range(1, count):
res_out = block_func(res_out, ch_out, 1, name=name + chr(ord("a") + i))
return res_out
ResNet_cfg = {
    18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}
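# ResNet_cfg maps network depth to (residual blocks per stage, block function).
# Only the ResNet-50 entry is used below: stages[0:3] builds conv2-conv4 for
# the body, and res5 is applied per-RoI in add_ResNet_roi_conv5_head.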
def add_ResNet50_conv4_body(body_input):
stages, block_func = ResNet_cfg[50]
stages = stages[0:3]
conv1 = conv_affine_layer(
body_input, ch_out=64, filter_size=7, stride=2, padding=3, name="conv1")
pool1 = fluid.layers.pool2d(
input=conv1,
pool_type='max',
pool_size=3,
pool_stride=2,
pool_padding=1)
res2 = layer_warp(block_func, pool1, 64, stages[0], 1, name="res2")
if cfg.TRAIN.freeze_at == 2:
res2.stop_gradient = True
res3 = layer_warp(block_func, res2, 128, stages[1], 2, name="res3")
if cfg.TRAIN.freeze_at == 3:
res3.stop_gradient = True
res4 = layer_warp(block_func, res3, 256, stages[2], 2, name="res4")
if cfg.TRAIN.freeze_at == 4:
res4.stop_gradient = True
return res4
def add_ResNet_roi_conv5_head(head_input, rois):
if cfg.roi_func == 'RoIPool':
pool = fluid.layers.roi_pool(
input=head_input,
rois=rois,
pooled_height=cfg.roi_resolution,
pooled_width=cfg.roi_resolution,
spatial_scale=cfg.spatial_scale)
elif cfg.roi_func == 'RoIAlign':
pool = fluid.layers.roi_align(
input=head_input,
rois=rois,
pooled_height=cfg.roi_resolution,
pooled_width=cfg.roi_resolution,
spatial_scale=cfg.spatial_scale,
sampling_ratio=cfg.sampling_ratio)
res5 = layer_warp(bottleneck, pool, 512, 3, 2, name="res5")
return res5
| apache-2.0 | 242,667,341,747,501,120 | 32.563536 | 80 | 0.58963 | false |
joshl8n/school-projects | bank-accounts/main.py | 1 | 2889 | from account import Account
from checking import Checking
from savings import Savings
def acc_type(): # initializes which account to create
print
print "Which account would you like to open?"
print "[c] Checking Account"
print "[s] Savings Account"
print "[q] Quit"
while True:
account = raw_input("Enter an option: ")
if account in ("c", "C"):
account = "c"
return account
if account in ("s", "S"):
account = "s"
return account
if account in ("q", "Q"):
account = "q"
return account # created in case user quits before selecting account, needed for displaying final balance
def acc_select(acc_type): # creates instance of account
#if not i_account_set:
if acc_type == "c":
account = Checking(name, acc_num, balance, overdraft_limit)
print
print "=== " + account.getType() + " Account ==="
return account
if acc_type == "s":
account = Savings(name, acc_num, balance, interest)
print
print "=== " + account.getType() + " Account ==="
return account
if acc_type == "q":
account = Account(name, acc_num, balance)
return account
def get_balance(account):
return account.getBalance()
def main(account):
if account.getType() == "Account": # quits if user quits before selecting account
return False
print
print "What would you like to do?"
print "[d] Deposit"
print "[w] Withdraw"
if account.getType() == "Savings": # ensures interest option isn't displayed for checking account
print "[a] Add monthly interest"
print "[q] Quit"
while True:
print "Your account balance is $" + str(account.getBalance())
option = raw_input("Enter an option: ")
if option in ("d", "D", "w", "W", "q", "Q"): # allowed inputs
break
if account.getType() == "Savings" and option in ("a", "A"): # only allows "a" input for savings account
break
print "Try again."
if option in ("d", "D"):
amount = input("How much would you like to deposit?: ")
account.deposit(amount)
print "Your account balance is $" + str(account.getBalance())
return True
if option in ("w", "W"):
amount = input("How much would you like to withdraw?: ")
if not account.withdraw(amount):
print "Whoa! Insufficent funds available."
print "Your account balance is $" + str(account.getBalance())
return True
if option in ("a", "A"):
print "Interest Added!", account.getInterest()
print "Your account balance is $" + str(account.monthlyInterest())
return True
if option == "q":
return False
name = raw_input("What is the account holder\'s name?: ")
acc_num = input("What is the account number?: ")
balance = input("What is the starting balance?: ")
overdraft_limit = input("What is the overdraft limit?: ")
interest = input("What is the interest rate?: ")
selected_acc = acc_select(acc_type())
while main(selected_acc):
continue
print "Goodbye! Your final account balance is $" + str(get_balance(selected_acc))
| gpl-3.0 | -3,457,749,278,412,813,000 | 27.048544 | 108 | 0.672897 | false |
asm-products/nested-communities-core | volunteering/tests/test_models.py | 1 | 27071 | import unittest
from datetime import date, time
from django.db import IntegrityError
from django.test import TestCase
import factories as f
from volunteering.models import (Activity, Assignment, Attribute, Campaign,
Duty, Event, Location, Sendable, TriggerBase,
TriggerByDate, Message, Volunteer)
class TestAttribute(TestCase):
def testHasAName(self):
attribute = Attribute(name='yada')
self.assertEqual('yada', attribute.name)
def testAttributeIsUnique(self):
f.AttributeFactory.create(name='yada')
with self.assertRaises(Exception):
f.AttributeFactory.create(name='yada')
class TestVolunteer(TestCase):
def testVolunteerFactory(self):
v = f.VolunteerFactory()
self.assertTrue(Volunteer.objects.filter(id=v.id).exists())
def testSettingAnAttribute(self):
v = f.VolunteerFactory.create(surname='tester')
a = f.AttributeFactory.create(name='an attr')
v.attributes.add(a)
self.assertEqual('an attr', v.attributes.all()[0].name)
def testHasAUniqueSlugThatHas8PlusOneChars(self):
v = f.VolunteerFactory.create()
self.assertEqual(9, len(v.slug))
def testHasSlugWithDashInTheMiddle(self):
v = f.VolunteerFactory.create()
self.assertEqual('-', v.slug[4])
def testTheSlugIsUnique(self):
v1 = f.VolunteerFactory.build()
v2 = f.VolunteerFactory.build()
fam = f.FamilyFactory.create()
self.assertEqual('', v1.slug)
self.assertEqual('', v2.slug)
v1.family = fam
v2.family = fam
v1.save()
v2.save()
self.assertNotEqual(v1.slug, v2.slug)
def testInitials(self):
volunteer = f.VolunteerFactory(first_name="Jon", surname="George")
self.assertEqual('J.G.', volunteer.initials())
def testFormalName(self):
volunteer = f.VolunteerFactory(first_name="", dear_name="",
surname="Smith", title="Mr.")
self.assertEqual('Mr. Smith', volunteer.formal_name())
def testFormalName_NoTitle(self):
volunteer = f.VolunteerFactory(first_name="", dear_name="",
surname="Smith", title="")
self.assertEqual('Smith', volunteer.formal_name())
def testToName_WithDearName(self):
volunteer = f.VolunteerFactory(first_name="Jon", dear_name="Sam")
self.assertEqual('Sam', volunteer.to_name())
def testToName_WithNoDearName(self):
volunteer = f.VolunteerFactory(first_name="Jon", dear_name="")
self.assertEqual('Jon', volunteer.to_name())
def testToName_WithNoDearNameNorTitle(self):
volunteer = f.VolunteerFactory(first_name="", dear_name="",
surname="Smith", title="Mr.")
self.assertEqual('Mr. Smith', volunteer.to_name())
def testInitialsMultipleLastName(self):
volunteer = f.VolunteerFactory(first_name="Jon Smith",
surname="Stuart")
self.assertEqual('J.S.', volunteer.initials())
def testFamilyLink(self):
volunteer = f.VolunteerFactory()
self.assertRegexpMatches(
volunteer.family_link(),
'<a href="/admin/volunteering/family/\d*/">FM\d*</a>')
def testHasClaimed_IsFalseWhenFalse(self):
volunteer = f.VolunteerFactory()
duty = f.DutyFactory()
self.assertFalse(volunteer.has_claimed(duty))
def testHasClaimed_IsTrueWhenTrue(self):
volunteer = f.VolunteerFactory.create()
duty = f.DutyFactory()
Assignment.objects.create(volunteer=volunteer, duty=duty)
self.assertTrue(volunteer.has_claimed(duty))
class TestDuty(TestCase):
@unittest.SkipTest
def testSettingAnAttribute(self):
d = f.DutyFactory()
a = f.AttributeFactory(name='attr')
d.attributes.add(a)
self.assertEqual('attr', d.attributes.all()[0].name)
def testHasVolunteerMultiple(self):
d = f.DutyFactory.build(multiple=5)
self.assertEqual(5, d.multiple)
def testHasOptionalLocation(self):
l = f.LocationFactory.build(name="a location")
d = f.DutyFactory.build(location=l)
self.assertEqual(l.id, d.location_id)
def testHasOptionalEvent(self):
e = f.EventFactory(name="a event")
d = f.DutyFactory(event=e)
self.assertEqual(e.id, d.event_id)
def testDuplicatesEventLocationActivitySet(self):
a_time = time(10, 0)
d = f.FullDutyFactory.create(start_time=a_time, end_time=a_time)
with self.assertRaises(IntegrityError):
Duty(activity=d.activity, location=d.location, event=d.event,
start_time=d.start_time, end_time=d.end_time).save()
def testOneOfOneIsAssignable(self):
f.DutyFactory.create()
self.assertEqual(1, Duty.objects.assignable().count())
def testOneOfOneIsAssignableWhenMultiple2And1Assignment(self):
duty = f.DutyFactory.create(multiple=2)
f.AssignmentFactory(duty=duty)
self.assertEqual(1, Duty.objects.assignable().count())
def testNoneIsAssignableWhenMultiple2And2Assignments(self):
duty = f.DutyFactory.create(multiple=2)
f.AssignmentFactory(duty=duty)
f.AssignmentFactory(duty=duty)
self.assertEqual(0, Duty.objects.assignable().count())
def testNoneIsAssignable(self):
f.DutyFactory.create(multiple=0)
self.assertEqual(0, Duty.objects.assignable().count())
def testOneIsAlreadyAssigned(self):
d = f.DutyFactory.create(multiple=1)
f.AssignmentFactory(duty=d)
self.assertEqual(0, Duty.objects.assignable().count())
def testOneIsAlreadyAssignedOfTwo(self):
d = f.DutyFactory.create(multiple=2)
f.AssignmentFactory(duty=d)
self.assertEqual(1, Duty.objects.assignable().count())
def testOneDutyIsAssignableToVolunteer_NoAttributes(self):
f.DutyFactory.create()
v = f.VolunteerFactory()
self.assertEqual(1, Duty.objects.assignable_to(v).count())
class TestCampaign(TestCase):
def testHasSlug(self):
c = Campaign(slug='a slug')
self.assertEqual('a slug', c.slug)
def testDuties(self):
campaign = f.CampaignFactory()
duty1 = f.FullDutyFactory()
duty2 = f.FullDutyFactory()
duty3 = f.FullDutyFactory()
duty4 = f.FullDutyFactory()
campaign.events.add(duty1.event)
campaign.events.add(duty2.event)
campaign.events.add(duty3.event)
campaign.events.add(duty4.event)
qs = campaign.duties().order_by('id')
expected = [duty1, duty2, duty3, duty4]
self.assertQuerysetEqual(qs, [repr(d) for d in expected])
def testRecipientsViaAssignable(self):
campaign = f.CampaignFactory()
attribute = f.AttributeFactory()
duty1 = f.FullDutyFactory()
duty1.activity.attributes.add(attribute)
volunteer = f.VolunteerFactory()
volunteer.attributes.add(attribute)
campaign.activities.add(duty1.activity)
qs = campaign.recipients(True, True).order_by('id')
self.assertQuerysetEqual(qs, [repr(volunteer)])
def testRecipientsViaAssignableTwice(self):
campaign = f.CampaignFactory()
attribute = f.AttributeFactory()
duty1 = f.FullDutyFactory()
duty1.activity.attributes.add(attribute)
duty2 = f.FullDutyFactory()
duty2.activity.attributes.add(attribute)
volunteer = f.VolunteerFactory()
volunteer.attributes.add(attribute)
campaign.activities.add(duty1.activity)
qs = campaign.recipients(True, True).order_by('id')
self.assertQuerysetEqual(qs, [repr(volunteer)])
def testRecipientsViaAssigned(self):
campaign = f.CampaignFactory()
duty = f.FullDutyFactory()
campaign.events.add(duty.event)
volunteer = f.VolunteerFactory()
f.AssignmentFactory(duty=duty, volunteer=volunteer)
qs = campaign.recipients(assigned=True).order_by('id')
self.assertQuerysetEqual(qs, [repr(volunteer)])
def testRecipientsViaAssignedAndAssignable(self):
campaign = f.CampaignFactory()
attribute = f.AttributeFactory()
duty1 = f.FullDutyFactory()
duty1.activity.attributes.add(attribute)
campaign.activities.add(duty1.activity)
volunteer1 = f.VolunteerFactory()
volunteer1.attributes.add(attribute)
duty2 = f.FullDutyFactory()
campaign.activities.add(duty2.activity)
volunteer2 = f.VolunteerFactory()
f.AssignmentFactory(duty=duty2, volunteer=volunteer2)
qs = campaign.recipients(True, True).order_by('id')
self.assertQuerysetEqual(qs, [repr(volunteer1), repr(volunteer2)])
def testRecipientsWithInvalidArgs(self):
campaign = f.CampaignFactory()
with self.assertRaises(ValueError):
campaign.recipients(True, False, True)
with self.assertRaises(ValueError):
campaign.recipients(False, True, True)
with self.assertRaises(ValueError):
campaign.recipients(True, True, True)
def testRecipientsViaUnassigned(self):
campaign = f.CampaignFactory()
attribute = f.AttributeFactory()
duty1 = f.FullDutyFactory()
duty1.activity.attributes.add(attribute)
campaign.activities.add(duty1.activity)
volunteer1 = f.VolunteerFactory()
volunteer1.attributes.add(attribute)
duty2 = f.FullDutyFactory()
campaign.activities.add(duty2.activity)
volunteer2 = f.VolunteerFactory()
f.AssignmentFactory(duty=duty2, volunteer=volunteer2)
qs = campaign.recipients(unassigned=True).order_by('id')
self.assertQuerysetEqual(qs, [repr(volunteer1)])
def testRecipientsCount(self):
campaign = f.CampaignFactory()
duty = f.FullDutyFactory()
campaign.events.add(duty.event)
volunteer = f.VolunteerFactory()
f.AssignmentFactory(duty=duty, volunteer=volunteer)
self.assertEqual(1, campaign.recipient_count())
def testRecipientNames(self):
campaign = f.CampaignFactory()
duty = f.FullDutyFactory()
campaign.events.add(duty.event)
volunteer1 = f.VolunteerFactory()
f.AssignmentFactory(duty=duty, volunteer=volunteer1)
volunteer2 = f.VolunteerFactory(first_name='Abe')
f.AssignmentFactory(duty=duty, volunteer=volunteer2)
expected = "<ul><li>%s - %s</li><li>%s - %s</li></ul>" % (
volunteer2.name(), volunteer2.email_address,
volunteer1.name(), volunteer1.email_address)
self.assertEqual(expected, campaign.recipient_names())
def testPercentAssigned_NoDuties(self):
campaign = f.CampaignFactory()
self.assertEqual("0%", campaign.percent_assigned())
def testPercentAssigned_IncreasinglyAssigned(self):
duty1 = f.FullDutyFactory(multiple=2)
duty2 = f.FullDutyFactory(multiple=2)
campaign = f.CampaignFactory()
campaign.events.add(duty1.event)
campaign.events.add(duty2.event)
volunteer1 = f.VolunteerFactory()
volunteer2 = f.VolunteerFactory()
self.assertEqual("0%", campaign.percent_assigned())
f.AssignmentFactory(duty=duty1, volunteer=volunteer1)
self.assertEqual("25%", campaign.percent_assigned())
f.AssignmentFactory(duty=duty1, volunteer=volunteer2)
self.assertEqual("50%", campaign.percent_assigned())
f.AssignmentFactory(duty=duty2, volunteer=volunteer1)
self.assertEqual("75%", campaign.percent_assigned())
f.AssignmentFactory(duty=duty2, volunteer=volunteer2)
self.assertEqual("100%", campaign.percent_assigned())
def testVolunteersNeeded_NoDuties(self):
campaign = f.CampaignFactory()
self.assertEqual(0, campaign.volunteers_needed())
def testVolunteersNeeded_OneDutyWithMultiple(self):
campaign = f.CampaignFactory()
duty = f.FullDutyFactory(multiple=5)
campaign.events.add(duty.event)
self.assertEqual(5, campaign.volunteers_needed())
def testVolunteersNeeded_MultipleDutyWithMultiple(self):
campaign = f.CampaignFactory()
duty1 = f.FullDutyFactory(multiple=5)
duty2 = f.FullDutyFactory(multiple=5)
campaign.events.add(duty1.event)
campaign.events.add(duty2.event)
self.assertEqual(10, campaign.volunteers_needed())
def testVolunteersAssigned_NoDuties(self):
campaign = f.CampaignFactory()
self.assertEqual(0, campaign.volunteers_assigned())
def testVolunteersAssigned_OneDutyWithMultiple(self):
campaign = f.CampaignFactory()
duty = f.FullDutyFactory(multiple=5)
f.AssignmentFactory(duty=duty)
campaign.events.add(duty.event)
self.assertEqual(1, campaign.volunteers_assigned())
def testVolunteersAssigned_MultipleDutyWithMultiple(self):
campaign = f.CampaignFactory()
duty1 = f.FullDutyFactory(multiple=5)
duty2 = f.FullDutyFactory(multiple=5)
campaign.events.add(duty1.event)
campaign.events.add(duty2.event)
f.AssignmentFactory(duty=duty1)
f.AssignmentFactory(duty=duty2)
self.assertEqual(2, campaign.volunteers_assigned())
class TestAssignment(TestCase):
def testHasTimestamps(self):
assignment = f.AssignmentFactory()
self.assertTrue(assignment.created)
self.assertTrue(assignment.modified)
def testNoDuplicates(self):
a = f.AssignmentFactory.create()
with self.assertRaises(IntegrityError):
f.AssignmentFactory.create(volunteer=a.volunteer,
duty=a.duty)
@unittest.SkipTest
def testHHServiceLocation(self):
a = f.AssignmentFactory.create()
self.assertEqual(a.hh_service_location(),
a.volunteer.family.hh_location_2014.name)
class TestActivity(TestCase):
def setUp(self):
self.a = f.ActivityFactory.build(
name="the name", description="the short description")
def testHasAName(self):
self.assertEqual(self.a.name, 'the name')
def testNameIsUnique(self):
self.a.save()
with self.assertRaises(IntegrityError):
Activity.objects.create(name='the name')
def testHasADescription(self):
self.assertEqual(self.a.description, 'the short description')
class TestEvent(TestCase):
def setUp(self):
self.a = Event(name="the name",
description="the short description",
date=date(2001, 1, 1))
def testHasAName(self):
self.assertEqual(self.a.name, 'the name')
def testNameIsUnique(self):
self.a.save()
with self.assertRaises(IntegrityError):
Event.objects.create(name='the name')
def testHasADescription(self):
self.assertEqual(self.a.description, 'the short description')
def testHasADate(self):
self.assertEqual(self.a.date, date(2001, 1, 1))
class TestLocation(TestCase):
def setUp(self):
self.l = Location(name="the name",
description="the short description")
def testHasAName(self):
self.assertEqual(self.l.name, 'the name')
def testNameIsUnique(self):
self.l.save()
with self.assertRaises(IntegrityError):
Location.objects.create(name='the name')
def testHasADescription(self):
self.assertEqual(self.l.description, 'the short description')
class TestFamily(TestCase):
def testSingleSurnames(self):
v = f.VolunteerFactory(surname="Abba")
family = v.family
self.assertEqual("Abba", family.surnames())
def testMultipleSurnames(self):
v = f.VolunteerFactory(surname="Abba")
family = v.family
f.VolunteerFactory(family=family, surname="Bacca")
self.assertEqual('Abba, Bacca', family.surnames())
def testSingleNames(self):
v = f.VolunteerFactory(first_name="Joe", surname="Abba")
family = v.family
self.assertEqual("Joe Abba", family.names())
def testMultipleNames(self):
v = f.VolunteerFactory(first_name="Joe", surname="Abba")
family = v.family
f.VolunteerFactory(family=family, first_name="Bob", surname="Bacca")
self.assertEqual('Bob Bacca, Joe Abba', family.names())
class TestSendable(TestCase):
def testSendableFactory(self):
s = f.SendableFactory()
self.assertTrue(Sendable.objects.filter(pk=s.pk).exists())
def testSendableAssignmentFactory(self):
s = f.SendableAssignmentFactory()
self.assertTrue(Sendable.objects.filter(pk=s.pk).exists())
def testSendable_DateCollectSendablesAssignable(self):
fix_to_date = date(2005, 5, 5)
c = f.CampaignFactory()
d = f.FullDutyFactory()
c.events.add(d.event)
v = f.VolunteerFactory()
a = f.AttributeFactory()
v.attributes.add(a)
d.activity.attributes.add(a)
f.TriggerByDateFactory.create_batch(
3, fixed_date=fix_to_date,
assignment_state=TriggerBase.ASSIGNABLE, campaign=c)
result = Sendable.collect_from_fixed_triggers(fix_to_date)
self.assertEqual(3, result)
all_qs = Sendable.objects.all().order_by('id')
self.assertQuerysetEqual(all_qs, [v, v, v],
transform=lambda s: s.volunteer)
def testSendable_DateCollectSendablesAssignableButAlreadyAssigned(self):
fix_to_date = date(2005, 5, 5)
c = f.CampaignFactory()
d = f.FullDutyFactory()
c.events.add(d.event)
v = f.VolunteerFactory()
a = f.AttributeFactory()
v.attributes.add(a)
d.activity.attributes.add(a)
f.AssignmentFactory(volunteer=v, duty=d)
f.TriggerByDateFactory.create(
fixed_date=fix_to_date,
assignment_state=TriggerBase.ASSIGNABLE,
campaign=c)
result = Sendable.collect_from_fixed_triggers(fix_to_date)
self.assertEqual(0, result)
all_qs = Sendable.objects.all()
self.assertQuerysetEqual(all_qs, [])
def testSendable_DateCollectSndblesUnassignedButAssignedOnce(self):
fix_to_date = date(2005, 5, 5)
c = f.CampaignFactory()
d, d2 = f.FullDutyFactory.create_batch(2)
c.events.add(d.event)
c.events.add(d2.event)
v, v2, _ = f.VolunteerFactory.create_batch(3)
a = f.AttributeFactory()
v.attributes.add(a)
v2.attributes.add(a)
d.activity.attributes.add(a)
d2.activity.attributes.add(a)
f.AssignmentFactory(volunteer=v, duty=d)
f.TriggerByDateFactory.create(
fixed_date=fix_to_date,
assignment_state=TriggerBase.UNASSIGNED,
campaign=c)
result = Sendable.collect_from_fixed_triggers(fix_to_date)
self.assertEqual(1, result)
all_qs = Sendable.objects.all()
self.assertQuerysetEqual(all_qs, [v2],
transform=lambda s: s.volunteer)
def testSendable_DateCollectSendablesAssigned(self):
fix_to_date = date(2005, 5, 5)
c = f.CampaignFactory()
d = f.FullDutyFactory()
c.events.add(d.event)
v = f.VolunteerFactory()
a = f.AttributeFactory()
v.attributes.add(a)
d.activity.attributes.add(a)
f.TriggerByDateFactory.create_batch(
3, fixed_date=fix_to_date,
assignment_state=TriggerBase.ASSIGNED,
campaign=c)
result = Sendable.collect_from_fixed_triggers(fix_to_date)
self.assertEqual(0, result)
all_qs = Sendable.objects.all()
self.assertQuerysetEqual(all_qs, [])
def testSendable_EventCollectSendablesAssignable(self):
fix_to_date = date(2225, 5, 5) # This must be in the future.
e = f.EventFactory(date=fix_to_date)
c = f.CampaignFactory()
d = f.FullDutyFactory(event=e)
c.events.add(e)
v = f.VolunteerFactory()
f.AssignmentFactory(volunteer=v, duty=d)
f.TriggerByEventFactory.create_batch(
3, assignment_state=TriggerBase.ASSIGNED, campaign=c)
result = Sendable.collect_from_event_only_assigned_triggers(
fix_to_date)
self.assertEqual(3, result)
all_qs = Sendable.objects.all().order_by('id')
self.assertQuerysetEqual(all_qs, [v, v, v],
transform=lambda s: s.volunteer)
# def testSendable_EventCollectSendablesAssignableButAlreadyAssigned(self):
# fix_to_date = date(2005, 5, 5)
# c = f.CampaignFactory()
# d = f.FullDutyFactory()
# c.events.add(d.event)
# v = f.VolunteerFactory()
# a = f.AttributeFactory()
# v.attributes.add(a)
# d.activity.attributes.add(a)
# f.AssignmentFactory(volunteer=v, duty=d)
# f.TriggerByEventFactory.create(
# fixed_date=fix_to_date,
# assignment_state=TriggerBase.ASSIGNABLE,
# campaign=c)
# result = Sendable.collect_from_fixed_triggers(fix_to_date)
# self.assertEqual(0, result)
# all_qs = Sendable.objects.all()
# self.assertQuerysetEqual(all_qs, [])
# def testSendable_EventCollectSndblsUnassignedButAssignedOnce(self):
# fix_to_date = date(2005, 5, 5)
# c = f.CampaignFactory()
# d, d2 = f.FullDutyFactory.create_batch(2)
# c.events.add(d.event)
# c.events.add(d2.event)
# v, v2, _ = f.VolunteerFactory.create_batch(3)
# a = f.AttributeFactory()
# v.attributes.add(a)
# v2.attributes.add(a)
# d.activity.attributes.add(a)
# d2.activity.attributes.add(a)
# f.AssignmentFactory(volunteer=v, duty=d)
# f.TriggerByEventFactory.create(
# fixed_date=fix_to_date,
# assignment_state=TriggerBase.UNASSIGNED,
# campaign=c)
# result = Sendable.collect_from_fixed_triggers(fix_to_date)
# self.assertEqual(1, result)
# all_qs = Sendable.objects.all()
# self.assertQuerysetEqual(all_qs, [v2],
# transform=lambda s: s.volunteer)
# def testSendable_EventCollectSendablesAssigned(self):
# fix_to_date = date(2005, 5, 5)
# c = f.CampaignFactory()
# d = f.FullDutyFactory()
# c.events.add(d.event)
# v = f.VolunteerFactory()
# a = f.AttributeFactory()
# v.attributes.add(a)
# d.activity.attributes.add(a)
# f.TriggerByEventFactory.create_batch(
# 3, fixed_date=fix_to_date,
# assignment_state=TriggerBase.ASSIGNED,
# campaign=c)
# result = Sendable.collect_from_fixed_triggers(fix_to_date)
# self.assertEqual(0, result)
# all_qs = Sendable.objects.all()
# self.assertQuerysetEqual(all_qs, [])
class TestTriggerByDate(TestCase):
def testTriggerByDateFactory(self):
t = f.TriggerByDateFactory()
self.assertTrue(TriggerByDate.objects.filter(id=t.id).exists())
def testGetSetForADateAllAssignmentStates(self):
fix_to_date = date(2005, 5, 5)
triggers = f.TriggerByDateFactory.create_batch(3,
fixed_date=fix_to_date)
f.TriggerByDateFactory(fixed_date=date(2001, 1, 1))
result = TriggerByDate.objects.triggered(fix_to_date).order_by('id')
self.assertQuerysetEqual(result, [repr(t) for t in triggers])
def testGetSetForADateAssignedWithNoAssigned(self):
fix_to_date = date(2005, 5, 5)
f.TriggerByDateFactory.create_batch(
3, fixed_date=fix_to_date,
assignment_state=TriggerBase.ASSIGNED)
result = TriggerByDate.objects.triggered(fix_to_date).order_by('id')
self.assertQuerysetEqual(result, [])
def testGetSetForADateAssignedWithAssigned(self):
fix_to_date = date(2005, 5, 5)
d = f.FullDutyFactory()
c = f.CampaignFactory()
c.events.add(d.event)
v = f.VolunteerFactory()
f.AssignmentFactory(volunteer=v, duty=d)
triggers = f.TriggerByDateFactory.create_batch(
3, fixed_date=fix_to_date,
assignment_state=TriggerBase.ASSIGNED,
campaign=c)
result = TriggerByDate.objects.triggered(fix_to_date).order_by('id')
self.assertQuerysetEqual(result, [repr(t) for t in triggers])
# Skipped until I figure out how to use annotations for this.
# def testGetFixedDateTriggersSetForADateAssignableWithAssigned(self):
# fix_to_date = date(2005, 5, 5)
# d = f.FullDutyFactory()
# c = f.CampaignFactory()
# c.events.add(d.event)
# v = f.VolunteerFactory()
# f.AssignmentFactory(volunteer=v, duty=d)
# f.TriggerFactory.create_batch(3, fixed_date=fix_to_date,
# assignment_state=Trigger.ASSIGNABLE,
# campaign=c)
# result = Trigger.objects.triggered(fix_to_date).order_by('id')
# self.assertQuerysetEqual(result, [])
def testGetSetForADateAssignableWithAssignable(self):
fix_to_date = date(2005, 5, 5)
c = f.CampaignFactory()
d = f.FullDutyFactory()
c.events.add(d.event)
v = f.VolunteerFactory()
a = f.AttributeFactory()
v.attributes.add(a)
d.activity.attributes.add(a)
f.AssignmentFactory(volunteer=v, duty=d)
triggers = f.TriggerByDateFactory.create_batch(
3, fixed_date=fix_to_date,
assignment_state=TriggerBase.ASSIGNABLE, campaign=c)
result = TriggerByDate.objects.triggered(fix_to_date).order_by('id')
self.assertQuerysetEqual(result, [repr(t) for t in triggers])
class TestMessage(TestCase):
def testMessageFactory(self):
m = f.MessageFactory()
self.assertTrue(Message.objects.filter(id=m.id).exists())
def testSMSFactory(self):
m = f.MessageSMSFactory()
self.assertTrue(Message.objects.filter(id=m.id).exists())
self.assertEqual('sms', m.get_mode_display())
def testRenderedBody(self):
v = f.VolunteerFactory(dear_name="Joe")
message = f.MessageFactory(body="Hi {{ volunteer.to_name }}")
expected = "Hi Joe"
result = message.rendered_body({'volunteer': v})
self.assertEqual(expected, result)
def testRenderedSubject(self):
v = f.VolunteerFactory(dear_name="Joe")
message = f.MessageFactory(subject="Go {{ volunteer.to_name }}")
expected = "Go Joe"
result = message.rendered_subject({'volunteer': v})
self.assertEqual(expected, result)
| agpl-3.0 | 2,781,549,438,229,064,000 | 36.703343 | 79 | 0.63544 | false |
lukebakken/riak-python-pbcpp | riak/benchmark.py | 1 | 4509 | from __future__ import print_function
import os
import gc
import sys
import traceback
__all__ = ['measure', 'measure_with_rehearsal']
def measure_with_rehearsal():
"""
Runs a benchmark when used as an iterator, injecting a garbage
collection between iterations. Example::
for b in riak.benchmark.measure_with_rehearsal():
with b.report("pow"):
for _ in range(10000):
math.pow(2,10000)
with b.report("factorial"):
for i in range(100):
math.factorial(i)
"""
return Benchmark(True)
def measure():
"""
Runs a benchmark once when used as a context manager. Example::
with riak.benchmark.measure() as b:
with b.report("pow"):
for _ in range(10000):
math.pow(2,10000)
with b.report("factorial"):
for i in range(100):
math.factorial(i)
"""
return Benchmark()
class Benchmark(object):
"""
A benchmarking run, which may consist of multiple steps. See
measure_with_rehearsal() and measure() for examples.
"""
def __init__(self, rehearse=False):
"""
Creates a new benchmark reporter.
:param rehearse: whether to run twice to take counter the effects
of garbage collection
:type rehearse: boolean
"""
self.rehearse = rehearse
if rehearse:
self.count = 2
else:
self.count = 1
self._report = None
def __enter__(self):
if self.rehearse:
raise ValueError("measure_with_rehearsal() cannot be used in with "
"statements, use measure() or the for..in "
"statement")
print_header()
self._report = BenchmarkReport()
self._report.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._report:
return self._report.__exit__(exc_type, exc_val, exc_tb)
else:
            print()
return True
def __iter__(self):
return self
def next(self):
"""
Runs the next iteration of the benchmark.
"""
if self.count == 0:
raise StopIteration
elif self.count > 1:
print_rehearsal_header()
else:
if self.rehearse:
gc.collect()
print("-" * 59)
print()
print_header()
self.count -= 1
return self
def __next__(self):
# Python 3.x Version
return self.next()
def report(self, name):
"""
Returns a report for the current step of the benchmark.
"""
self._report = None
return BenchmarkReport(name)
def print_rehearsal_header():
"""
Prints the header for the rehearsal phase of a benchmark.
"""
    print()
print("Rehearsal -------------------------------------------------")
def print_report(label, user, system, real):
"""
Prints the report of one step of a benchmark.
"""
print("{:<12s} {:12f} {:12f} ( {:12f} )".format(label,
user,
system,
real))
def print_header():
"""
Prints the header for the normal phase of a benchmark.
"""
print("{:<12s} {:<12s} {:<12s} ( {:<12s} )"
.format('', 'user', 'system', 'real'))
class BenchmarkReport(object):
"""
A labeled step in a benchmark. Acts as a context-manager, printing
its timing results when the context exits.
"""
def __init__(self, name='benchmark'):
self.name = name
self.start = None
def __enter__(self):
self.start = os.times()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_type:
user1, system1, _, _, real1 = self.start
user2, system2, _, _, real2 = os.times()
print_report(self.name, user2 - user1, system2 - system1,
real2 - real1)
elif exc_type is KeyboardInterrupt:
return False
else:
msg = "EXCEPTION! type: %r val: %r" % (exc_type, exc_val)
print(msg, file=sys.stderr)
traceback.print_tb(exc_tb)
return True if exc_type is None else False
| unlicense | 1,267,404,760,362,510,800 | 26.662577 | 79 | 0.505877 | false |
raimund89/PyShow | main.py | 1 | 1289 | # PyShow - a slide show IDE and scripting language.
#
# Copyright (C) 2017 Raimond Frentrop
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The main startup code for PyShow.
When this script is run, it starts a QApplication with the PyShowWindow as
the main window container.
"""
import sys
import ctypes
import os
from PyQt5.QtWidgets import QApplication
from Interface.PyShowWindow import PyShowWindow
if __name__ == '__main__':
# Main entry point of PyShow
app = QApplication(sys.argv)
if os.name == 'nt':
appid = u'pyshow.application'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(appid)
w = PyShowWindow(sys.argv)
sys.exit(app.exec_())
| gpl-3.0 | 9,134,828,114,468,982,000 | 31.225 | 76 | 0.74166 | false |
timsavage/denim | denim/package/__init__.py | 1 | 1384 | # -*- encoding:utf8 -*-
"""
Methods for managing OS packages.
An alternative package manager can be selected by settings the:
``deploy_package_manager`` env variable.
Note: Different distributions/operating systems have different naming
conventions for packages. For example Apache is referred to as apache on
Debian systems but httpd on Redhat systems.
Options:
- debian (default) Debian package management tools (apt, dpkg etc)
"""
from denim.environment import Proxy
__proxy = Proxy('deploy_package_manager', globals(), 'debian')
is_installed = __proxy.method(
'is_installed', False,
doc="""
Check if a particular package has been installed.
:param name: name of the package to check for.
"""
)
install = __proxy.method(
'install', False,
doc="""
Install a package.
:param name: name of the package to install.
"""
)
def ensure_installed(*package_names):
"""
Install all packages in the list if they are not already installed.
"""
for package_name in package_names:
if package_name and not is_installed(package_name):
install(package_name)
def check_installed(*package_names):
"""
Check that certain packages are installed.
Returns a list of packages that are not installed.
"""
return [n for n in package_names if n and not is_installed(n)]
__all__ = __proxy.methods
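# Example (a minimal sketch, not from the library docs; 'nginx'/'git'/
# 'redis-server' are made-up package names and the default 'debian'
# backend is assumed):
#
#     from denim import package
#
#     def provision():
#         package.ensure_installed('nginx', 'git')
#         missing = package.check_installed('redis-server')
#         if missing:
#             print 'not installed: %s' % ', '.join(missing)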
| bsd-2-clause | 6,832,159,986,885,219,000 | 22.862069 | 72 | 0.687139 | false |
CognizantOneDevOps/Insights | PlatformAgents/com/cognizant/devops/platformagents/agents/ci/teamcity/TeamCityAgent3.py | 1 | 3153 | #-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on 30 March 2017
@author: 446620
'''
import json
from ....core.BaseAgent3 import BaseAgent
class TeamCityAgent(BaseAgent):
@BaseAgent.timed
def process(self):
self.baseLogger.info('Inside process')
BaseUrl = self.config.get("baseUrl", '')
UserID = self.getCredential("userid")
Passwd = self.getCredential("passwd")
if not self.tracking.get("sinceBuild",None):
getBuildsUrl = BaseUrl + '/httpAuth/app/rest/builds/'
else:
sinceBuild = self.tracking.get("sinceBuild",None)
getBuildsUrl = BaseUrl + '/httpAuth/app/rest/builds/?sinceBuild='+ str(sinceBuild)
teamcityBuilds = self.getResponse(getBuildsUrl, 'GET', UserID, Passwd, None, None)
responseTemplate = self.getResponseTemplate()
data = []
buildCount = teamcityBuilds["count"]
for build in range(buildCount):
injectData = {}
getBuildDetailsUrl = BaseUrl+"/httpAuth/app/rest/builds/"+ str(teamcityBuilds["build"][build]["id"])
teamcityBuildDetails = self.getResponse(getBuildDetailsUrl, 'GET', UserID, Passwd, None)
if "lastChanges" in teamcityBuildDetails:
getBuildChangesUrl = BaseUrl+"/httpAuth/app/rest/changes?locator=build:"+str(teamcityBuilds["build"][build]["id"])
teamcityBuildChanges = self.getResponse(getBuildChangesUrl, 'GET', UserID, Passwd, None)
changeCount = teamcityBuildChanges["count"]
version = []
for change in range(changeCount):
version.append(teamcityBuildChanges["change"][change]["version"])
# getChangeDetailsUrl = BaseUrl+"/httpAuth/app/rest/changes/"+ str(teamcityBuildChanges["change"][change]["id"])
# teamcityChangeDetails = self.getResponse(getChangeDetailsUrl, 'GET', UserID, Passwd, None)
# version.append(teamcityChangeDetails["version"])
injectData["version"]=version
data += self.parseResponse(responseTemplate, teamcityBuildDetails, injectData)
        print(json.dumps(data))
if buildCount>0:
self.tracking["sinceBuild"] = teamcityBuilds["build"][0]["id"]
self.updateTrackingJson(self.tracking)
self.publishToolsData(data)
if __name__ == "__main__":
TeamCityAgent()
| apache-2.0 | -7,294,705,972,704,179,000 | 49.047619 | 132 | 0.623216 | false |
ottovan/PhotoDataLab | import.py | 1 | 2885 | #! /bin/python3
import csv #For csv file handling
import re #Regular expressions
import sys #System processes
import os #Work on files and folders depending on your OS
#import pdb #Debugger
#pdb.set_trace()
#Input arguments
#sys.argv[1] = Input file name
#sys.argv[2] = Output file name
#sys.argv[3] = Folder name (Default = Current Working Directory)
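#Example invocation (hypothetical file names):
#  python3 import.py raw_measurement.txt cleaned.csv /data/run1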
#If the folder name is given as third argument, use it, otherwise use current directory
if len(sys.argv) > 3:
folder=sys.argv[3]
else:
folder=os.getcwd()
#Define input/output file names joining the folder and file names ensuring portability between different OSs
in_txt=os.path.join(folder,sys.argv[1])
tmp_txt=os.path.join(folder,"tmp.txt")
out_txt=os.path.join(folder,sys.argv[2])
#Add a blank line at the beginning and one at the end of the file
#Replace every line containing letters with a blank line, except lines whose
#only letters are "e" or "E", so numbers in scientific notation (e.g. 1.5e-3)
#survive the filter
#Remove spaces and tabs from beginning and end of line
with open(tmp_txt, 'w+') as outfile:
with open(in_txt, 'r+') as infile:
outfile.write("\n")
for row in infile:
if re.search('[A-DF-Za-df-z]',row):
outfile.write("\n")
else:
outfile.write(row.strip(" \t"))
outfile.write("\n")
#Make a list with the number of the lines not containing numbers
#Find the largest interval between lines not containing numbers
#Make a new file containing only the data series, separated by ";"
with open(out_txt, 'w+') as outfile:
with open(tmp_txt, 'r+') as infile:
#Find lines not containing numbers
lines=[]
for num, row in enumerate(infile, 1):
if re.search('[0-9]',row):
pass
else:
lines.append(num)
#Find the largest interval between non-numeric lines (our dataset) and find the first and last lines of the interval
diff=[]
for i in range(len(lines)-1):
diff.append(lines[i+1]-lines[i])
max_value=max(diff)
max_index=diff.index(max_value)
line_start=lines[max_index]
line_end=lines[max_index + 1]
infile.seek(0)
#Find the delimiter in the first data line
sniffer = csv.Sniffer()
for num,row in enumerate(infile,1):
#if re.search('[0-9]',row):
if num == (line_start + 1):
dialect = sniffer.sniff(row)
break
delimiter=dialect.delimiter
infile.seek(0)
#Replace the delimiter with ";" and remove ";" if it is at the end of the line
for num, row in enumerate(infile, 1):
if num <= line_start or num >= line_end:
pass
else:
outfile.write(row.replace(delimiter,";").rstrip(";\n") + "\n")
#Delete temporary files
os.remove(tmp_txt)
| gpl-3.0 | 8,225,624,620,948,556,000 | 32.16092 | 124 | 0.606932 | false |
googleads/google-ads-python | google/ads/googleads/v7/services/services/group_placement_view_service/transports/base.py | 1 | 3708 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v7.resources.types import group_placement_view
from google.ads.googleads.v7.services.types import group_placement_view_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class GroupPlacementViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for GroupPlacementViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_group_placement_view: gapic_v1.method.wrap_method(
self.get_group_placement_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_group_placement_view(
self,
) -> typing.Callable[
[group_placement_view_service.GetGroupPlacementViewRequest],
group_placement_view.GroupPlacementView,
]:
raise NotImplementedError
__all__ = ("GroupPlacementViewServiceTransport",)
| apache-2.0 | 4,312,947,143,067,942,400 | 36.08 | 79 | 0.662621 | false |
pachyderm/pfs | examples/ml/neon/inference/auto_inference.py | 2 | 3880 | #!/usr/bin/env python
"""
Example that does inference on an LSTM networks for amazon review analysis
$ python examples/imdb/auto_inference.py --model_weights imdb.p --vocab_file imdb.vocab
--review_files /pfs/reviews --output_dir /pfs/out
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases() # triggers E402, hence noqa below
from builtins import input # noqa
import numpy as np # noqa
from neon.backends import gen_backend # noqa
from neon.initializers import Uniform, GlorotUniform # noqa
from neon.layers import LSTM, Affine, Dropout, LookupTable, RecurrentSum # noqa
from neon.models import Model # noqa
from neon.transforms import Logistic, Tanh, Softmax # noqa
from neon.util.argparser import NeonArgparser, extract_valid_args # noqa
from neon.util.compat import pickle # noqa
from neon.data.text_preprocessing import clean_string # noqa
import os
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--model_weights', required=True,
help='pickle file of trained weights')
parser.add_argument('--vocab_file', required=True,
help='vocabulary file')
parser.add_argument('--review_files', required=True,
help='directory containing reviews in text files')
parser.add_argument('--output_dir', required=True,
help='directory where results will be saved')
args = parser.parse_args()
# hyperparameters from the reference
batch_size = 1
clip_gradients = True
gradient_limit = 5
vocab_size = 20000
sentence_length = 128
embedding_dim = 128
hidden_size = 128
reset_cells = True
num_epochs = args.epochs
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
be.bsz = 1
# define same model as in train
init_glorot = GlorotUniform()
init_emb = Uniform(low=-0.1 / embedding_dim, high=0.1 / embedding_dim)
nclass = 2
layers = [
LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=init_emb,
pad_idx=0, update=True),
LSTM(hidden_size, init_glorot, activation=Tanh(),
gate_activation=Logistic(), reset_cells=True),
RecurrentSum(),
Dropout(keep=0.5),
Affine(nclass, init_glorot, bias=init_glorot, activation=Softmax())
]
# load the weights
print("Initialized the models - ")
model_new = Model(layers=layers)
print("Loading the weights from {0}".format(args.model_weights))
model_new.load_params(args.model_weights)
model_new.initialize(dataset=(sentence_length, batch_size))
# setup buffers before accepting reviews
xdev = be.zeros((sentence_length, 1), dtype=np.int32) # bsz is 1, feature size
xbuf = np.zeros((1, sentence_length), dtype=np.int32)
oov = 2
start = 1
index_from = 3
pad_char = 0
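# Token conventions (assumed to match how the vocabulary file was built, i.e.
# the usual IMDB-style preprocessing): 0 = padding, 1 = start-of-sentence,
# 2 = out-of-vocabulary, and real word ids are shifted up by index_from (3).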
vocab, rev_vocab = pickle.load(open(args.vocab_file, 'rb'))
# walk over the reviews in the text files, making inferences
for dirpath, dirs, files in os.walk(args.review_files):
for file in files:
with open(os.path.join(dirpath, file), 'r') as myfile:
data=myfile.read()
# clean the input
tokens = clean_string(data).strip().split()
# check for oov and add start
sent = [len(vocab) + 1 if t not in vocab else vocab[t] for t in tokens]
sent = [start] + [w + index_from for w in sent]
sent = [oov if w >= vocab_size else w for w in sent]
# pad sentences
xbuf[:] = 0
trunc = sent[-sentence_length:]
xbuf[0, -len(trunc):] = trunc
xdev[:] = xbuf.T.copy()
y_pred = model_new.fprop(xdev, inference=True) # inference flag dropout
with open(os.path.join(args.output_dir, file), "w") as output_file:
output_file.write("Pred - {0}\n".format(y_pred.get().T))
| apache-2.0 | -5,668,632,449,984,851,000 | 34.925926 | 88 | 0.663144 | false |
donbright/piliko | experiment/bernoulli/pythbernlem3d.py | 1 | 2317 | from fractions import Fraction
import sys
# rational paramterization / approximation of bernoulli's lemniscate
# in a 3 dimensional 'dumbbell' arrangement.
# (note - this uses terms from Norman Wildberger's rational
# trigonometry/chromogeometry. briefly for a vector from 0,0 to x,y:
#
# blue quadrance (x,y) = x^2 + y^2
# red quadrance (x,y) = x^2 - y^2
# green quadrance (x,y) = 2*x*y
# )
# theory:
#
# step one is the rational paramterization of bernoulli's lemniscate
# we found this in pythbern.py
#
# step two is to 'grow' it into three d as some kind of dumbbell shape.
#
# how? consider each 'x' as a distance from the origin along the dumbbell's
# axis, and 'y' as the radius of a circle drawn at that distance.
# drawing each circle with rational points gives a 'stack' of circles that
# together trace out the dumbbell shape, as though we had sliced it.
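#
# worked example (illustration only): take (m,n) = (2,1).
# blueq = 5, redq = 3, greenq = 4, so x = 5/3 and y = 4/3.
# blueq(x,y) = 41/9, hence lambd = 9/41 and the scaled point is (15/41, 12/41).
# check: (x^2+y^2)^2 = (9/41)^2 = 81/1681 = x^2 - y^2, so the scaled point
# does lie on bernoulli's lemniscate.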
def sqr(x): return x*x
def greenq_pts(x,y,x2,y2): return 2*(x2-x)*(y2-y)
def redq_pts(x,y,x2,y2): return sqr(x2-x)-sqr(y2-y)
def blueq_pts(x,y,x2,y2): return sqr(x2-x)+sqr(y2-y)
def greenq(m,n): return greenq_pts(0,0,m,n)
def redq(m,n): return redq_pts(0,0,m,n)
def blueq(m,n): return blueq_pts(0,0,m,n)
xs,ys,zs=[],[],[]
depth = 10
for m in range(-depth,depth):
for n in range(0,depth):
if redq(m,n)==0: continue
x = Fraction(blueq(m,n),redq(m,n))
y = Fraction(greenq(m,n),redq(m,n))
oh = blueq(x,y)
lambd = Fraction( 1, oh )
x = x * lambd
y = y * lambd
distance = x
radius = y
circpts = int(radius*20)
for m2 in range(-circpts,circpts):
for n2 in range(-circpts,circpts):
if blueq(m2,n2)==0: continue
xdumb = distance
ydumb = Fraction(redq(m2,n2),blueq(m2,n2))
zdumb = Fraction(greenq(m2,n2),blueq(m2,n2))
ydumb *= radius
zdumb *= radius
xs += [xdumb]
ys += [ydumb]
zs += [zdumb]
max_val = max(xs+ys+zs)
for i in range(0,2):
	print str(xs[i])+','+str(ys[i])+','+str(zs[i]),
print '....'
for i in range(0,len(xs)):
	xs[i] = Fraction( xs[i], max_val )
	ys[i] = Fraction( ys[i], max_val )
	zs[i] = Fraction( zs[i], max_val )
print len(xs), 'points'
import numpy as np
import matplotlib.pylab as plt
fig,ax = plt.subplots(figsize=(8,8))
ax.set_ylim([-1.2,1.2])
ax.set_xlim([-1.2,1.2])
for i in range(0,len(xs)):
xs[i]=xs[i]+zs[i]/4
ys[i]=ys[i]+zs[i]/4
ax.scatter(xs,ys)
plt.show()
| bsd-3-clause | 4,990,532,558,360,858,000 | 25.94186 | 72 | 0.644368 | false |
updownlife/multipleK | dependencies/biopython-1.65/Scripts/GenBank/find_parser_problems.py | 1 | 1430 | #!/usr/bin/env python
"""Find GenBank records that the parser has problems with within a big file.
This is meant to make it easy to get accession numbers for records that
don't parse properly.
Usage:
find_parser_problems.py <GenBank file to parse>
"""
# standard library
from __future__ import print_function
import sys
# GenBank
from Bio import GenBank
verbose = 0
if len(sys.argv) != 2:
print("Usage ./find_parser_problems <GenBank file to parse>")
sys.exit()
feature_parser = GenBank.FeatureParser(debug_level=0)
parser = GenBank.ErrorParser(feature_parser)
handle = open(sys.argv[1], 'r')
iterator = GenBank.Iterator(handle, parser, has_header=1)
while True:
have_record = 0
while have_record == 0:
try:
cur_record = next(iterator)
have_record = 1
except GenBank.ParserFailureError as msg:
print("Parsing Problem: %s" % msg)
sys.exit()
if cur_record is None:
break
print("Successfully parsed record %s" % cur_record.id)
if verbose:
print("***Record")
print("Seq: %s" % cur_record.seq)
print("Id: %s" % cur_record.id)
print("Name: %s" % cur_record.name)
print("Description: %s" % cur_record.description)
print("Annotations: %s" % cur_record.annotations)
print("Feaures")
for feature in cur_record.features:
print(feature)
handle.close()
| gpl-2.0 | -4,434,093,523,328,598,000 | 24.087719 | 76 | 0.638462 | false |
claudiofinizio/learn-python | TDD/learn_tests/wxGUI_modal.py | 1 | 1681 | import wx
class MyFrame(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title)
btn = wx.Button(self, label="SomeProcessing")
self.Bind(wx.EVT_BUTTON, self.SomeProcessing, btn)
def SomeProcessing(self,event):
self.dlg = Dlg_GetUserInput(self)
if self.dlg.ShowModal() == wx.ID_OK:
if self.dlg.sel1.GetValue():
print 'sel1 processing'
self.data_after_processing = 'bar'
if self.dlg.sel2.GetValue():
print 'sel2 processing'
self.data_after_processing = 'foo'
class Dlg_GetUserInput(wx.Dialog):
def __init__(self, parent):
wx.Dialog.__init__(self, parent)
self.sel1 = wx.CheckBox(self, label='Selection 1')
self.sel2 = wx.CheckBox(self, label='Selection 2')
self.OK = wx.Button(self, wx.ID_OK)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.sel1)
sizer.Add(self.sel2)
sizer.Add(self.OK)
self.SetSizer(sizer)
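# test() below drives the modal dialog without a user: wx.CallAfter queues
# clickOK() so it runs once ShowModal()'s nested event loop is processing
# events; clickOK ticks a checkbox on the active dialog and posts a
# wx.ID_OK button event, ending the modal loop as if OK had been pressed.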
def test():
app = wx.App()
mf = MyFrame(None, 'testgui')
for item in mf.GetChildren():
if item.GetLabel() == 'SomeProcessing':
btn = item
break
def clickOK():
dlg = wx.GetActiveWindow()
dlg.sel2.SetValue(True)
clickEvent = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK)
dlg.ProcessEvent(clickEvent)
event = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, btn.GetId())
wx.CallAfter(clickOK)
mf.GetEventHandler().ProcessEvent(event)
print 'data_after_processing:', mf.data_after_processing
mf.Destroy()
test() | mit | 8,824,939,908,386,655,000 | 30.735849 | 79 | 0.599048 | false |
nathanielvarona/airflow | airflow/contrib/operators/dynamodb_to_s3.py | 1 | 1200 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.amazon.aws.transfers.dynamodb_to_s3`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.transfers.dynamodb_to_s3 import DynamoDBToS3Operator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.dynamodb_to_s3`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 | -2,640,637,616,557,468,000 | 40.37931 | 105 | 0.77 | false |
mytest-e2/ritest-e2 | lib/python/Plugins/SystemPlugins/SoftwareManager/plugin.py | 1 | 104940 | from Plugins.Plugin import PluginDescriptor
from Screens.Console import Console
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.Ipkg import Ipkg
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Input import Input
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Slider import Slider
from Components.Harddisk import harddiskmanager
from Components.config import config,getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigYesNo, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.SelectionList import SelectionList
from Components.PluginComponent import plugins
from Plugins.Extensions.Infopanel.SoftwarePanel import SoftwarePanel
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.AVSwitch import AVSwitch
from Components.Task import job_manager
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_PLUGIN, SCOPE_ACTIVE_SKIN, SCOPE_METADIR, SCOPE_CURRENT_SKIN
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
from enigma import eTimer, RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListboxPythonMultiContent, eListbox, gFont, getDesktop, ePicLoad, eRCInput, getPrevAsciiCode, eEnv, iRecordableService, getEnigmaVersionString
from cPickle import dump, load
from os import path as os_path, system as os_system, unlink, stat, mkdir, popen, makedirs, listdir, access, rename, remove, W_OK, R_OK, F_OK
from time import time, gmtime, strftime, localtime
from stat import ST_MTIME
from datetime import date, timedelta
from twisted.web import client
from twisted.internet import reactor
from ImageBackup import ImageBackup
from Flash_online import FlashOnline
from ImageWizard import ImageWizard
from BackupRestore import BackupSelection, RestoreMenu, BackupScreen, RestoreScreen, getBackupPath, getOldBackupPath, getBackupFilename, RestoreMyMetrixHD
from BackupRestore import InitConfig as BackupRestore_InitConfig
from SoftwareTools import iSoftwareTools
import os
from boxbranding import getBoxType, getMachineBrand, getMachineName, getBrandOEM
boxtype = getBoxType()
brandoem = getBrandOEM()
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/dFlash"):
from Plugins.Extensions.dFlash.plugin import dFlash
DFLASH = True
else:
DFLASH = False
if os.path.exists("/usr/lib/enigma2/python/Plugins/Extensions/dBackup"):
from Plugins.Extensions.dBackup.plugin import dBackup
DBACKUP = True
else:
DBACKUP = False
config.plugins.configurationbackup = BackupRestore_InitConfig()
config.plugins.softwaremanager = ConfigSubsection()
config.plugins.softwaremanager.overwriteSettingsFiles = ConfigYesNo(default=False)
config.plugins.softwaremanager.overwriteDriversFiles = ConfigYesNo(default=True)
config.plugins.softwaremanager.overwriteEmusFiles = ConfigYesNo(default=True)
config.plugins.softwaremanager.overwritePiconsFiles = ConfigYesNo(default=True)
config.plugins.softwaremanager.overwriteBootlogoFiles = ConfigYesNo(default=True)
config.plugins.softwaremanager.overwriteSpinnerFiles = ConfigYesNo(default=True)
config.plugins.softwaremanager.overwriteConfigFiles = ConfigSelection(
[
("Y", _("Yes, always")),
("N", _("No, never")),
("ask", _("Always ask"))
], "Y")
config.plugins.softwaremanager.updatetype = ConfigSelection(
[
("hot", _("Upgrade with GUI")),
("cold", _("Unattended upgrade without GUI")),
], "hot")
config.plugins.softwaremanager.epgcache = ConfigYesNo(default=False)
def write_cache(cache_file, cache_data):
#Does a cPickle dump
if not os_path.isdir( os_path.dirname(cache_file) ):
try:
mkdir( os_path.dirname(cache_file) )
except OSError:
print os_path.dirname(cache_file), 'is a file'
fd = open(cache_file, 'w')
dump(cache_data, fd, -1)
fd.close()
def valid_cache(cache_file, cache_ttl):
#See if the cache file exists and is still living
try:
mtime = stat(cache_file)[ST_MTIME]
except:
return 0
curr_time = time()
if (curr_time - mtime) > cache_ttl:
return 0
else:
return 1
def load_cache(cache_file):
#Does a cPickle load
fd = open(cache_file)
cache_data = load(fd)
fd.close()
return cache_data
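# Example usage of the cache helpers (hypothetical path; rebuild_data() is an
# assumed application-specific helper) - keep cached data for one hour:
#	if valid_cache('/tmp/sm_cache.p', 3600):
#		data = load_cache('/tmp/sm_cache.p')
#	else:
#		data = rebuild_data()
#		write_cache('/tmp/sm_cache.p', data)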
def Check_Softcam():
found = False
for x in os.listdir('/etc'):
if x.find('.emu') > -1:
found = True
			break
return found
class UpdatePluginMenu(Screen):
skin = """
<screen name="UpdatePluginMenu" position="center,center" size="610,410" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<ePixmap pixmap="skin_default/border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
Screen.setTitle(self, _("Software management"))
self.skin_path = plugin_path
self.menu = args
self.list = []
self.oktext = _("\nPress OK on your remote control to continue.")
self.menutext = _("Press MENU on your remote control for additional options.")
self.infotext = _("Press INFO on your remote control for additional information.")
self.text = ""
if self.menu == 0:
print "building menu entries"
self.list.append(("install-extensions", _("Manage extensions"), _("\nManage extensions or plugins for your %s %s") % (getMachineBrand(), getMachineName()) + self.oktext, None))
self.list.append(("software-update", _("Software update"), _("\nOnline update of your %s %s software.") % (getMachineBrand(), getMachineName()) + self.oktext, None))
self.list.append(("software-restore", _("Software restore"), _("\nRestore your %s %s with a new firmware.") % (getMachineBrand(), getMachineName()) + self.oktext, None))
if not boxtype.startswith('az') and not boxtype.startswith('dm') and not brandoem.startswith('cube'):
self.list.append(("flash-online", _("Flash Online"), _("\nFlash on the fly your %s %s.") % (getMachineBrand(), getMachineName()) + self.oktext, None))
if not boxtype.startswith('az') and not brandoem.startswith('cube'):
self.list.append(("backup-image", _("Backup Image"), _("\nBackup your running %s %s image to HDD or USB.") % (getMachineBrand(), getMachineName()) + self.oktext, None))
self.list.append(("system-backup", _("Backup system settings"), _("\nBackup your %s %s settings.") % (getMachineBrand(), getMachineName()) + self.oktext + "\n\n" + self.infotext, None))
self.list.append(("system-restore",_("Restore system settings"), _("\nRestore your %s %s settings.") % (getMachineBrand(), getMachineName()) + self.oktext, None))
self.list.append(("ipkg-install", _("Install local extension"), _("\nScan for local extensions and install them.") + self.oktext, None))
for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if p.__call__.has_key("SoftwareSupported"):
callFnc = p.__call__["SoftwareSupported"](None)
if callFnc is not None:
if p.__call__.has_key("menuEntryName"):
menuEntryName = p.__call__["menuEntryName"](None)
else:
menuEntryName = _('Extended Software')
if p.__call__.has_key("menuEntryDescription"):
menuEntryDescription = p.__call__["menuEntryDescription"](None)
else:
menuEntryDescription = _('Extended Software Plugin')
self.list.append(('default-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("advanced", _("Advanced options"), _("\nAdvanced options and settings." ) + self.oktext, None))
elif self.menu == 1:
self.list.append(("advancedrestore", _("Advanced restore"), _("\nRestore your backups by date." ) + self.oktext, None))
self.list.append(("backuplocation", _("Select backup location"), _("\nSelect your backup device.\nCurrent device: " ) + config.plugins.configurationbackup.backuplocation.value + self.oktext, None))
self.list.append(("backupfiles", _("Show default backup files"), _("Here you can browse (but not modify) the files that are added to the backupfile by default (E2-setup, channels, network).") + self.oktext + "\n\n" + self.infotext, None))
self.list.append(("backupfiles_addon", _("Select additional backup files"), _("Here you can specify additional files that should be added to the backup file.") + self.oktext + "\n\n" + self.infotext, None))
self.list.append(("backupfiles_exclude", _("Select excluded backup files"), _("Here you can select which files should be excluded from the backup.") + self.oktext + "\n\n" + self.infotext, None))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("ipkg-manager", _("Packet management"), _("\nView, install and remove available or installed packages." ) + self.oktext, None))
self.list.append(("ipkg-source",_("Select upgrade source"), _("\nEdit the upgrade source address." ) + self.oktext, None))
for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if p.__call__.has_key("AdvancedSoftwareSupported"):
callFnc = p.__call__["AdvancedSoftwareSupported"](None)
if callFnc is not None:
if p.__call__.has_key("menuEntryName"):
menuEntryName = p.__call__["menuEntryName"](None)
else:
menuEntryName = _('Advanced software')
if p.__call__.has_key("menuEntryDescription"):
menuEntryDescription = p.__call__["menuEntryDescription"](None)
else:
menuEntryDescription = _('Advanced software plugin')
self.list.append(('advanced-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
self["menu"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["status"] = StaticText(self.menutext)
self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "MenuActions", "NumberActions"],
{
"ok": self.go,
"back": self.close,
"red": self.close,
"menu": self.handleMenu,
"info": self.handleInfo,
"1": self.go,
"2": self.go,
"3": self.go,
"4": self.go,
"5": self.go,
"6": self.go,
"7": self.go,
"8": self.go,
"9": self.go,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
self.backuppath = getBackupPath()
if not os.path.isdir(self.backuppath):
self.backuppath = getOldBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.onShown.append(self.setWindowTitle)
self.onChangedEntry = []
self["menu"].onSelectionChanged.append(self.selectionChanged)
def createSummary(self):
from Screens.PluginBrowser import PluginBrowserSummary
return PluginBrowserSummary
def selectionChanged(self):
item = self["menu"].getCurrent()
if item:
name = item[1]
desc = item[2]
else:
name = "-"
desc = ""
for cb in self.onChangedEntry:
cb(name, desc)
def layoutFinished(self):
idx = 0
self["menu"].index = idx
def setWindowTitle(self):
self.setTitle(_("Software management"))
def cleanup(self):
iSoftwareTools.cleanupSoftwareTools()
def getUpdateInfos(self):
if iSoftwareTools.NetworkConnectionAvailable is True:
			if iSoftwareTools.available_updates != 0:
self.text = _("There are at least %s updates available.") % (str(iSoftwareTools.available_updates))
else:
self.text = "" #_("There are no updates available.")
if iSoftwareTools.list_updating is True:
self.text += "\n" + _("A search for available updates is currently in progress.")
else:
self.text = _("No network connection available.")
self["status"].setText(self.text)
def handleMenu(self):
self.session.open(SoftwareManagerSetup)
def handleInfo(self):
current = self["menu"].getCurrent()
if current:
currentEntry = current[0]
if currentEntry in ("system-backup","backupfiles","backupfiles_exclude","backupfiles_addon"):
self.session.open(SoftwareManagerInfo, mode = "backupinfo", submode = currentEntry)
def go(self, num = None):
if num is not None:
num -= 1
if not num < self["menu"].count():
return
self["menu"].setIndex(num)
current = self["menu"].getCurrent()
if current:
currentEntry = current[0]
if self.menu == 0:
if (currentEntry == "software-update"):
self.session.open(SoftwarePanel, self.skin_path)
elif (currentEntry == "software-restore"):
self.session.open(ImageWizard)
elif (currentEntry == "install-extensions"):
self.session.open(PluginManager, self.skin_path)
elif (currentEntry == "flash-online"):
self.session.open(FlashOnline)
elif (currentEntry == "backup-image"):
if DFLASH == True:
self.session.open(dFlash)
elif DBACKUP == True:
self.session.open(dBackup)
else:
self.session.open(ImageBackup)
elif (currentEntry == "system-backup"):
self.session.openWithCallback(self.backupDone,BackupScreen, runBackup = True)
elif (currentEntry == "system-restore"):
if os_path.exists(self.fullbackupfilename):
self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore the backup?\nYour receiver will restart after the backup has been restored!"), default = False)
else:
self.session.open(MessageBox, _("Sorry, no backups found!"), MessageBox.TYPE_INFO, timeout = 10)
elif (currentEntry == "ipkg-install"):
try:
from Plugins.Extensions.MediaScanner.plugin import main
main(self.session)
except:
self.session.open(MessageBox, _("Sorry, %s has not been installed!") % ("MediaScanner"), MessageBox.TYPE_INFO, timeout = 10)
elif (currentEntry == "default-plugin"):
self.extended = current[3]
self.extended(self.session, None)
elif (currentEntry == "advanced"):
self.session.open(UpdatePluginMenu, 1)
elif self.menu == 1:
if (currentEntry == "ipkg-manager"):
self.session.open(PacketManager, self.skin_path)
elif (currentEntry == "backuplocation"):
parts = [ (r.description, r.mountpoint, self.session) for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)]
					for x in parts[:]:
if not access(x[1], F_OK|R_OK|W_OK) or x[1] == '/':
parts.remove(x)
if len(parts):
self.session.openWithCallback(self.backuplocation_choosen, ChoiceBox, title = _("Please select medium to use as backup location"), list = parts)
elif (currentEntry == "backupfiles"):
self.session.open(BackupSelection,title=_("Default files/folders to backup"),configBackupDirs=config.plugins.configurationbackup.backupdirs_default,readOnly=True)
elif (currentEntry == "backupfiles_addon"):
self.session.open(BackupSelection,title=_("Additional files/folders to backup"),configBackupDirs=config.plugins.configurationbackup.backupdirs,readOnly=False)
elif (currentEntry == "backupfiles_exclude"):
self.session.open(BackupSelection,title=_("Files/folders to exclude from backup"),configBackupDirs=config.plugins.configurationbackup.backupdirs_exclude,readOnly=False)
elif (currentEntry == "advancedrestore"):
self.session.open(RestoreMenu, self.skin_path)
elif (currentEntry == "ipkg-source"):
self.session.open(IPKGMenu, self.skin_path)
elif (currentEntry == "advanced-plugin"):
self.extended = current[3]
self.extended(self.session, None)
def backuplocation_choosen(self, option):
oldpath = config.plugins.configurationbackup.backuplocation.value
if option is not None:
config.plugins.configurationbackup.backuplocation.setValue(str(option[1]))
config.plugins.configurationbackup.backuplocation.save()
config.plugins.configurationbackup.save()
config.save()
newpath = config.plugins.configurationbackup.backuplocation.value
if newpath != oldpath:
self.createBackupfolders()
def createBackupfolders(self):
print "Creating backup folder if not already there..."
self.backuppath = getBackupPath()
try:
			if not os_path.exists(self.backuppath):
makedirs(self.backuppath)
except OSError:
self.session.open(MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10)
def backupDone(self,retval = None):
if retval is True:
self.session.open(MessageBox, _("Backup completed."), MessageBox.TYPE_INFO, timeout = 10)
else:
self.session.open(MessageBox, _("Backup failed."), MessageBox.TYPE_INFO, timeout = 10)
def startRestore(self, ret = False):
		if ret:
self.exe = True
self.session.open(RestoreScreen, runRestore = True)
class SoftwareManagerSetup(Screen, ConfigListScreen):
skin = """
<screen name="SoftwareManagerSetup" position="center,center" size="560,440" title="SoftwareManager setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="5,50" size="550,290" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,300" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="5,310" size="550,80" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, skin_path = None):
Screen.__init__(self, session)
self.session = session
self.skin_path = skin_path
		if self.skin_path is None:
self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
self.onChangedEntry = [ ]
self.setup_title = _("Software manager setup")
self.overwriteConfigfilesEntry = None
self.overwriteSettingsfilesEntry = None
self.overwriteDriversfilesEntry = None
self.overwriteEmusfilesEntry = None
self.overwritePiconsfilesEntry = None
self.overwriteBootlogofilesEntry = None
self.overwriteSpinnerfilesEntry = None
self.updatetypeEntry = None
self.list = [ ]
ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.apply,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText()
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
self.list = [ ]
self.overwriteConfigfilesEntry = getConfigListEntry(_("Overwrite configuration files?"), config.plugins.softwaremanager.overwriteConfigFiles)
self.overwriteSettingsfilesEntry = getConfigListEntry(_("Overwrite Setting Files ?"), config.plugins.softwaremanager.overwriteSettingsFiles)
self.overwriteDriversfilesEntry = getConfigListEntry(_("Overwrite Driver Files ?"), config.plugins.softwaremanager.overwriteDriversFiles)
self.overwriteEmusfilesEntry = getConfigListEntry(_("Overwrite Emu Files ?"), config.plugins.softwaremanager.overwriteEmusFiles)
self.overwritePiconsfilesEntry = getConfigListEntry(_("Overwrite Picon Files ?"), config.plugins.softwaremanager.overwritePiconsFiles)
self.overwriteBootlogofilesEntry = getConfigListEntry(_("Overwrite Bootlogo Files ?"), config.plugins.softwaremanager.overwriteBootlogoFiles)
self.overwriteSpinnerfilesEntry = getConfigListEntry(_("Overwrite Spinner Files ?"), config.plugins.softwaremanager.overwriteSpinnerFiles)
self.updatetypeEntry = getConfigListEntry(_("Select Software Update"), config.plugins.softwaremanager.updatetype)
if boxtype.startswith('et'):
self.list.append(self.updatetypeEntry)
self.list.append(self.overwriteConfigfilesEntry)
self.list.append(self.overwriteSettingsfilesEntry)
self.list.append(self.overwriteDriversfilesEntry)
if Check_Softcam():
self.list.append(self.overwriteEmusfilesEntry)
self.list.append(self.overwritePiconsfilesEntry)
self.list.append(self.overwriteBootlogofilesEntry)
self.list.append(self.overwriteSpinnerfilesEntry)
self["config"].list = self.list
self["config"].l.setSeperation(400)
self["config"].l.setList(self.list)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def selectionChanged(self):
if self["config"].getCurrent() == self.overwriteConfigfilesEntry:
self["introduction"].setText(_("Overwrite configuration files during software upgrade?"))
elif self["config"].getCurrent() == self.overwriteSettingsfilesEntry:
self["introduction"].setText(_("Overwrite setting files (channellist) during software upgrade?"))
elif self["config"].getCurrent() == self.overwriteDriversfilesEntry:
self["introduction"].setText(_("Overwrite driver files during software upgrade?"))
elif self["config"].getCurrent() == self.overwriteEmusfilesEntry:
self["introduction"].setText(_("Overwrite softcam files during software upgrade?"))
elif self["config"].getCurrent() == self.overwritePiconsfilesEntry:
self["introduction"].setText(_("Overwrite picon files during software upgrade?"))
elif self["config"].getCurrent() == self.overwriteBootlogofilesEntry:
self["introduction"].setText(_("Overwrite bootlogo files during software upgrade?"))
elif self["config"].getCurrent() == self.overwriteSpinnerfilesEntry:
self["introduction"].setText(_("Overwrite spinner files during software upgrade?"))
elif self["config"].getCurrent() == self.updatetypeEntry:
self["introduction"].setText(_("Select how your box will upgrade."))
else:
self["introduction"].setText("")
def newConfig(self):
pass
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def confirm(self, confirmed):
if not confirmed:
print "not confirmed"
return
else:
self.keySave()
def apply(self):
self.session.openWithCallback(self.confirm, MessageBox, _("Use these settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = True)
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = False)
else:
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
self.selectionChanged()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].value)
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class SoftwareManagerInfo(Screen):
skin = """
<screen name="SoftwareManagerInfo" position="center,center" size="560,440" title="SoftwareManager information">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,340" scrollbarMode="showOnDemand" selectionDisabled="0">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (5, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT | RT_HALIGN_CENTER, text = 0), # index 0 is the name
],
"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
"itemHeight": 26
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,400" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, skin_path = None, mode = None, submode = None):
Screen.__init__(self, session)
self.session = session
self.mode = mode
self.submode = submode
self.skin_path = skin_path
		if self.skin_path is None:
self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
self["actions"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"back": self.close,
"red": self.close,
}, -2)
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Softwaremanager information"))
if self.mode is not None:
self.showInfos()
def showInfos(self):
if self.mode == "backupinfo":
self.list = []
if self.submode == "backupfiles_exclude":
backupfiles = config.plugins.configurationbackup.backupdirs_exclude.value
elif self.submode == "backupfiles_addon":
backupfiles = config.plugins.configurationbackup.backupdirs.value
else:
backupfiles = config.plugins.configurationbackup.backupdirs_default.value
for entry in backupfiles:
self.list.append((entry,))
self['list'].setList(self.list)
class PluginManager(Screen, PackageInfoHandler):
skin = """
<screen name="PluginManager" position="center,center" size="560,440" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,360" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (51,[
MultiContentEntryText(pos = (0, 1), size = (470, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (0, 25), size = (470, 24), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaTest(pos = (475, 0), size = (48, 48), png = 5), # index 5 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 49), size = (550, 2), png = 6), # index 6 is the div pixmap
]),
"category": (40,[
MultiContentEntryText(pos = (30, 0), size = (500, 22), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (30, 22), size = (500, 16), font=2, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the description
MultiContentEntryPixmapAlphaTest(pos = (0, 38), size = (550, 2), png = 3), # index 3 is the div pixmap
])
},
"fonts": [gFont("Regular", 22),gFont("Regular", 20),gFont("Regular", 16)],
"itemHeight": 52
}
</convert>
</widget>
<widget source="status" render="Label" position="5,410" zPosition="10" size="540,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path = None, args = None):
Screen.__init__(self, session)
Screen.setTitle(self, _("Extensions management"))
self.session = session
self.skin_path = plugin_path
if self.skin_path is None:
self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "HelpActions" ],
{
"ok": self.handleCurrent,
"back": self.exit,
"red": self.exit,
"green": self.handleCurrent,
"yellow": self.handleSelected,
"showEventInfo": self.handleSelected,
"displayHelp": self.handleHelp,
}, -1)
self.list = []
self.statuslist = []
self.selectedFiles = []
self.categoryList = []
self.packetlist = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText("")
self["key_yellow"] = StaticText("")
self["key_blue"] = StaticText("")
self["status"] = StaticText("")
self.cmdList = []
self.oktext = _("\nAfter pressing OK, please wait!")
if self.selectionChanged not in self["list"].onSelectionChanged:
self["list"].onSelectionChanged.append(self.selectionChanged)
self.currList = ""
self.currentSelectedTag = None
self.currentSelectedIndex = None
self.currentSelectedPackage = None
self.saved_currentSelectedPackage = None
self.restartRequired = False
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.getUpdateInfos)
def setWindowTitle(self):
self.setTitle(_("Extensions management"))
def exit(self):
if self.currList == "packages":
self.currList = "category"
self.currentSelectedTag = None
self["list"].style = "category"
self['list'].setList(self.categoryList)
self["list"].setIndex(self.currentSelectedIndex)
self["list"].updateList(self.categoryList)
self.selectionChanged()
else:
iSoftwareTools.cleanupSoftwareTools()
self.prepareInstall()
if len(self.cmdList):
self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList)
else:
self.close()
def handleHelp(self):
if self.currList != "status":
self.session.open(PluginManagerHelp, self.skin_path)
def setState(self,status = None):
if status:
self.currList = "status"
self.statuslist = []
self["key_green"].setText("")
self["key_blue"].setText("")
self["key_yellow"].setText("")
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Updating software catalog"), '', _("Searching for available updates. Please wait..." ),'', '', statuspng, divpng, None, '' ))
elif status == 'sync':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Searching for new installed or removed packages. Please wait..." ),'', '', statuspng, divpng, None, '' ))
elif status == 'error':
self["key_green"].setText(_("Continue"))
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'', '', statuspng, divpng, None, '' ))
self["list"].style = "default"
self['list'].setList(self.statuslist)
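# Reuse the catalog from the last download when no network connection is
# available; otherwise refresh the software tools state first.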
def getUpdateInfos(self):
if (iSoftwareTools.lastDownloadDate is not None and iSoftwareTools.NetworkConnectionAvailable is False):
self.rebuildList()
else:
self.setState('update')
iSoftwareTools.startSoftwareTools(self.getUpdateInfosCB)
def getUpdateInfosCB(self, retval = None):
if retval is not None:
if retval is True:
if iSoftwareTools.available_updates != 0:
self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + ' ' + _("updates available."))
else:
self["status"].setText(_("There are no updates available."))
self.rebuildList()
elif retval is False:
if iSoftwareTools.lastDownloadDate is None:
self.setState('error')
if iSoftwareTools.NetworkConnectionAvailable:
self["status"].setText(_("Updatefeed not available."))
else:
self["status"].setText(_("No network connection available."))
else:
iSoftwareTools.lastDownloadDate = time()
iSoftwareTools.list_updating = True
self.setState('update')
iSoftwareTools.getUpdates(self.getUpdateInfosCB)
def rebuildList(self, retval = None):
if self.currentSelectedTag is None:
self.buildCategoryList()
else:
self.buildPacketList(self.currentSelectedTag)
def selectionChanged(self):
current = self["list"].getCurrent()
self["status"].setText("")
if current:
if self.currList == "packages":
self["key_red"].setText(_("Back"))
if current[4] == 'installed':
self["key_green"].setText(_("Uninstall"))
elif current[4] == 'installable':
self["key_green"].setText(_("Install"))
if iSoftwareTools.NetworkConnectionAvailable is False:
self["key_green"].setText("")
elif current[4] == 'remove':
self["key_green"].setText(_("Undo uninstall"))
elif current[4] == 'install':
self["key_green"].setText(_("Undo install"))
if iSoftwareTools.NetworkConnectionAvailable is False:
self["key_green"].setText("")
self["key_yellow"].setText(_("View details"))
self["key_blue"].setText("")
if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates != 0:
self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + ' ' + _("updates available."))
elif len(self.selectedFiles) != 0:
self["status"].setText(str(len(self.selectedFiles)) + ' ' + _("packages selected."))
else:
self["status"].setText(_("There are currently no outstanding actions."))
elif self.currList == "category":
self["key_red"].setText(_("Close"))
self["key_green"].setText("")
self["key_yellow"].setText("")
self["key_blue"].setText("")
if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates != 0:
self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + ' ' + _("updates available."))
self["key_yellow"].setText(_("Update"))
elif len(self.selectedFiles) != 0:
self["status"].setText(str(len(self.selectedFiles)) + ' ' + _("packages selected."))
self["key_yellow"].setText(_("Process"))
else:
self["status"].setText(_("There are currently no outstanding actions."))
def getSelectionState(self, detailsFile):
for entry in self.selectedFiles:
if entry[0] == detailsFile:
return True
return False
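# OK on a package toggles its pending action: an installed package is marked
# for removal, an installable one for installation, and a second press
# reverts the mark. Installs are not offered while no network is available.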
def handleCurrent(self):
current = self["list"].getCurrent()
if current:
if self.currList == "category":
self.currentSelectedIndex = self["list"].index
selectedTag = current[2]
self.buildPacketList(selectedTag)
elif self.currList == "packages":
if current[7] != '':
idx = self["list"].getIndex()
detailsFile = self.list[idx][1]
if self.list[idx][7]:
for entry in self.selectedFiles:
if entry[0] == detailsFile:
self.selectedFiles.remove(entry)
else:
alreadyinList = False
for entry in self.selectedFiles:
if entry[0] == detailsFile:
alreadyinList = True
if not alreadyinList:
if (iSoftwareTools.NetworkConnectionAvailable is False and current[4] in ('installable','install')):
pass
else:
self.selectedFiles.append((detailsFile,current[4],current[3]))
self.currentSelectedPackage = ((detailsFile,current[4],current[3]))
if current[4] == 'installed':
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'remove', True)
elif current[4] == 'installable':
if iSoftwareTools.NetworkConnectionAvailable:
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'install', True)
elif current[4] == 'remove':
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installed', False)
elif current[4] == 'install':
if iSoftwareTools.NetworkConnectionAvailable:
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installable',False)
self["list"].setList(self.list)
self["list"].setIndex(idx)
self["list"].updateList(self.list)
self.selectionChanged()
elif self.currList == "status":
iSoftwareTools.lastDownloadDate = time()
iSoftwareTools.list_updating = True
self.setState('update')
iSoftwareTools.getUpdates(self.getUpdateInfosCB)
def handleSelected(self):
current = self["list"].getCurrent()
if current:
if self.currList == "packages":
if current[7] != '':
detailsfile = iSoftwareTools.directory[0] + "/" + current[1]
if os_path.exists(detailsfile):
self.saved_currentSelectedPackage = self.currentSelectedPackage
self.session.openWithCallback(self.detailsClosed, PluginDetails, self.skin_path, current)
else:
self.session.open(MessageBox, _("Sorry, no details available!"), MessageBox.TYPE_INFO, timeout = 10)
elif self.currList == "category":
self.prepareInstall()
if len(self.cmdList):
self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList)
def detailsClosed(self, result = None):
if result is not None:
if result is not False:
self.setState('sync')
iSoftwareTools.lastDownloadDate = time()
for entry in self.selectedFiles:
if entry == self.saved_currentSelectedPackage:
self.selectedFiles.remove(entry)
iSoftwareTools.startIpkgListInstalled(self.rebuildList)
def buildEntryComponent(self, name, details, description, packagename, state, selected = False):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
if state == 'installed':
return((name, details, description, packagename, state, installedpng, divpng, selected))
elif state == 'installable':
return((name, details, description, packagename, state, installablepng, divpng, selected))
elif state == 'remove':
return((name, details, description, packagename, state, removepng, divpng, selected))
elif state == 'install':
return((name, details, description, packagename, state, installpng, divpng, selected))
def buildPacketList(self, categorytag = None):
if categorytag is not None:
self.currList = "packages"
self.currentSelectedTag = categorytag
self.packetlist = []
for package in iSoftwareTools.packagesIndexlist[:]:
prerequisites = package[0]["prerequisites"]
if prerequisites.has_key("tag"):
for foundtag in prerequisites["tag"]:
if categorytag == foundtag:
attributes = package[0]["attributes"]
if attributes.has_key("packagetype"):
if attributes["packagetype"] == "internal":
continue
self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]])
else:
self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]])
self.list = []
for x in self.packetlist:
status = ""
name = x[0].strip()
details = x[1].strip()
description = x[2].strip()
if not description:
description = "No description available."
packagename = x[3].strip()
selectState = self.getSelectionState(details)
if iSoftwareTools.installed_packetlist.has_key(packagename):
if selectState:
status = "remove"
else:
status = "installed"
self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState))
else:
if selectState:
status = "install"
else:
status = "installable"
self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState))
if len(self.list):
self.list.sort(key=lambda x: x[0])
self["list"].style = "default"
self['list'].setList(self.list)
self["list"].updateList(self.list)
self.selectionChanged()
def buildCategoryList(self):
self.currList = "category"
self.categories = []
self.categoryList = []
for package in iSoftwareTools.packagesIndexlist[:]:
prerequisites = package[0]["prerequisites"]
if prerequisites.has_key("tag"):
for foundtag in prerequisites["tag"]:
attributes = package[0]["attributes"]
if foundtag not in self.categories:
self.categories.append(foundtag)
self.categoryList.append(self.buildCategoryComponent(foundtag))
self.categoryList.sort(key=lambda x: x[0])
self["list"].style = "category"
self['list'].setList(self.categoryList)
self["list"].updateList(self.categoryList)
self.selectionChanged()
def buildCategoryComponent(self, tag = None):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
if tag is not None:
if tag == 'System':
return(( _("System"), _("View list of available system extensions" ), tag, divpng ))
elif tag == 'Skin':
return(( _("Skins"), _("View list of available skins" ), tag, divpng ))
elif tag == 'Recording':
return(( _("Recordings"), _("View list of available recording extensions" ), tag, divpng ))
elif tag == 'Network':
return(( _("Network"), _("View list of available networking extensions" ), tag, divpng ))
elif tag == 'CI':
return(( _("Common Interface"), _("View list of available CommonInterface extensions" ), tag, divpng ))
elif tag == 'Default':
return(( _("Default settings"), _("View list of available default settings" ), tag, divpng ))
elif tag == 'SAT':
return(( _("Satellite equipment"), _("View list of available Satellite equipment extensions." ), tag, divpng ))
elif tag == 'Software':
return(( _("Software"), _("View list of available software extensions" ), tag, divpng ))
elif tag == 'Multimedia':
return(( _("Multimedia"), _("View list of available multimedia extensions." ), tag, divpng ))
elif tag == 'Display':
return(( _("Display and user interface"), _("View list of available display and userinterface extensions." ), tag, divpng ))
elif tag == 'EPG':
return(( _("Electronic Program Guide"), _("View list of available EPG extensions." ), tag, divpng ))
elif tag == 'Communication':
return(( _("Communication"), _("View list of available communication extensions." ), tag, divpng ))
else: # dynamically generate entries for tags without a fixed translation
return(( str(tag), _("View list of available ") + str(tag) + ' ' + _("extensions." ), tag, divpng ))
def prepareInstall(self):
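# Translate the collected selections into an ipkg command list: a pending
# system upgrade first, then one install/remove command per selected package
# (or per file listed in the package's details, when available).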
self.cmdList = []
if iSoftwareTools.available_updates > 0:
self.cmdList.append((IpkgComponent.CMD_UPGRADE, { "test_only": False }))
if self.selectedFiles and len(self.selectedFiles):
for plugin in self.selectedFiles:
detailsfile = iSoftwareTools.directory[0] + "/" + plugin[0]
if os_path.exists(detailsfile):
iSoftwareTools.fillPackageDetails(plugin[0])
self.package = iSoftwareTools.packageDetails[0]
if self.package[0].has_key("attributes"):
self.attributes = self.package[0]["attributes"]
if self.attributes.has_key("needsRestart"):
self.restartRequired = True
if self.attributes.has_key("package"):
self.packagefiles = self.attributes["package"]
if plugin[1] == 'installed':
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] }))
else:
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] }))
else:
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] }))
else:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] }))
else:
if plugin[1] == 'installed':
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] }))
else:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] }))
def runExecute(self, result = None):
if result is not None:
if result[0] is True:
self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList)
elif result[0] is False:
self.cmdList = result[1]
self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList)
else:
self.close()
def runExecuteFinished(self):
self.reloadPluginlist()
if plugins.restartRequired or self.restartRequired:
self.session.openWithCallback(self.ExecuteReboot, MessageBox, _("Install or remove finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
else:
self.selectedFiles = []
self.restartRequired = False
self.detailsClosed(True)
def ExecuteReboot(self, result):
if result:
self.session.open(TryQuitMainloop,retvalue=3)
else:
self.selectedFiles = []
self.restartRequired = False
self.detailsClosed(True)
def reloadPluginlist(self):
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class PluginManagerInfo(Screen):
skin = """
<screen name="PluginManagerInfo" position="center,center" size="560,450" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (50, 0), size = (150, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state
MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap
],
"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
"itemHeight": 50
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" />
<widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path, cmdlist = None):
Screen.__init__(self, session)
Screen.setTitle(self, _("Plugin manager activity information"))
self.session = session
self.skin_path = plugin_path
self.cmdlist = cmdlist
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"ok": self.process_all,
"back": self.exit,
"red": self.exit,
"green": self.process_extensions,
}, -1)
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Only extensions."))
self["status"] = StaticText(_("Following tasks will be done after you press OK!"))
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
def setWindowTitle(self):
self.setTitle(_("Plugin manager activity information"))
def rebuildList(self):
self.list = []
if self.cmdlist is not None:
for entry in self.cmdlist:
action = ""
info = ""
cmd = entry[0]
if cmd == 0:
action = 'install'
elif cmd == 2:
action = 'remove'
else:
action = 'upgrade'
args = entry[1]
if cmd == 0:
info = args['package']
elif cmd == 2:
info = args['package']
else:
info = _("%s %s software because updates are available.") % (getMachineBrand(), getMachineName())
self.list.append(self.buildEntryComponent(action,info))
self['list'].setList(self.list)
self['list'].updateList(self.list)
def buildEntryComponent(self, action,info):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
upgradepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
if action == 'install':
return(( _('Installing'), info, installpng, divpng))
elif action == 'remove':
return(( _('Removing'), info, removepng, divpng))
else:
return(( _('Upgrading'), info, upgradepng, divpng))
def exit(self):
self.close()
def process_all(self):
self.close((True,None))
def process_extensions(self):
self.list = []
if self.cmdlist is not None:
for entry in self.cmdlist:
cmd = entry[0]
if cmd in (0, 2):
self.list.append(entry)
self.close((False,self.list))
class PluginManagerHelp(Screen):
skin = """
<screen name="PluginManagerHelp" position="center,center" size="560,450" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (50, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state
MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap
],
"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
"itemHeight": 50
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" />
<widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
Screen.setTitle(self, _("Plugin manager help"))
self.session = session
self.skin_path = plugin_path
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"back": self.exit,
"red": self.exit,
}, -1)
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["status"] = StaticText(_("A small overview of the available icon states and actions."))
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
def setWindowTitle(self):
self.setTitle(_("Plugin manager help"))
def rebuildList(self):
self.list = []
self.list.append(self.buildEntryComponent('install'))
self.list.append(self.buildEntryComponent('installable'))
self.list.append(self.buildEntryComponent('installed'))
self.list.append(self.buildEntryComponent('remove'))
self['list'].setList(self.list)
self['list'].updateList(self.list)
def buildEntryComponent(self, state):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
if state == 'installed':
return(( _('This plugin is installed.'), _('You can remove this plugin.'), installedpng, divpng))
elif state == 'installable':
return(( _('This plugin is not installed.'), _('You can install this plugin.'), installablepng, divpng))
elif state == 'install':
return(( _('This plugin will be installed.'), _('You can cancel the installation.'), installpng, divpng))
elif state == 'remove':
return(( _('This plugin will be removed.'), _('You can cancel the removal.'), removepng, divpng))
def exit(self):
self.close()
class PluginDetails(Screen, PackageInfoHandler):
skin = """
<screen name="PluginDetails" position="center,center" size="600,440" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="author" render="Label" position="10,50" size="500,25" zPosition="10" font="Regular;21" transparent="1" />
<widget name="statuspic" position="550,40" size="48,48" alphatest="on"/>
<widget name="divpic" position="0,80" size="600,2" alphatest="on"/>
<widget name="detailtext" position="10,90" size="270,330" zPosition="10" font="Regular;21" transparent="1" halign="left" valign="top"/>
<widget name="screenshot" position="290,90" size="300,330" alphatest="on"/>
</screen>"""
def __init__(self, session, plugin_path, packagedata = None):
Screen.__init__(self, session)
Screen.setTitle(self, _("Plugin details"))
self.skin_path = plugin_path
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
self.attributes = None
PackageInfoHandler.__init__(self, self.statusCallback, blocking = False)
self.directory = resolveFilename(SCOPE_METADIR)
if packagedata:
self.pluginname = packagedata[0]
self.details = packagedata[1]
self.pluginstate = packagedata[4]
self.statuspicinstance = packagedata[5]
self.divpicinstance = packagedata[6]
self.fillPackageDetails(self.details)
self.thumbnail = ""
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"back": self.exit,
"red": self.exit,
"green": self.go,
"up": self.pageUp,
"down": self.pageDown,
"left": self.pageUp,
"right": self.pageDown,
}, -1)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText("")
self["author"] = StaticText()
self["statuspic"] = Pixmap()
self["divpic"] = Pixmap()
self["screenshot"] = Pixmap()
self["detailtext"] = ScrollLabel()
self["statuspic"].hide()
self["screenshot"].hide()
self["divpic"].hide()
self.package = self.packageDetails[0]
if self.package[0].has_key("attributes"):
self.attributes = self.package[0]["attributes"]
self.restartRequired = False
self.cmdList = []
self.oktext = _("\nAfter pressing OK, please wait!")
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintScreenshotPixmapCB)
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.setInfos)
def setWindowTitle(self):
self.setTitle(_("Details for plugin: ") + self.pluginname )
def exit(self):
self.close(False)
def pageUp(self):
self["detailtext"].pageUp()
def pageDown(self):
self["detailtext"].pageDown()
def statusCallback(self, status, progress):
pass
def setInfos(self):
if self.attributes.has_key("screenshot"):
self.loadThumbnail(self.attributes)
if self.attributes.has_key("name"):
self.pluginname = self.attributes["name"]
else:
self.pluginname = _("unknown")
if self.attributes.has_key("author"):
self.author = self.attributes["author"]
else:
self.author = _("unknown")
if self.attributes.has_key("description"):
self.description = _(self.attributes["description"].replace("\\n", "\n"))
else:
self.description = _("No description available.")
self["author"].setText(_("Author: ") + self.author)
self["detailtext"].setText(_(self.description))
if self.pluginstate in ('installable', 'install'):
if iSoftwareTools.NetworkConnectionAvailable:
self["key_green"].setText(_("Install"))
else:
self["key_green"].setText("")
else:
self["key_green"].setText(_("Remove"))
def loadThumbnail(self, entry):
thumbnailUrl = None
if entry.has_key("screenshot"):
thumbnailUrl = entry["screenshot"]
if self.language == "de":
if thumbnailUrl[-7:] == "_en.jpg":
thumbnailUrl = thumbnailUrl[:-7] + "_de.jpg"
if thumbnailUrl is not None:
self.thumbnail = "/tmp/" + thumbnailUrl.split('/')[-1]
print "[PluginDetails] downloading screenshot " + thumbnailUrl + " to " + self.thumbnail
if iSoftwareTools.NetworkConnectionAvailable:
client.downloadPage(thumbnailUrl,self.thumbnail).addCallback(self.setThumbnail).addErrback(self.fetchFailed)
else:
self.setThumbnail(noScreenshot = True)
else:
self.setThumbnail(noScreenshot = True)
def setThumbnail(self, noScreenshot = False):
if not noScreenshot:
filename = self.thumbnail
else:
filename = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/noprev.png")
sc = AVSwitch().getFramebufferScale()
self.picload.setPara((self["screenshot"].instance.size().width(), self["screenshot"].instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
self.picload.startDecode(filename)
if self.statuspicinstance is not None:
self["statuspic"].instance.setPixmap(self.statuspicinstance.__deref__())
self["statuspic"].show()
if self.divpicinstance is not None:
self["divpic"].instance.setPixmap(self.divpicinstance.__deref__())
self["divpic"].show()
def paintScreenshotPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
if ptr is not None:
self["screenshot"].instance.setPixmap(ptr.__deref__())
self["screenshot"].show()
else:
self.setThumbnail(noScreenshot = True)
def go(self):
if self.attributes.has_key("package"):
self.packagefiles = self.attributes["package"]
if self.attributes.has_key("needsRestart"):
self.restartRequired = True
self.cmdList = []
if self.pluginstate in ('installed', 'remove'):
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] }))
if len(self.cmdList):
self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + self.pluginname + "\n" + self.oktext)
else:
if iSoftwareTools.NetworkConnectionAvailable:
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + self.pluginname + "\n" + self.oktext)
def runUpgrade(self, result):
if result:
self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList)
def runUpgradeFinished(self):
self.reloadPluginlist()
if plugins.restartRequired or self.restartRequired:
self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Installation finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
else:
self.close(True)
def UpgradeReboot(self, result):
if result:
self.session.open(TryQuitMainloop,retvalue=3)
self.close(True)
def runRemove(self, result):
if result:
self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList)
def runRemoveFinished(self):
self.close(True)
def reloadPluginlist(self):
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
def fetchFailed(self,string):
self.setThumbnail(noScreenshot = True)
print "[PluginDetails] fetch failed " + string.getErrorMessage()
class UpdatePlugin(Screen):
skin = """
<screen name="UpdatePlugin" position="center,center" size="550,300" >
<widget name="activityslider" position="0,0" size="550,5" />
<widget name="slider" position="0,150" size="550,30" />
<widget source="package" render="Label" position="10,30" size="540,20" font="Regular;18" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
<widget source="status" render="Label" position="10,180" size="540,100" font="Regular;20" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, *args):
Screen.__init__(self, session)
Screen.setTitle(self, _("Software update"))
self.sliderPackages = { "dreambox-dvb-modules": 1, "enigma2": 2, "tuxbox-image-info": 3 }
self.slider = Slider(0, 4)
self["slider"] = self.slider
self.activityslider = Slider(0, 100)
self["activityslider"] = self.activityslider
self.status = StaticText(_("Please wait..."))
self["status"] = self.status
self.package = StaticText(_("Package list update"))
self["package"] = self.package
self.oktext = _("Press OK on your remote control to continue.")
self.packages = 0
self.error = 0
self.processed_packages = []
self.total_packages = None
self.skin_path = plugin_path
self.TraficCheck = False
self.TraficResult = False
self.CheckDateDone = False
self.activity = 0
self.activityTimer = eTimer()
self.activityTimer.callback.append(self.doActivityTimer)
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.updating = False
self["actions"] = ActionMap(["WizardActions"],
{
"ok": self.exit,
"back": self.exit
}, -1)
self.activityTimer.start(100, False)
def CheckDate(self):
# Check that the image is not too old for an update (max 30 days)
self.CheckDateDone = True
tmpdate = getEnigmaVersionString()
imageDate = date(int(tmpdate[0:4]), int(tmpdate[5:7]), int(tmpdate[8:10]))
datedelay = imageDate + timedelta(days=30)
message = _("Your image is out of date!\n\n"
"After such a long time, there is a risk that your %s %s will not\n"
"boot after online-update, or will show disfunction in running Image.\n\n"
"A new flash will increase the stability\n\n"
"An online update is done at your own risk !!\n\n\n"
"Do you still want to update?") % (getMachineBrand(), getMachineName())
if datedelay > date.today():
self.updating = True
self.activityTimer.start(100, False)
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
else:
print"[SOFTWAREMANAGER] Your image is to old (%s), you need to flash new !!" %getEnigmaVersionString()
self.session.openWithCallback(self.checkDateCallback, MessageBox, message, default = False)
return
def checkDateCallback(self, ret):
print ret
if ret:
self.activityTimer.start(100, False)
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
else:
self.close()
return
def checkTraficLight(self):
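# Query the droidsat.org "traffic light" status page before upgrading:
# yellow ('gelb.png') warns that the update is untested (only when
# config.softwareupdate.updatebeta is enabled), red ('rot.png') blocks the
# update unless "Install unstable updates" is active; anything else
# proceeds directly.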
from urllib import urlopen
import socket
currentTimeoutDefault = socket.getdefaulttimeout()
socket.setdefaulttimeout(3)
message = ""
picon = None
default = True
doUpdate = True
# TODO: Use Twisted's URL fetcher, urlopen is evil. And it can
# run in parallel to the package update.
try:
urlopenSTATUS = "http://droidsat.org/feeds-status/index.php"
d = urlopen(urlopenSTATUS)
tmpStatus = d.read()
if config.softwareupdate.updatebeta.value and 'gelb.png' in tmpStatus:
message = _("Caution update not tested yet !!") + "\n" + _("Update at your own risk") + "\n\n" + _("For more information see http://www.droidsat.org") + "\n\n"# + _("Last Status Date") + ": " + statusDate + "\n\n"
picon = MessageBox.TYPE_ERROR
default = False
elif 'rot.png' in tmpStatus:
if config.softwareupdate.updateisunstable.value:
message = _("Update is reported as faulty !!") + "\n" + _("But you have activated \"Install unstable updates\"") + "\n" + _("Update anyway?")# + "\n\n" + _("Last Status Date") + ": " + statusDate
picon = MessageBox.TYPE_ERROR
default = False
else:
message = _("Update is reported as faulty !!") + "\n" + _("Aborting updateprogress") + "\n\n" + _("For more information see http://www.droidsat.org")# + "\n\n" + _("Last Status Date") + ": " + statusDate
picon = MessageBox.TYPE_ERROR
default = False
doUpdate = False
except:
message = _("The status of the current update could not be checked because http://www.droidsat.org could not be reached for some reason") + "\n"
picon = MessageBox.TYPE_ERROR
default = False
socket.setdefaulttimeout(currentTimeoutDefault)
if default:
# We'll ask later
self.runUpgrade(True)
else:
if doUpdate:
# Ask for Update,
message += _("Do you want to update your %s %s?") % (getMachineBrand(), getMachineName()) + "\n" + _("After pressing OK, please wait!")
self.session.openWithCallback(self.runUpgrade, MessageBox, message, default = default, picon = picon)
else:
# Don't Update RED LIGHT !!
self.session.open(MessageBox, message, picon, timeout = 20)
self.runUpgrade(False)
def runUpgrade(self, result):
self.TraficResult = result
if result:
self.TraficCheck = True
print "create /etc/last-upgrades-git.log with opkg list-upgradable"
os.system("opkg list-upgradable > /etc/last-upgrades-git.log")
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
else:
self.TraficCheck = False
self.activityTimer.stop()
self.activityslider.setValue(0)
self.exit()
def doActivityTimer(self):
if not self.CheckDateDone:
self.activityTimer.stop()
self.CheckDate()
return
self.activity += 1
if self.activity == 100:
self.activity = 0
self.activityslider.setValue(self.activity)
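# ipkgCallback drives the update flow: once the upgradable-package list is
# fetched, the traffic light is checked and the user is asked how to
# proceed; the other EVENT_* branches just update the progress display.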
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DOWNLOAD:
self.status.setText(_("Downloading"))
elif event == IpkgComponent.EVENT_UPGRADE:
if self.sliderPackages.has_key(param):
self.slider.setValue(self.sliderPackages[param])
self.package.setText(param)
self.status.setText(_("Upgrading") + ": %s/%s" % (self.packages, self.total_packages))
if param not in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_INSTALL:
self.package.setText(param)
self.status.setText(_("Installing"))
if param not in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_REMOVE:
self.package.setText(param)
self.status.setText(_("Removing"))
if param not in self.processed_packages:
self.processed_packages.append(param)
self.packages += 1
elif event == IpkgComponent.EVENT_CONFIGURING:
self.package.setText(param)
self.status.setText(_("Configuring"))
elif event == IpkgComponent.EVENT_MODIFIED:
if config.plugins.softwaremanager.overwriteConfigFiles.value in ("N", "Y"):
self.ipkg.write(True and config.plugins.softwaremanager.overwriteConfigFiles.value)
else:
self.session.openWithCallback(
self.modificationCallback,
MessageBox,
_("A configuration file (%s) was modified since Installation.\nDo you want to keep your version?") % (param)
)
elif event == IpkgComponent.EVENT_ERROR:
self.error += 1
elif event == IpkgComponent.EVENT_DONE:
if self.updating:
self.updating = False
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
elif self.ipkg.currentCommand == IpkgComponent.CMD_UPGRADE_LIST:
self.total_packages = len(self.ipkg.getFetchedList())
if self.total_packages and not self.TraficCheck:
self.checkTraficLight()
return
if self.total_packages and self.TraficCheck and self.TraficResult:
message = _("Do you want to update your %s %s") % (getMachineBrand(), getMachineName()) + " \n(%s " % self.total_packages + _("Packages") + ")"
if config.plugins.softwaremanager.updatetype.value == "cold":
choices = [(_("Show new Packages"), "show"), (_("Unattended upgrade without GUI and reboot system"), "cold"), (_("Cancel"), "")]
else:
choices = [(_("Show new Packages"), "show"), (_("Upgrade and ask to reboot"), "hot"), (_("Cancel"), "")]
self.session.openWithCallback(self.startActualUpgrade, ChoiceBox, title=message, list=choices)
else:
self.session.openWithCallback(self.close, MessageBox, _("Nothing to upgrade"), type=MessageBox.TYPE_INFO, timeout=10, close_on_any_key=True)
elif self.error == 0:
self.slider.setValue(4)
self.activityTimer.stop()
self.activityslider.setValue(0)
self.package.setText(_("Done - Installed or upgraded %d packages") % self.packages)
self.status.setText(self.oktext)
else:
self.activityTimer.stop()
self.activityslider.setValue(0)
error = _("your %s %s might be unusable now. Please consult the manual for further assistance before rebooting your %s %s.") % (getMachineBrand(), getMachineName(), getMachineBrand(), getMachineName())
if self.packages == 0:
error = _("No packages were upgraded yet. So you can check your network and try again.")
if self.updating:
error = _("Your %s %s isn't connected to the internet properly. Please check it and try again.") % (getMachineBrand(), getMachineName())
self.status.setText(_("Error") + " - " + error)
#print event, "-", param
pass
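# "show" lists the upgradable packages first and then returns to this
# choice; "cold" reboots into an unattended image update (TryQuitMainloop
# retvalue 42); the default runs the upgrade in the GUI.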
def startActualUpgrade(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == "cold":
self.session.open(TryQuitMainloop,retvalue=42)
self.close()
elif answer[1] == "show":
global plugin_path
# defer the EVENT_DONE handling until the package list screen is closed;
# calling self.ipkgCallback(...) directly here would run it immediately and
# pass its return value (None) as the callback
self.session.openWithCallback(lambda *ret: self.ipkgCallback(IpkgComponent.EVENT_DONE, None), ShowUpdatePackages, plugin_path)
else:
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE, args = {'test_only': False})
def modificationCallback(self, res):
self.ipkg.write(res and "N" or "Y")
def exit(self):
if not self.ipkg.isRunning():
if self.packages != 0 and self.error == 0:
if fileExists("/etc/enigma2/.removelang"):
language.delLanguage()
#self.session.openWithCallback(self.exitAnswer, MessageBox, _("Upgrade finished.") +" "+_("Do you want to reboot your %s %s?") % (getMachineBrand(), getMachineName()))
self.restoreoDreamy()
else:
self.close()
else:
if not self.updating:
self.ipkg.stop()
self.close()
def exitAnswer(self, result):
if result is not None and result:
self.session.open(TryQuitMainloop,retvalue=2)
self.close()
def restoreoDreamy(self):
try:
if config.skin.primary_skin.value == "oDreamy/oDreamy.xml" and not os.path.exists("/usr/share/enigma2/oDreamy/oDreamy.xml"):
self.session.openWithCallback(self.restoreoDreamyCallback, RestoreoDreamy)
elif config.skin.primary_skin.value == "oDreamy/skin.oDreamy.xml" and config.plugins.WeatherOther.FHDenabled.value:
from Plugins.Extensions.Weather.MainSettingsView import MainSettingsView
MainSettingsView(None).getFHDiconRefresh()
self.restoreoDreamyCallback()
else:
self.restoreoDreamyCallback()
except:
self.restoreoDreamyCallback()
def restoreoDreamyCallback(self, ret = None):
self.session.openWithCallback(self.exitAnswer, MessageBox, _("Upgrade finished.") +" "+_("Do you want to reboot your %s %s?") % (getMachineBrand(), getMachineName()))
class IPKGMenu(Screen):
skin = """
<screen name="IPKGMenu" position="center,center" size="560,400" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="filelist" position="5,50" size="550,340" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
Screen.setTitle(self, _("Select upgrade source to edit."))
self.skin_path = plugin_path
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self.sel = []
self.val = []
self.entry = False
self.exe = False
self.path = ""
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.KeyOk,
"cancel": self.keyCancel
}, -1)
self["shortcuts"] = ActionMap(["ShortcutActions"],
{
"red": self.keyCancel,
"green": self.KeyOk,
})
self["filelist"] = MenuList([])
self.fill_list()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Select upgrade source to edit."))
def fill_list(self):
flist = []
self.path = '/etc/opkg/'
if not os_path.exists(self.path):
self.entry = False
return
for file in listdir(self.path):
if file.endswith(".conf"):
if file not in ('arch.conf', 'opkg.conf'):
flist.append(file)
self.entry = True
self["filelist"].l.setList(flist)
def KeyOk(self):
if not self.exe and self.entry:
self.sel = self["filelist"].getCurrent()
self.val = self.path + self.sel
self.session.open(IPKGSource, self.val)
def keyCancel(self):
self.close()
def Exit(self):
self.close()
class IPKGSource(Screen):
skin = """
<screen name="IPKGSource" position="center,center" size="560,80" title="Edit upgrade source url." >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="text" position="5,50" size="550,25" font="Regular;20" backgroundColor="background" foregroundColor="#cccccc" />
</screen>"""
def __init__(self, session, configfile = None):
Screen.__init__(self, session)
self.session = session
self.configfile = configfile
text = ""
if self.configfile:
try:
fp = file(configfile, 'r')
sources = fp.readlines()
if sources:
text = sources[0]
fp.close()
except IOError:
pass
desk = getDesktop(0)
y = int(desk.size().height())
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
if (y>=720):
self["text"] = Input(text, maxSize=False, type=Input.TEXT)
else:
self["text"] = Input(text, maxSize=False, visible_width = 55, type=Input.TEXT)
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "TextEntryActions", "KeyboardInputActions","ShortcutActions"],
{
"ok": self.go,
"back": self.close,
"red": self.close,
"green": self.go,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDeleteForward,
"deleteBackward": self.keyDeleteBackward,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setWindowTitle()
self["text"].right()
def setWindowTitle(self):
self.setTitle(_("Edit upgrade source url."))
def go(self):
text = self["text"].getText()
if text:
fp = file(self.configfile, 'w')
fp.write(text)
fp.write("\n")
fp.close()
self.close()
def keyLeft(self):
self["text"].left()
def keyRight(self):
self["text"].right()
def keyHome(self):
self["text"].home()
def keyEnd(self):
self["text"].end()
def keyDeleteForward(self):
self["text"].delete()
def keyDeleteBackward(self):
self["text"].deleteBackward()
def keyNumberGlobal(self, number):
self["text"].number(number)
class PacketManager(Screen, NumericalTextInput):
skin = """
<screen name="PacketManager" position="center,center" size="530,420" title="Packet manager" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="520,365" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (5, 1), size = (440, 28), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (5, 26), size = (440, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaTest(pos = (445, 2), size = (48, 48), png = 4), # index 4 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (5, 50), size = (510, 2), png = 5), # index 5 is the div pixmap
],
"fonts": [gFont("Regular", 22),gFont("Regular", 14)],
"itemHeight": 52
}
</convert>
</widget>
</screen>"""
def __init__(self, session, plugin_path, args = None):
Screen.__init__(self, session)
NumericalTextInput.__init__(self)
self.session = session
self.skin_path = plugin_path
self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz')
self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "NumberActions", "InputActions", "InputAsciiActions", "KeyboardInputActions" ],
{
"ok": self.go,
"back": self.exit,
"red": self.exit,
"green": self.reload,
"gotAsciiCode": self.keyGotAscii,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.list = []
self.statuslist = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Reload"))
self.list_updating = True
self.packetlist = []
self.installed_packetlist = {}
self.upgradeable_packages = {}
self.Console = Console()
self.cmdList = []
self.cachelist = []
self.cache_ttl = 86400 # seconds the cache is considered valid (600 is the default, 0 disables caching); 24h should be fine for cached package lists
self.cache_file = eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/SoftwareManager/packetmanager.cache') # path to the cache file
self.oktext = _("\nAfter pressing OK, please wait!")
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src', 'busybox')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmAscii)
def keyNumberGlobal(self, val):
key = self.getKey(val)
if key is not None:
keyvalue = key.encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def keyGotAscii(self):
keyvalue = unichr(getPrevAsciiCode()).encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
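# SMS-style quick navigation: a numeric key press is mapped to a character
# and the list jumps to the first package whose name starts with it.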
def setNextIdx(self,char):
if char in ("0", "1", "a"):
self["list"].setIndex(0)
else:
idx = self.getNextIdx(char)
if idx and idx <= self["list"].count:
self["list"].setIndex(idx)
def getNextIdx(self,char):
for idx, i in enumerate(self["list"].list):
if i[0] and (i[0][0] == char):
return idx
def exit(self):
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
self.close()
def reload(self):
if (os_path.exists(self.cache_file) == True):
remove(self.cache_file)
self.list_updating = True
self.rebuildList()
def setWindowTitle(self):
self.setTitle(_("Packet manager"))
def setStatus(self,status = None):
if status:
self.statuslist = []
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Trying to download a new packetlist. Please wait..." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
elif status == 'error':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
def rebuildList(self):
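# Serve the package list from the on-disk cache while it is still valid;
# otherwise fall back to a full package list update (see ipkgCallback).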
self.setStatus('update')
self.inv_cache = 0
self.vc = valid_cache(self.cache_file, self.cache_ttl)
if self.cache_ttl > 0 and self.vc != 0:
try:
self.buildPacketList()
except:
self.inv_cache = 1
if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
self.run = 0
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
def go(self, returnValue = None):
cur = self["list"].getCurrent()
if cur:
status = cur[3]
package = cur[0]
self.cmdList = []
if status == 'installed':
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + package + "\n" + self.oktext)
elif status == 'upgradeable':
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to upgrade the package:\n") + package + "\n" + self.oktext)
elif status == "installable":
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + package + "\n" + self.oktext)
def runRemove(self, result):
if result:
self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList)
def runRemoveFinished(self):
self.session.openWithCallback(self.RemoveReboot, MessageBox, _("Remove finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
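# If the user declines the reboot, patch the list entry and the on-disk
# cache in place so the new install state shows up without a full reload.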
def RemoveReboot(self, result):
if result is None:
return
if result is False:
cur = self["list"].getCurrent()
if cur:
item = self['list'].getIndex()
self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installable')
self.cachelist[item] = [cur[0], cur[1], cur[2], 'installable']
self['list'].setList(self.list)
write_cache(self.cache_file, self.cachelist)
self.reloadPluginlist()
if result:
self.session.open(TryQuitMainloop,retvalue=3)
def runUpgrade(self, result):
if result:
self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList)
def runUpgradeFinished(self):
self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Upgrade finished.") +" "+_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
def UpgradeReboot(self, result):
if result is None:
return
if result is False:
cur = self["list"].getCurrent()
if cur:
item = self['list'].getIndex()
self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installed')
self.cachelist[item] = [cur[0], cur[1], cur[2], 'installed']
self['list'].setList(self.list)
write_cache(self.cache_file, self.cachelist)
self.reloadPluginlist()
if result:
self.session.open(TryQuitMainloop,retvalue=3)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
self.setStatus('error')
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.list_updating = False
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " list"
self.Console.ePopen(cmd, self.IpkgList_Finished)
#print event, "-", param
pass
def IpkgList_Finished(self, result, retval, extra_args = None):
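# 'opkg list' wraps long descriptions onto continuation lines starting with
# a space; fold them back into single 'name - version - description' records
# before splitting on ' - '.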
result = result.replace('\n ',' - ')
if result:
self.packetlist = []
last_name = ""
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if 'locale' not in name and not any(name.endswith(ext) for ext in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 3 and tokens[3].strip() or l > 2 and tokens[2].strip() or ""
if name == last_name:
continue
last_name = name
self.packetlist.append([name, version, descr])
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.Console.ePopen(cmd, self.IpkgListInstalled_Finished)
def IpkgListInstalled_Finished(self, result, retval, extra_args = None):
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
if not self.Console:
self.Console = Console()
cmd = "opkg list-upgradable"
self.Console.ePopen(cmd, self.OpkgListUpgradeable_Finished)
def OpkgListUpgradeable_Finished(self, result, retval, extra_args = None):
if result:
self.upgradeable_packages = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 2 and tokens[2].strip() or ""
self.upgradeable_packages[name] = version
self.buildPacketList()
def buildEntryComponent(self, name, version, description, state):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_ACTIVE_SKIN, "div-h.png"))
if not description:
description = "No description available."
if state == 'installed':
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
return((name, version, _(description), state, installedpng, divpng))
elif state == 'upgradeable':
upgradeablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgradeable.png"))
return((name, version, _(description), state, upgradeablepng, divpng))
else:
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
return((name, version, _(description), state, installablepng, divpng))
def buildPacketList(self):
self.list = []
self.cachelist = []
if self.cache_ttl > 0 and self.vc != 0:
print 'Loading packagelist cache from ',self.cache_file
try:
self.cachelist = load_cache(self.cache_file)
if len(self.cachelist) > 0:
for x in self.cachelist:
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], x[3]))
self['list'].setList(self.list)
except:
self.inv_cache = 1
if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
print 'rebuilding fresh package list'
for x in self.packetlist:
status = ""
if self.installed_packetlist.has_key(x[0]):
if self.upgradeable_packages.has_key(x[0]):
status = "upgradeable"
else:
status = "installed"
else:
status = "installable"
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], status))
self.cachelist.append([x[0], x[1], x[2], status])
write_cache(self.cache_file, self.cachelist)
self['list'].setList(self.list)
def reloadPluginlist(self):
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class IpkgInstaller(Screen):
skin = """
<screen name="IpkgInstaller" position="center,center" size="550,450" title="Install extensions" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="540,360" />
<ePixmap pixmap="skin_default/div-h.png" position="0,410" zPosition="10" size="560,2" transparent="1" alphatest="on" />
<widget source="introduction" render="Label" position="5,420" zPosition="10" size="550,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, list):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
p = 0
if len(list):
p = list[0].rfind("/")
title = list[0][:p]
self.title = ("%s %s %s") % (_("Install extensions"), _("from"), title)
for listindex in range(len(list)):
self.list.addSelection(list[listindex][p+1:], list[listindex], listindex, False)
self.list.sort()
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Install"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Invert"))
self["introduction"] = StaticText(_("Press OK to toggle the selection."))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.close,
"red": self.close,
"green": self.install,
"blue": self.list.toggleAllSelection
}, -1)
def install(self):
list = self.list.getSelectionsList()
cmdList = []
for item in list:
cmdList.append((IpkgComponent.CMD_INSTALL, { "package": item[1] }))
self.session.open(Ipkg, cmdList = cmdList)
def filescan_open(list, session, **kwargs):
filelist = [x.path for x in list]
session.open(IpkgInstaller, filelist) # list
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
return \
Scanner(mimetypes = ["application/x-debian-package"],
paths_to_scan =
[
ScanPath(path = "ipk", with_subdirs = True),
ScanPath(path = "", with_subdirs = False),
],
name = "Ipkg",
description = _("Install extensions."),
openfnc = filescan_open, )
class ShowUpdatePackages(Screen, NumericalTextInput):
skin = """
<screen name="ShowUpdatePackages" position="center,center" size="530,420" title="New Packages" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="520,365" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (5, 1), size = (440, 28), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (5, 26), size = (440, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaTest(pos = (445, 2), size = (48, 48), png = 4), # index 4 is the status pixmap
				MultiContentEntryPixmapAlphaTest(pos = (5, 50), size = (510, 2), png = 5), # index 5 is the div pixmap
],
"fonts": [gFont("Regular", 22),gFont("Regular", 14)],
"itemHeight": 52
}
</convert>
</widget>
</screen>"""
def __init__(self, session, plugin_path, args = None):
Screen.__init__(self, session)
NumericalTextInput.__init__(self)
self.session = session
self.skin_path = plugin_path
self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz')
self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "NumberActions", "InputActions", "InputAsciiActions", "KeyboardInputActions"],
{
"back": self.exit,
"red": self.exit,
"ok": self.exit,
"green": self.rebuildList,
"gotAsciiCode": self.keyGotAscii,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.list = []
self.statuslist = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Reload"))
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmAscii)
def keyNumberGlobal(self, val):
key = self.getKey(val)
if key is not None:
keyvalue = key.encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def keyGotAscii(self):
keyvalue = unichr(getPrevAsciiCode()).encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def setNextIdx(self,char):
if char in ("0", "1", "a"):
self["list"].setIndex(0)
else:
idx = self.getNextIdx(char)
if idx and idx <= self["list"].count:
self["list"].setIndex(idx)
def getNextIdx(self,char):
for idx, i in enumerate(self["list"].list):
if i[0] and (i[0][0] == char):
return idx
def exit(self):
self.ipkg.stop()
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
self.close()
def setWindowTitle(self):
self.setTitle(_("New Packages"))
def setStatus(self,status = None):
if status:
self.statuslist = []
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Trying to download a new updatelist. Please wait..." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
elif status == 'error':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
self.statuslist.append(( _("Error"), '', _("There was an error downloading the updatelist. Please try again." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
def rebuildList(self):
self.setStatus('update')
self.ipkg.startCmd(IpkgComponent.CMD_UPGRADE_LIST)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.setStatus('error')
elif event == IpkgComponent.EVENT_DONE:
self.buildPacketList()
pass
def buildEntryComponent(self, name, version, description, state):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if not description:
description = "No description available."
if state == 'installed':
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
return((name, version, _(description), state, installedpng, divpng))
elif state == 'upgradeable':
upgradeablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgradeable.png"))
return((name, version, _(description), state, upgradeablepng, divpng))
else:
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
return((name, version, _(description), state, installablepng, divpng))
def buildPacketList(self):
self.list = []
fetchedList = self.ipkg.getFetchedList()
excludeList = self.ipkg.getExcludeList()
if len(fetchedList) > 0:
for x in fetchedList:
try:
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], "upgradeable"))
except:
self.list.append(self.buildEntryComponent(x[0], '', 'no valid architecture, ignoring !!', "installable"))
if len(excludeList) > 0:
for x in excludeList:
try:
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], "installable"))
except:
self.list.append(self.buildEntryComponent(x[0], '', 'no valid architecture, ignoring !!', "installable"))
self['list'].setList(self.list)
else:
self.setStatus('error')
def UpgradeMain(session, **kwargs):
session.open(UpdatePluginMenu)
def startSetup(menuid):
if menuid != "setup":
return [ ]
return [(_("Software management"), UpgradeMain, "software_manager", 50)]
def Plugins(path, **kwargs):
global plugin_path
plugin_path = path
list = [
PluginDescriptor(name=_("Software management"), description=_("Manage your %s %s's software") % (getMachineBrand(), getMachineName()), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=startSetup),
PluginDescriptor(name=_("Ipkg"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan)
]
if config.usage.setup_level.index >= 2: # expert+
list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your %s %s's software") % (getMachineBrand(), getMachineName()), where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc=UpgradeMain))
return list
| gpl-2.0 | -1,043,935,059,696,353,900 | 41.554745 | 242 | 0.694149 | false |
istio/istio | samples/bookinfo/src/productpage/productpage.py | 1 | 15160 | #!/usr/bin/python
#
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from flask_bootstrap import Bootstrap
from flask import Flask, request, session, render_template, redirect, url_for
from flask import _request_ctx_stack as stack
from jaeger_client import Tracer, ConstSampler
from jaeger_client.reporter import NullReporter
from jaeger_client.codecs import B3Codec
from opentracing.ext import tags
from opentracing.propagation import Format
from opentracing_instrumentation.request_context import get_current_span, span_in_context
import simplejson as json
import requests
import sys
from json2html import *
import logging
import os
import asyncio
# The following lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
app = Flask(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.DEBUG)
# Set the secret key to some random bytes. Keep this really secret!
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
Bootstrap(app)
servicesDomain = "" if (os.environ.get("SERVICES_DOMAIN") is None) else "." + os.environ.get("SERVICES_DOMAIN")
detailsHostname = "details" if (os.environ.get("DETAILS_HOSTNAME") is None) else os.environ.get("DETAILS_HOSTNAME")
ratingsHostname = "ratings" if (os.environ.get("RATINGS_HOSTNAME") is None) else os.environ.get("RATINGS_HOSTNAME")
reviewsHostname = "reviews" if (os.environ.get("REVIEWS_HOSTNAME") is None) else os.environ.get("REVIEWS_HOSTNAME")
flood_factor = 0 if (os.environ.get("FLOOD_FACTOR") is None) else int(os.environ.get("FLOOD_FACTOR"))
details = {
"name": "http://{0}{1}:9080".format(detailsHostname, servicesDomain),
"endpoint": "details",
"children": []
}
ratings = {
"name": "http://{0}{1}:9080".format(ratingsHostname, servicesDomain),
"endpoint": "ratings",
"children": []
}
reviews = {
"name": "http://{0}{1}:9080".format(reviewsHostname, servicesDomain),
"endpoint": "reviews",
"children": [ratings]
}
productpage = {
"name": "http://{0}{1}:9080".format(detailsHostname, servicesDomain),
"endpoint": "details",
"children": [details, reviews]
}
service_dict = {
"productpage": productpage,
"details": details,
"reviews": reviews,
}
# A note on distributed tracing:
#
# Although Istio proxies are able to automatically send spans, they need some
# hints to tie together the entire trace. Applications need to propagate the
# appropriate HTTP headers so that when the proxies send span information, the
# spans can be correlated correctly into a single trace.
#
# To do this, an application needs to collect and propagate headers from the
# incoming request to any outgoing requests. The choice of headers to propagate
# is determined by the trace configuration used. See getForwardHeaders for
# the different header options.
#
# This example code uses OpenTracing (http://opentracing.io/) to propagate
# the 'b3' (zipkin) headers. Using OpenTracing for this is not a requirement.
# Using OpenTracing allows you to add application-specific tracing later on,
# but you can just manually forward the headers if you prefer.
#
# The OpenTracing example here is very basic. It only forwards headers. It is
# intended as a reference to help people get started, eg how to create spans,
# extract/inject context, etc.
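#
# As a rough illustration of the manual alternative mentioned above
# (a hypothetical helper, not used anywhere in this app), forwarding the
# b3 headers verbatim would look something like:
#
#     def forward_b3_headers(request):
#         b3_headers = ('x-b3-traceid', 'x-b3-spanid', 'x-b3-parentspanid',
#                       'x-b3-sampled', 'x-b3-flags')
#         return {h: request.headers[h] for h in b3_headers
#                 if h in request.headers}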
# A very basic OpenTracing tracer (with null reporter)
tracer = Tracer(
one_span_per_rpc=True,
service_name='productpage',
reporter=NullReporter(),
sampler=ConstSampler(decision=True),
extra_codecs={Format.HTTP_HEADERS: B3Codec()}
)
def trace():
'''
Function decorator that creates opentracing span from incoming b3 headers
'''
def decorator(f):
def wrapper(*args, **kwargs):
request = stack.top.request
try:
# Create a new span context, reading in values (traceid,
# spanid, etc) from the incoming x-b3-*** headers.
span_ctx = tracer.extract(
Format.HTTP_HEADERS,
dict(request.headers)
)
# Note: this tag means that the span will *not* be
# a child span. It will use the incoming traceid and
# spanid. We do this to propagate the headers verbatim.
rpc_tag = {tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER}
span = tracer.start_span(
operation_name='op', child_of=span_ctx, tags=rpc_tag
)
            except Exception:
# We failed to create a context, possibly due to no
# incoming x-b3-*** headers. Start a fresh span.
# Note: This is a fallback only, and will create fresh headers,
# not propagate headers.
span = tracer.start_span('op')
with span_in_context(span):
r = f(*args, **kwargs)
return r
wrapper.__name__ = f.__name__
return wrapper
return decorator
def getForwardHeaders(request):
headers = {}
# x-b3-*** headers can be populated using the opentracing span
span = get_current_span()
carrier = {}
tracer.inject(
span_context=span.context,
format=Format.HTTP_HEADERS,
carrier=carrier)
headers.update(carrier)
# We handle other (non x-b3-***) headers manually
if 'user' in session:
headers['end-user'] = session['user']
# Keep this in sync with the headers in details and reviews.
incoming_headers = [
# All applications should propagate x-request-id. This header is
# included in access log statements and is used for consistent trace
# sampling and log sampling decisions in Istio.
'x-request-id',
# Lightstep tracing header. Propagate this if you use lightstep tracing
# in Istio (see
# https://istio.io/latest/docs/tasks/observability/distributed-tracing/lightstep/)
# Note: this should probably be changed to use B3 or W3C TRACE_CONTEXT.
# Lightstep recommends using B3 or TRACE_CONTEXT and most application
# libraries from lightstep do not support x-ot-span-context.
'x-ot-span-context',
# Datadog tracing header. Propagate these headers if you use Datadog
# tracing.
'x-datadog-trace-id',
'x-datadog-parent-id',
'x-datadog-sampling-priority',
# W3C Trace Context. Compatible with OpenCensusAgent and Stackdriver Istio
# configurations.
'traceparent',
'tracestate',
# Cloud trace context. Compatible with OpenCensusAgent and Stackdriver Istio
# configurations.
'x-cloud-trace-context',
# Grpc binary trace context. Compatible with OpenCensusAgent nad
# Stackdriver Istio configurations.
'grpc-trace-bin',
# b3 trace headers. Compatible with Zipkin, OpenCensusAgent, and
# Stackdriver Istio configurations. Commented out since they are
# propagated by the OpenTracing tracer above.
# 'x-b3-traceid',
# 'x-b3-spanid',
# 'x-b3-parentspanid',
# 'x-b3-sampled',
# 'x-b3-flags',
# Application-specific headers to forward.
'user-agent',
]
# For Zipkin, always propagate b3 headers.
# For Lightstep, always propagate the x-ot-span-context header.
# For Datadog, propagate the corresponding datadog headers.
# For OpenCensusAgent and Stackdriver configurations, you can choose any
# set of compatible headers to propagate within your application. For
# example, you can propagate b3 headers or W3C trace context headers with
# the same result. This can also allow you to translate between context
# propagation mechanisms between different applications.
for ihdr in incoming_headers:
val = request.headers.get(ihdr)
if val is not None:
headers[ihdr] = val
return headers
# The UI:
@app.route('/')
@app.route('/index.html')
def index():
""" Display productpage with normal user and test user buttons"""
global productpage
table = json2html.convert(json=json.dumps(productpage),
table_attributes="class=\"table table-condensed table-bordered table-hover\"")
return render_template('index.html', serviceTable=table)
@app.route('/health')
def health():
return 'Product page is healthy'
@app.route('/login', methods=['POST'])
def login():
user = request.values.get('username')
response = app.make_response(redirect(request.referrer))
session['user'] = user
return response
@app.route('/logout', methods=['GET'])
def logout():
response = app.make_response(redirect(request.referrer))
session.pop('user', None)
return response
# a helper function for asyncio.gather, does not return a value
async def getProductReviewsIgnoreResponse(product_id, headers):
getProductReviews(product_id, headers)
# flood reviews with unnecessary requests to demonstrate Istio rate limiting, asynchoronously
async def floodReviewsAsynchronously(product_id, headers):
# the response is disregarded
await asyncio.gather(*(getProductReviewsIgnoreResponse(product_id, headers) for _ in range(flood_factor)))
# flood reviews with unnecessary requests to demonstrate Istio rate limiting
def floodReviews(product_id, headers):
loop = asyncio.new_event_loop()
loop.run_until_complete(floodReviewsAsynchronously(product_id, headers))
loop.close()
@app.route('/productpage')
@trace()
def front():
product_id = 0 # TODO: replace default value
headers = getForwardHeaders(request)
user = session.get('user', '')
product = getProduct(product_id)
detailsStatus, details = getProductDetails(product_id, headers)
if flood_factor > 0:
floodReviews(product_id, headers)
reviewsStatus, reviews = getProductReviews(product_id, headers)
return render_template(
'productpage.html',
detailsStatus=detailsStatus,
reviewsStatus=reviewsStatus,
product=product,
details=details,
reviews=reviews,
user=user)
# The API:
@app.route('/api/v1/products')
def productsRoute():
return json.dumps(getProducts()), 200, {'Content-Type': 'application/json'}
@app.route('/api/v1/products/<product_id>')
@trace()
def productRoute(product_id):
headers = getForwardHeaders(request)
status, details = getProductDetails(product_id, headers)
return json.dumps(details), status, {'Content-Type': 'application/json'}
@app.route('/api/v1/products/<product_id>/reviews')
@trace()
def reviewsRoute(product_id):
headers = getForwardHeaders(request)
status, reviews = getProductReviews(product_id, headers)
return json.dumps(reviews), status, {'Content-Type': 'application/json'}
@app.route('/api/v1/products/<product_id>/ratings')
@trace()
def ratingsRoute(product_id):
headers = getForwardHeaders(request)
status, ratings = getProductRatings(product_id, headers)
return json.dumps(ratings), status, {'Content-Type': 'application/json'}
# Data providers:
def getProducts():
return [
{
'id': 0,
'title': 'The Comedy of Errors',
'descriptionHtml': '<a href="https://en.wikipedia.org/wiki/The_Comedy_of_Errors">Wikipedia Summary</a>: The Comedy of Errors is one of <b>William Shakespeare\'s</b> early plays. It is his shortest and one of his most farcical comedies, with a major part of the humour coming from slapstick and mistaken identity, in addition to puns and word play.'
}
]
def getProduct(product_id):
products = getProducts()
if product_id + 1 > len(products):
return None
else:
return products[product_id]
def getProductDetails(product_id, headers):
try:
url = details['name'] + "/" + details['endpoint'] + "/" + str(product_id)
res = requests.get(url, headers=headers, timeout=3.0)
except BaseException:
res = None
if res and res.status_code == 200:
return 200, res.json()
else:
status = res.status_code if res is not None and res.status_code else 500
return status, {'error': 'Sorry, product details are currently unavailable for this book.'}
def getProductReviews(product_id, headers):
# Do not remove. Bug introduced explicitly for illustration in fault injection task
# TODO: Figure out how to achieve the same effect using Envoy retries/timeouts
for _ in range(2):
try:
url = reviews['name'] + "/" + reviews['endpoint'] + "/" + str(product_id)
res = requests.get(url, headers=headers, timeout=3.0)
except BaseException:
res = None
if res and res.status_code == 200:
return 200, res.json()
status = res.status_code if res is not None and res.status_code else 500
return status, {'error': 'Sorry, product reviews are currently unavailable for this book.'}
def getProductRatings(product_id, headers):
try:
url = ratings['name'] + "/" + ratings['endpoint'] + "/" + str(product_id)
res = requests.get(url, headers=headers, timeout=3.0)
except BaseException:
res = None
if res and res.status_code == 200:
return 200, res.json()
else:
status = res.status_code if res is not None and res.status_code else 500
return status, {'error': 'Sorry, product ratings are currently unavailable for this book.'}
class Writer(object):
def __init__(self, filename):
self.file = open(filename, 'w')
def write(self, data):
self.file.write(data)
def flush(self):
self.file.flush()
if __name__ == '__main__':
if len(sys.argv) < 2:
logging.error("usage: %s port" % (sys.argv[0]))
sys.exit(-1)
p = int(sys.argv[1])
logging.info("start at port %s" % (p))
# Python does not work on an IPv6 only host
# https://bugs.python.org/issue10414
app.run(host='0.0.0.0', port=p, debug=True, threaded=True)
| apache-2.0 | 8,812,754,312,710,952,000 | 34.255814 | 360 | 0.674208 | false |
closeio/quotequail | quotequail/_patterns.py | 1 | 3164 | # -*- coding: utf-8 -*-
import re
REPLY_PATTERNS = [
u'^On (.*) wrote:$', # apple mail/gmail reply
u'^Am (.*) schrieb (.*):$', # German
u'^Le (.*) a écrit :$', # French
u'El (.*) escribió:$', # Spanish
u'^(.*) написал\(а\):$', # Russian
u'^Den (.*) skrev (.*):$', # Swedish
u'^Em (.*) escreveu:$', # Brazillian portuguese
u'([0-9]{4}/[0-9]{1,2}/[0-9]{1,2}) (.* <.*@.*>)$', # gmail (?) reply
]
REPLY_DATE_SPLIT_REGEX = re.compile(r'^(.*(:[0-9]{2}( [apAP]\.?[mM]\.?)?)), (.*)?$')
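# For example (illustrative input), REPLY_DATE_SPLIT_REGEX splits
# "Wed, Sep 13, 2013 at 2:45 PM, John Doe <john@example.com>" into the
# date part "Wed, Sep 13, 2013 at 2:45 PM" (group 1) and the remainder
# "John Doe <john@example.com>" (group 4).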
FORWARD_MESSAGES = [
# apple mail forward
'Begin forwarded message', 'Anfang der weitergeleiteten E-Mail',
u'Début du message réexpédié', 'Inicio del mensaje reenviado',
# gmail/evolution forward
'Forwarded [mM]essage', 'Mensaje reenviado', 'Vidarebefordrat meddelande',
# outlook
'Original [mM]essage', 'Ursprüngliche Nachricht', 'Mensaje [oO]riginal',
# Thunderbird forward
u'Message transféré',
# mail.ru forward (Russian)
u'Пересылаемое сообщение',
]
# We yield this pattern to simulate Outlook forward styles. It is also used for
# some emails forwarded by Yahoo.
FORWARD_LINE = '________________________________'
FORWARD_PATTERNS = [
'^{}$'.format(FORWARD_LINE),
] + ['^---+ ?%s ?---+$' % p for p in FORWARD_MESSAGES] \
+ ['^%s:$' % p for p in FORWARD_MESSAGES]
FORWARD_STYLES = [
# Outlook
'border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in',
]
HEADER_RE = re.compile(r'\*?([-\w ]+):\*?(.*)$', re.UNICODE)
HEADER_MAP = {
'from': 'from',
'von': 'from',
'de': 'from',
u'от кого': 'from',
u'från': 'from',
'to': 'to',
'an': 'to',
'para': 'to',
u'à': 'to',
u'pour': 'to',
u'кому': 'to',
u'till': 'to',
'cc': 'cc',
'kopie': 'cc',
'kopia': 'cc',
'bcc': 'bcc',
'cco': 'bcc',
'blindkopie': 'bcc',
'reply-to': 'reply-to',
'antwort an': 'reply-to',
u'répondre à': 'reply-to',
'responder a': 'reply-to',
'date': 'date',
'sent': 'date',
'received': 'date',
'datum': 'date',
'gesendet': 'date',
'enviado el': 'date',
'enviados': 'date',
'fecha': 'date',
u'дата': 'date',
'subject': 'subject',
'betreff': 'subject',
'asunto': 'subject',
'objet': 'subject',
'sujet': 'subject',
u'тема': 'subject',
u'ämne': 'subject',
}
COMPILED_PATTERN_MAP = {
'reply': [re.compile(regex) for regex in REPLY_PATTERNS],
'forward': [re.compile(regex) for regex in FORWARD_PATTERNS],
}
COMPILED_PATTERNS = sum(COMPILED_PATTERN_MAP.values(), [])
MULTIPLE_WHITESPACE_RE = re.compile(r'\s+')
# Amount to lines to join to check for potential wrapped patterns in plain text
# messages.
MAX_WRAP_LINES = 2
# minimum number of headers that we recognize
MIN_HEADER_LINES = 2
# minimum number of lines to recognize a quoted block
MIN_QUOTED_LINES = 3
# Characters at the end of line where we join lines without adding a space.
# For example, "John <\njohn@example>" becomes "John <john@example>", but
# "John\nDoe" becomes "John Doe".
STRIP_SPACE_CHARS = '<([{"\''
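# A minimal sketch (hypothetical helper, not part of quotequail's API) of
# the join rule described above:
#
#     def _join_lines(prev, line):
#         if prev and prev[-1] in STRIP_SPACE_CHARS:
#             return prev + line
#         return prev + ' ' + line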
| mit | 1,889,049,090,240,398,000 | 24.434426 | 84 | 0.565904 | false |
schesis/csx | csx.py | 1 | 37312 | #!/usr/bin/env python3
# csx.py - Extended Cascading Stylesheets.
# Copyright (C) 2009 Zero Piraeus
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""csx - Create, manipulate and convert CSX stylesheets.
CSX is a style language based on `CSS 2.1`_, with two improvements:
1. Rules may be nested inside other rules.
2. There are no at-rules (like @media or @import).
CSX is also somewhat stricter than CSS (for example, you can't split a
quoted string over more than one line, and comments must be terminated).
See the file `README.txt` provided with this module for a complete description
of the differences between CSS and CSX.
.. _CSS 2.1: http://www.w3.org/TR/CSS21
"""
import collections
import copy
import re
__author__ = "Zero Piraeus <[email protected]>"
__version__ = "1.0.0"
DEFAULT_STYLE = "pretty"
STYLES = ("bare", "compact", "pretty")
TAB = " "
WIDTH = 80
class Error(Exception):
"""An error which may usefully be displayed by the csx utility."""
# Allows incorporation of filename and line no. for output to end-users
# by the csx utility - see doc/lib/error.txt for details.
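    # For example (illustrative values):
    #     str(Error("unfinished comment", 3, "site.csx"))
    # evaluates to "'site.csx', line 3: unfinished comment".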
def __init__(self, msg:str="", line:int=0, source:str=""):
self.msg = msg
self.line = line
self.source = source
def __str__(self):
if self.source == "-":
source = "<stdin>"
elif self.source:
source = repr(self.source)
else:
source = ""
if self.line:
line = "line {0}".format(self.line)
else:
line = ""
location = ", ".join(x for x in (source, line) if x)
return ": ".join(x for x in (location, self.msg) if x)
class _Comparable(object):
"""Base class for objects with sane comparison behaviour."""
# The following methods define comparisons other than `__lt__` in terms of
# `__lt__`, which is not otherwise the case for subclasses of e.g. `set`.
def __ge__(self, other):
less_than = (self < other)
return NotImplemented if less_than is NotImplemented else not less_than
def __gt__(self, other):
return other < self
def __le__(self, other):
more_than = (other < self)
return NotImplemented if more_than is NotImplemented else not more_than
class _Renderable(object):
"""Base class for renderable objects.
Example::
>>> class Chars(_Renderable, list): pass
>>> Chars("squee")
Chars('s,q,u,e,e')
"""
SEP = {s: "," for s in STYLES}
def __repr__(self):
return "{0}({1!r})".format(self.__class__.__name__, str(self))
def __str__(self):
return self.render("compact", level=1) # `level=1` to prevent newlines.
def render(self, style:str=DEFAULT_STYLE, level:int=0) -> str:
"""Return a textual representation of `self`.
Arguments:
:`style`: One of `STYLES`.
:`level`: Indentation level of "pretty" output.
Example::
>>> selector = Selector("ol li a")
>>> print(selector.render())
ol li a
"""
return self.SEP[style].join(self)
class _Renderable_Recursive(_Renderable):
"""Base class for recursively renderable objects."""
def render(self, style:str=DEFAULT_STYLE, level:int=0) -> str:
"""Return a textual representation of `self`.
Arguments:
:`style`: One of `STYLES`.
:`level`: Indentation level of "pretty" output.
Example::
>>> ds = Declarations("color: red; font-family: serif")
>>> print(ds.render())
color: red;
font-family: serif;
"""
# `SEP` is defined by the subclass.
return self.SEP[style].join(x.render(style, level) for x in self)
class Declaration(_Renderable, tuple):
"""A CSX declaration; e.g. "color: red".
Examples::
>>> d1 = Declaration("color: red")
>>> d2 = Declaration(["color", "red"])
>>> assert d1 == d2
"""
SEP = {
"bare": ":",
"compact": ": ",
"pretty": ": ",
}
def __new__(cls, arg:collections.Sequence):
if isinstance(arg, str):
try:
key, value = Text(arg).iterate_as(cls)
except ValueError as cause:
raise Error("invalid declaration {0!r}".format(arg)) from cause
else:
return tuple.__new__(cls, (key.lower(), value))
else:
return tuple.__new__(cls, arg)
def render(self, style:str=DEFAULT_STYLE, level:int=0) -> str:
"""Return a textual representation of `self`.
Arguments:
:`style`: One of `STYLES`.
:`level`: Indentation level of "pretty" output.
Example::
>>> declaration = Declaration("color: red")
>>> print(declaration.render())
color: red;
"""
if style == "pretty":
return (TAB * level) + super().render(style, level) + ";"
else:
return super().render(style, level)
class Declarations(_Renderable_Recursive, _Comparable, dict):
"""A CSX declaration block; e.g. "color: red; font-family: serif".
Examples::
>>> ds1 = Declarations("color: red; font-family: serif")
>>> ds2 = Declarations({
... "color": "red",
... "font-family": "serif",
... })
>>> ds3 = Declarations([
... Declaration("color: red"),
... Declaration("font-family: serif"),
... ])
Underscores are replaced by hyphens in keyword arguments::
>>> ds4 = Declarations(color="red", font_family="serif")
>>> assert ds1 == ds2 == ds3 == ds4
Iterating over a `Declarations` instance returns `Declaration`
instances, in order::
>>> [d for d in ds1]
[Declaration('color: red'), Declaration('font-family: serif')]
Unlike `dict`, `Declarations` is orderable::
>>> ds5 = Declarations("color: blue; font-family: serif")
>>> assert ds1 > ds5
"""
SEP = {
"bare": ";",
"compact": "; ",
"pretty": "\n",
}
def __init__(self, arg:collections.Iterable=(), **kwargs):
if isinstance(arg, str):
arg = (Declaration(s) for s in Text(arg).iterate_as(Declarations))
elif isinstance(arg, collections.Mapping):
arg = arg.items()
kwargs = {k.replace("_", "-"): v for k, v in kwargs.items()}
self.update(dict(arg, **kwargs))
def __iter__(self):
return iter(sorted(self.items()))
def __lt__(self, other):
return sorted(self.items()) < sorted(other.items())
def items(self) -> set:
"""Extend `dict.items` to return a set of `Declaration` instances.
Example::
>>> ds = Declarations("color: red; font-family: serif")
>>> d1 = Declaration("color: red")
>>> d2 = Declaration("font-family: serif")
>>> assert ds.items() == {d1, d2}
"""
return {Declaration(x) for x in super().items()}
class Rule(_Renderable, _Comparable):
"""A CSX rule; e.g. "dt, dd { color: red; a { color: blue } }".
Attributes:
:`selectors`: A `Selectors` instance.
:`declarations`: A `Declarations` instance.
:`rules`: A `Rules` instance.
Examples::
>>> r1 = Rule("dt, dd { color: red; a { color: blue } }")
>>> ss = Selectors("dt, dd")
>>> ds = Declarations("color: red")
>>> rs = Rules("a { color: blue }")
>>> r2 = Rule(selectors=ss, declarations=ds, rules=rs)
>>> assert r1 == r2
If declarations specified in string and keyword arguments conflict,
the string argument takes precedence::
>>> ds = Declarations("background: yellow; color: red")
>>> Rule("dl { background: aqua }", declarations=ds)
Rule('dl { background: aqua; color: red }')
"""
SEP = {
"bare": ";",
"compact": "; ",
"pretty": "\n",
}
FORMAT = {
"bare": "{selectors}{{{block}}}",
"compact": "{selectors} {{ {block} }}",
"pretty": "{selectors} {{\n{block}\n{indent}}}",
}
def __init__(self, text:str="", **kwargs):
self.selectors = kwargs.pop("selectors", Selectors())
self.declarations = kwargs.pop("declarations", Declarations())
self.rules = kwargs.pop("rules", Rules())
if kwargs:
invalid = kwargs.keys().pop()
raise TypeError("invalid keyword argument {0!r}".format(invalid))
text_rules = Text(text).extract()
if len(text_rules) > 1:
raise ValueError("string argument describes multiple rules")
elif text_rules:
rule = text_rules.pop()
self.selectors.update(rule.selectors)
self.declarations.update(rule.declarations)
self.rules.extend(rule.rules)
def __eq__(self, other):
return (
(self.selectors == other.selectors)
and (self.declarations == other.declarations)
and (self.rules == other.rules)
)
def __lt__(self, other):
if self.selectors != other.selectors:
return self.selectors < other.selectors
elif self.declarations != other.declarations:
return self.declarations < other.declarations
elif self.rules != other.rules:
return self.rules < other.rules
else:
return False
def collapse(self):
"""Remove redundant nesting from `self`.
Example::
>>> rule = Rule("ol { li { a { color: blue } } }")
>>> rule.collapse()
>>> print(rule.render())
ol li a {
color: blue;
}
"""
while (
(not self.declarations)
and (len(self.selectors) == 1)
and (len(self.rules) == 1)
and (len(self.rules[0].selectors) == 1)
):
child = self.rules.pop()
self.selectors = child.selectors.prefix(self.selectors)
self.declarations = child.declarations
self.rules = child.rules
for rule in self.rules:
rule.collapse()
def render(self, style:str=DEFAULT_STYLE, level:int=0) -> str:
"""Return a textual representation of `self`.
Arguments:
:`style`: One of `STYLES`.
:`level`: Indentation level of "pretty" output.
Example::
>>> rule = Rule("dt, dd { color: red; a { color: blue } }")
>>> print(rule.render())
dt, dd {
color: red;
a {
color: blue;
}
}
"""
selectors = self.selectors.render(style, level)
if self.declarations and self.rules:
block = self.SEP[style].join((
self.declarations.render(style, level + 1),
self.rules.render(style, level + 1),
))
elif self.declarations:
block = self.declarations.render(style, level + 1)
elif self.rules:
block = self.rules.render(style, level + 1)
else:
block = ""
indent = TAB * level
return self.FORMAT[style].format(**locals())
class Rules(_Renderable_Recursive, list):
"""A CSX rule block; e.g. "ol { background: aqua }; li { color: red }".
Examples::
>>> rs1 = Rules('''
... ol { background: aqua }
... li { color: red }
... ''')
>>> rs2 = Rules([
... Rule("ol { background: aqua }"),
... Rule("li { color: red }"),
... ])
>>> assert rs1 == rs2
"""
SEP = {
"bare": "",
"compact": "\n",
"pretty": "\n\n",
}
# Nested rules are delimited differently to top-level rules - see
# `Rules.render`.
ALT_SEP = {
"bare": ";",
"compact": "; ",
"pretty": "\n",
}
def __init__(self, arg:collections.Iterable=()):
if isinstance(arg, str):
self[:] = Text(arg).extract()
else:
self[:] = arg
def _graft(self, tree:"Rules") -> list:
"""Merge `tree` into `self` and return a list of leaf rules.
Arguments:
:`tree`: As returned by `Selectors._sprout`.
Example::
>>> rules = Rules('''
... dl, ol, ul { margin: 1em; padding: 1em }
... dl { background: yellow }
... ol, ul { background: aqua }
... ''')
>>> tree = Selectors("dl dt, dl dd, ol li, ul li")._sprout()
>>> leaves = rules._graft(tree)
>>> sorted(leaves)
[Rule('dt, dd { }'), Rule('li { }')]
>>> print(rules.render("compact"))
dl, ol, ul { margin: 1em; padding: 1em }
dl { background: yellow; dt, dd { } }
ol, ul { background: aqua; li { } }
"""
# Used by `_merge_optimized` - somewhat voodoo, but the examples above
# should make it clear *what* is happening, if not *how*. Essentially
# we search for nodes in `self` that, in traditional CSS, would
# have the same selectors as those in `tree`, so we can insert
# declarations from `tree` at those nodes as appropriate. This is
# tricky stuff, though, and can only really be properly understood by
# reading `_merge_optimized` as well. Action by side-effect is not
# ideal, admittedly, but I can't see an easier way to do it.
leaves = []
target_selectors = [r.selectors for r in self]
for rule in tree:
if rule.selectors in target_selectors:
target = self[target_selectors.index(rule.selectors)]
else:
self.append(rule)
target = rule
if ("$", "leaf") in rule.declarations.items():
leaves.append(target)
del rule.declarations["$"]
if rule.rules:
leaves += target.rules._graft(rule.rules)
return leaves
def _merge_normalized(self, rule:Rule, _prefixes:set=set()):
"""Merge a rule into `self` for each declaration in `rule`.
Arguments:
:`rule`: Rule to be merged into `self`.
:`_prefixes`: Used internally - ***DO NOT SET***.
Example::
>>> normalized = Rules('''
... dl { background: yellow }
... dl { margin: 1em }
... dl { padding: 1em }
... dl dt, dl dd { color: red }
... ''')
>>> rule = Rule('''
... ol, ul {
... background: aqua;
... margin: 1em;
... padding: 1em;
... li { color: red }
... }
... ''')
>>> normalized._merge_normalized(rule)
>>> print(normalized.render("compact"))
dl { background: yellow }
dl, ol, ul { margin: 1em }
dl, ol, ul { padding: 1em }
dl dt, dl dd, ol li, ul li { color: red }
ol, ul { background: aqua }
"""
# Used by `Rules.optimize`.
selectors = rule.selectors.prefix(_prefixes)
for dec in (Declarations({d}) for d in rule.declarations):
for candidate in self:
if dec == candidate.declarations:
candidate.selectors.update(selectors)
break
else:
self.append(Rule(selectors=selectors.copy(), declarations=dec))
for subrule in rule.rules:
self._merge_normalized(subrule, selectors)
def _merge_optimized(self, rule:Rule):
"""Merge an optimized rule tree based on `rule` into `self`.
Arguments:
:`rule`: Rule to be merged into `self`.
Example::
>>> optimized = Rules('''
... dl { background: yellow }
... ol, ul { background: aqua }
... ''')
>>> rule = Rule("dl dt, dl dd, ol li, ul li { color: red }")
>>> optimized._merge_optimized(rule)
>>> print(optimized.render("compact"))
dl { background: yellow; dt, dd { color: red } }
ol, ul { background: aqua; li { color: red } }
"""
# Used by `Rules.optimize`.
for leaf in self._graft(rule.selectors._sprout()):
leaf.declarations.update(rule.declarations)
for subrule in rule.rules:
leaf.rules._merge_optimized(subrule)
def flatten(self, _prefixes:set=set()):
"""Convert `self` to un-nested rules.
Arguments:
:`_prefixes`: Used internally - ***DO NOT SET***.
Example::
>>> rules = Rules('''
...
... dl, ol, ul {
... margin: 1em;
... padding: 1em;
... }
...
... ol, ul {
... background: aqua;
... li { color: red }
... }
...
... dl {
... background: yellow;
... dt, dd { color: red }
... }
...
... ''')
>>> rules.flatten()
>>> print(rules.render("compact"))
dl, ol, ul { margin: 1em; padding: 1em }
ol, ul { background: aqua }
ol li, ul li { color: red }
dl { background: yellow }
dl dt, dl dd { color: red }
"""
flattened = []
for rule in self:
selectors = rule.selectors.prefix(_prefixes)
if rule.declarations:
new = Rule(selectors=selectors, declarations=rule.declarations)
flattened.append(new)
rule.rules.flatten(selectors)
flattened += rule.rules
self[:] = flattened
def optimize(self, collapse:bool=True, normalize:bool=False):
"""Optimize `self`.
Arguments:
:`collapse`: If True, collapse redundant nesting.
:`normalize`: If True, normalize `self` before optimizing
(dramatically alters rule order).
Example::
>>> rules = Rules('''
... dl { background: yellow }
... dl, ol, ul { margin: 1em }
... dl, ol, ul { padding: 1em }
... dl dt, dl dd, ol li, ul li { color: red }
... ol, ul { background: aqua }
... ''')
>>> rules.optimize()
>>> print(rules.render("compact"))
dl, ol, ul { margin: 1em; padding: 1em }
dl { background: yellow; dt, dd { color: red } }
ol, ul { background: aqua; li { color: red } }
"""
if normalize:
normalized = Rules()
for rule in self:
normalized._merge_normalized(rule)
rules = normalized
else:
rules = self
optimized = Rules()
for rule in rules:
optimized._merge_optimized(rule)
if collapse:
for rule in optimized:
rule.collapse()
optimized.sort()
self[:] = optimized
def render(self, style:str=DEFAULT_STYLE, level:int=0) -> str:
"""Return a textual representation of `self`.
Arguments:
:`style`: One of `STYLES`.
:`level`: Indentation level of "pretty" output.
Example::
>>> rules = Rules('''
...
... dl, ol, ul {
... margin: 1em;
... padding: 1em;
... }
...
... ol, ul {
... background: aqua;
... li { color: red }
... }
...
... dl {
... background: yellow;
... dt, dd { color: red }
... }
...
... ''')
>>> print(rules.render("compact"))
dl, ol, ul { margin: 1em; padding: 1em }
ol, ul { background: aqua; li { color: red } }
dl { background: yellow; dt, dd { color: red } }
"""
if (level == 0) or (self.SEP is self.ALT_SEP):
return super().render(style, level)
else:
self.SEP, cache = self.ALT_SEP, self.SEP
try:
return super().render(style, level)
finally:
self.SEP = cache
def sort(self, key=None, reverse=False):
"""Extend `list.sort` to recursively sort `self`.
Example::
>>> rules = Rules('''
... ol li, ul li { a { color: blue }; b { color: black } }
... dl dt, dl dd { b { color: black }; a { color: blue } }
... ''')
>>> rules.sort()
>>> print(rules.render("compact"))
dl dt, dl dd { a { color: blue }; b { color: black } }
ol li, ul li { a { color: blue }; b { color: black } }
"""
for rule in self:
rule.rules.sort(key, reverse)
super().sort(key=key, reverse=reverse)
class Selector(_Renderable, _Comparable, tuple):
"""A CSX selector; e.g. "ol > li a[lang|='en']".
Example::
>>> Selector("ol > li a[lang|='en']")
Selector("ol > li a[lang|='en']")
"""
# Match and retain:
# > or + (possibly followed by whitespace), followed by any sequence
# of characters except whitespace, > or +
# Match and discard:
# whitespace (where not part of a match as detailed above)
ITER = re.compile("([>+]\s*[^\s>+]+)|\s+")
SEP = {s: " " for s in STYLES}
# Both `SORT_ATTRIBUTES` and `SORT_ELEMENTS` are used to create
# substitutions so that lexicographical sorting produces output in a more
# useful order; i.e `:link` before `:visited`, `dt` before `dd` etc.
SORT_ATTRIBUTES = tuple((x, "~{0:02d} ".format(n)) for n, x in enumerate((
":link",
":visited",
":focus",
":hover",
":active",
":", "[", ".", "#",
)))
# Not a dict, because order is important.
SORT_ELEMENTS = (
("*", " *"),
("html", " * ~"),
("body", " * ~~"),
("dd", "dt ~"),
("input", "label ~"),
("td", "th ~"),
("tfoot", "thead ~"),
("tbody", "thead ~~"),
)
__SORT_ELEMENT_KEYS = tuple(e[0] for e in SORT_ELEMENTS) # for speed
__lt_cache = {}
def __new__(cls, arg:collections.Iterable):
if isinstance(arg, str):
self = tuple.__new__(cls, Text(arg).iterate_as(Selector))
else:
self = tuple.__new__(cls, arg)
return self
def __lt__(self, other):
# This comparison is expensive and frequently performed, so we
# optimize it by caching the result.
try:
return self.__lt_cache[(self, other)]
except KeyError:
for self_item, other_item in zip(self._sort_key, other._sort_key):
if self_item != other_item:
result = self_item < other_item
break
else:
result = len(self) < len(other)
self.__lt_cache[(self, other)] = result
return result
@property
def _sort_key(self) -> collections.Iterator:
"""An ordering key for `self`.
Example::
>>> selector = Selector("body dl[lang='en'] dt + dd a:link")
>>> list(selector._sort_key)
[' * ~~', "dl~06 lang='en']", 'dt', ' dt ~', 'a~00 ']
"""
# See `SORT_ATTRIBUTES`, `SORT_ELEMENTS` to better understand what's
# going on above.
for item in self:
# Sibling and child combinators sort before type selectors.
if item[0] == "+":
item = " " + item[1:].lstrip()
if item[0] == ">":
item = " " + item[1:].lstrip()
# Bare class, id etc. selectors sort after type selectors.
for target, replacement in self.SORT_ATTRIBUTES:
item = item.replace(target, replacement)
# Conceptually related type selectors sort in the "right" order.
if item.lstrip().startswith(self.__SORT_ELEMENT_KEYS):
for target, replacement in self.SORT_ELEMENTS:
if item.lstrip().startswith(target):
item = item.replace(target, replacement, 1)
break
yield item
class Selectors(_Renderable_Recursive, _Comparable, set):
"""A CSX selector block; e.g. "dl dd, dl dt, ol li, ul li".
Example::
>>> Selectors("dl dd, dl dt, ol li, ul li")
Selectors('dl dt, dl dd, ol li, ul li')
"""
SEP = {
"bare": ",",
"compact": ", ",
"pretty": ", ",
}
def __init__(self, arg:collections.Iterable=()):
if isinstance(arg, str):
arg = Text(arg).iterate_as(Selectors)
set.__init__(self, (Selector(x) for x in arg))
def __iter__(self):
return iter(sorted(set(self)))
def __lt__(self, other):
# This comparison is expensive and frequently performed, so we
# optimize it by caching the result.
for self_item, other_item in zip(self, other):
if self_item != other_item:
return self_item < other_item
else: # indentation is correct! This is the else of the for loop.
return len(self) > len(other)
def _sprout(self, _prefix:collections.Iterable=()) -> {Rule, Rules}:
"""Return a graftable tree of optimized rules for `self`.
Arguments:
:`_prefix`: Used internally - ***DO NOT SET***.
Example::
>>> example = Selectors("ul, ol, ul ul, ol ol, ol ul, ul ol")
>>> print(example._sprout().render())
ol, ul {
$: leaf;
ol, ul {
$: leaf;
}
}
"""
# Used by `Rules._graft` - turns a selector block into an
# optimally-nested `Rules` object with place-markers for where any
# declarations for those selectors should go. This was murder to write,
# but I *think* it now works in all cases. No line-by-line comments
# because they just padded out the code and made it even harder to
# understand - you just have to read it very very carefully.
tree = Rule(selectors=Selectors({_prefix}))
for node in {s[:1] for s in self}:
if node:
branches = {s[1:] for s in self if s[:1] == node}
rule = Selectors(branches)._sprout(node)
rule_spec = (rule.rules, rule.declarations)
tree_specs = [(r.rules, r.declarations) for r in tree.rules]
if rule_spec in tree_specs:
target = tree.rules[tree_specs.index(rule_spec)]
target.selectors.add(Selector(node))
else:
tree.rules.append(rule)
else:
tree.declarations["$"] = "leaf"
if _prefix:
return tree
else:
tree.rules.sort()
return tree.rules
def copy(self) -> "Selectors":
"""Extend `set.copy` to return a `Selectors` instance."""
# set.copy() returns a set, which isn't what we want.
return copy.copy(self)
def prefix(self, prefixes:set) -> "Selectors":
"""Prefix `self` with `prefixes`.
Arguments:
:`prefixes`: Selectors with which to prefix `self`.
Example::
>>> prefixed = Selectors("b, i").prefix(Selectors("p, a"))
>>> Selectors(prefixed)
Selectors('a b, a i, p b, p i')
"""
if prefixes:
return Selectors({Selector(p + s) for p in prefixes for s in self})
else:
return self.copy()
def render(self, style:str=DEFAULT_STYLE, level:int=0) -> str:
"""Return a textual representation of `self`.
Arguments:
:`style`: One of `STYLES`.
:`level`: Indentation level of "pretty" output.
Example::
>>> selectors = Selectors("ol li, ul li, dl dd, dl dt")
>>> print(selectors.render("compact"))
dl dt, dl dd, ol li, ul li
>>> table = Selectors('''
...
... table tbody tr td, table tbody tr th,
... table tfoot tr td, table tfoot tr th,
... table thead tr td, table thead tr th,
...
... ''')
>>> print(table.render(level=1))
table thead tr th,
table thead tr td,
table tfoot tr th,
table tfoot tr td,
table tbody tr th,
table tbody tr td
"""
if style == "pretty":
indent = TAB * level
result = indent + super().render(style, level)
if len(result) <= WIDTH:
return result
else:
cache = self.SEP["pretty"]
try:
self.SEP["pretty"] = ",\n" + indent
return indent + super().render(style, level)
finally:
self.SEP["pretty"] = cache
else:
return super().render(style, level)
class Text(str):
r"""An escaped string.
Attributes:
:`subs`: A list of quoted substrings extracted from input.
Instantiated by passing a string to the constructor.
Quoted substrings are replaced by `str.format` replacement fields, and
appended to the `subs` attribute. They must not contain newlines,
unless escaped with a backslash. CSS comments are removed.
`str(instance_of_Text)` reconstructs the original string,
removing extraneous whitespace.
Examples::
>>> aymara_quechua = Text('''
...
... /* indigenous Altiplano languages */
...
... p[lang|='ay'],
... p[lang|='qu'] {
...
... ''')
>>> str(aymara_quechua)
"p[lang|='ay'], p[lang|='qu'] {"
>>> aymara_quechua[:]
'\n\n\n\n p[lang|={0}],\n p[lang|={1}] {{\n\n'
>>> aymara_quechua.subs
["'ay'", "'qu'"]
Instantiated by:
- `Declaration.__new__`
- `Declarations.__init__`
- `Rule.__init__`
- `Rules.__init__`
- `Selector.__new__`
- `Selectors.__init__`
"""
# Match any of ...
#
# - any seqence of characters except semicolons, or braces that aren't part
# of "{int}"-style str.format replacement fields, followed by "{{"
#
# - a semicolon
#
# - "}}", unless followed by an odd number of closing braces (to avoid
# catching the ends of replacement fields)
#
# - the start of the string
#
# ... followed by any all-whitespace sequence.
ITER = re.compile("((?:(?:{[0-9]+}|[^;{}])*{{|;|}}(?!}(?:}})*[^}])|^)\s*)")
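    # For example (illustrative), iterating over Text("a { color: red }")
    # yields the tokens "a {", "color: red" and "}".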
# Match:
# - a single or double quotation mark, unless escaped with a backslash
# - any of "/*", "*/", "{", "}", "@", "<!--" or "-->"
STRIP = re.compile(r"""((?<!\\)['"]|/\*|\*/|[{}@]|<!--|-->)""")
def __new__(cls, text:str):
# Lots of work to make sure that comment markers are only treated as
# such outside strings, quotes are only treated as string delimiters
# outside comments, etc. It would probably have made more sense to use
# a parser and a grammar, but hell, it's written now and it works.
escaped = ""
subs = []
delimiter = None
for position, line in enumerate(text.split("\n"), start=1):
for item in re.split(cls.STRIP, line):
if delimiter:
if delimiter in {'"', "'"}:
subs[-1] += item
if item == delimiter:
delimiter = None
elif item in {'{', "}"}:
escaped += item * 2
elif item == "/*":
delimiter = "*/"
elif item in {'"', "'"}:
delimiter = item
escaped += "{{{0}}}".format(len(subs))
subs.append(item)
elif item in {"@", "<!--", "-->"}:
raise Error("syntax error: {0!r}".format(item), position)
else:
escaped += item
if delimiter in {"'", '"'}:
raise Error("unfinished string", position)
escaped += "\n"
if delimiter:
raise Error("unfinished comment", position)
self = str.__new__(cls, escaped[:-1])
self.subs = subs
return self
def __iter__(self):
position = 1
for raw_token in re.split(self.ITER, self):
if raw_token.strip() not in {"", ";"}:
token = " ".join(raw_token.split()).format(*self.subs)
try:
yield token
except Exception:
# Spit out the line no. if caller throws something at us.
yield position
return
position += raw_token.count("\n")
def __repr__(self):
return "{0}({1!r})".format(self.__class__.__name__, str(self))
def __str__(self):
return " ".join(self.split()).format(*self.subs)
def extract(self, _stream:collections.Iterable=()) -> {Rules, tuple}:
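        """Parse the CSX source in `self` into a tree of rules.

        Arguments:
            :`_stream`: Used internally - ***DO NOT SET***.

        Returns a `Rules` instance when called normally; recursive
        internal calls return a `(declarations, rules)` pair instead.
        """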
rules = Rules()
if _stream:
# Recursive call, then.
stream = _stream
declarations = Declarations()
else:
stream = iter(self)
for token in stream:
try:
if token == "}":
break
elif token.endswith("{"):
rule = Rule(selectors=Selectors(token.rstrip(" {")))
rule.declarations, rule.rules = self.extract(stream)
rules.append(rule)
elif _stream:
declarations.update({Declaration(token)})
else:
try:
Declaration(token)
except Exception:
raise Error("syntax error: {0!r}".format(token))
else:
raise Error("declaration outside rule")
except Error as exc:
exc.line = stream.throw(exc) # get the line no.
raise
else:
if _stream:
raise Error("unfinished rule")
else:
return rules
if _stream:
return declarations, rules
else:
raise Error("unmatched '}", stream.throw(Error)) # get the line no.
def iterate_as(self, cls) -> collections.Iterator:
"""Iterate over tokens in `self` as defined by `cls`.
Arguments:
:`cls`: A subclass of `csx._Renderable`.
Examples::
>>> for cls, text in [
... (Declaration, "color: red"),
... (Declarations, "color: red; font-family: serif"),
... (Selector, "ol li a"),
... (Selectors, "dl dd a, dl dt a, ol li a, ul li a"),
... ]:
... list(Text(text).iterate_as(cls))
['color', 'red']
['color: red', 'font-family: serif']
['ol', 'li', 'a']
['dl dd a', 'dl dt a', 'ol li a', 'ul li a']
"""
if cls == Selector:
# Special-case Selector to deal with child/sibling combinators, and
# take the opportunity to normalize case while we're at it.
tokens = (x.lower() for x in re.split(cls.ITER, self) if x)
else:
tokens = (x for x in self.split(cls.SEP["bare"]) if x.strip())
for raw_token in tokens:
yield " ".join(raw_token.split()).format(*self.subs)
if __name__ == "__main__":
# Exit with an error message if we're run as a script.
import os, sys
if "csx" in os.listdir(os.curdir):
SCRIPT = "./csx"
else:
SCRIPT = "csx"
sys.exit("Try:\n{0} --help".format(SCRIPT))
| gpl-3.0 | -684,098,559,107,336,600 | 30.380992 | 79 | 0.49598 | false |
zalando/patroni | tests/test_aws.py | 1 | 2023 | import boto.ec2
import sys
import unittest
import urllib3
from mock import Mock, patch
from collections import namedtuple
from patroni.scripts.aws import AWSConnection, main as _main
class MockEc2Connection(object):
@staticmethod
def get_all_volumes(*args, **kwargs):
oid = namedtuple('Volume', 'id')
return [oid(id='a'), oid(id='b')]
@staticmethod
def create_tags(objects, *args, **kwargs):
if len(objects) == 0:
raise boto.exception.BotoServerError(503, 'Service Unavailable', 'Request limit exceeded')
return True
@patch('boto.ec2.connect_to_region', Mock(return_value=MockEc2Connection()))
class TestAWSConnection(unittest.TestCase):
@patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(
status=200, body=b'{"instanceId": "012345", "region": "eu-west-1"}')))
def setUp(self):
self.conn = AWSConnection('test')
def test_on_role_change(self):
self.assertTrue(self.conn.on_role_change('master'))
with patch.object(MockEc2Connection, 'get_all_volumes', Mock(return_value=[])):
self.conn._retry.max_tries = 1
self.assertFalse(self.conn.on_role_change('master'))
@patch('patroni.scripts.aws.requests_get', Mock(side_effect=Exception('foo')))
def test_non_aws(self):
conn = AWSConnection('test')
self.assertFalse(conn.on_role_change("master"))
@patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(status=200, body=b'foo')))
def test_aws_bizare_response(self):
conn = AWSConnection('test')
self.assertFalse(conn.aws_available())
@patch('patroni.scripts.aws.requests_get', Mock(return_value=urllib3.HTTPResponse(
status=200, body=b'{"instanceId": "012345", "region": "eu-west-1"}')))
@patch('sys.exit', Mock())
def test_main(self):
self.assertIsNone(_main())
sys.argv = ['aws.py', 'on_start', 'replica', 'foo']
self.assertIsNone(_main())
| mit | -7,008,248,690,051,154,000 | 35.781818 | 112 | 0.659911 | false |
alex/jwcrypto | jwcrypto/jwk.py | 1 | 10325 | # Copyright (C) 2015 JWCrypto Project Contributors - see LICENSE file
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import ec
from jwcrypto.common import base64url_decode
import json
# draft-ietf-jose-json-web-algorithms-24 - 7.4
JWKTypesRegistry = {'EC': 'Elliptic Curve',
'RSA': 'RSA',
'oct': 'Octet sequence'}
# draft-ietf-jose-json-web-algorithms-24 - 7.5
# It is part of the JWK Parameters Registry, but we want a more
# specific map for internal usage
JWKValuesRegistry = {'EC': {'crv': ('Curve', 'Public'),
'x': ('X Coordinate', 'Public'),
'y': ('Y Coordinate', 'Public'),
'd': ('ECC Private Key', 'Private')},
'RSA': {'n': ('Modulus', 'Public'),
'e': ('Exponent', 'Public'),
'd': ('Private Exponent', 'Private'),
'p': ('First Prime Factor', 'Private'),
'q': ('Second Prime Factor', 'Private'),
'dp': ('First Factor CRT Exponent', 'Private'),
'dq': ('Second Factor CRT Exponent', 'Private'),
'qi': ('First CRT Coefficient', 'Private')},
'oct': {'k': ('Key Value', 'Private')}}
JWKParamsRegistry = {'kty': ('Key Type', 'Public', ),
'use': ('Public Key Use', 'Public'),
'key_ops': ('Key Operations', 'Public'),
'alg': ('Algorithm', 'Public'),
'kid': ('Key ID', 'Public'),
'x5u': ('X.509 URL', 'Public'),
'x5c': ('X.509 Certificate Chain', 'Public'),
'x5t': ('X.509 Certificate SHA-1 Thumbprint', 'Public'),
'x5t#S256': ('X.509 Certificate SHA-256 Thumbprint',
'Public')}
# draft-ietf-jose-json-web-algorithms-24 - 7.6
JWKEllipticCurveRegistry = {'P-256': 'P-256 curve',
'P-384': 'P-384 curve',
'P-521': 'P-521 curve'}
# draft-ietf-jose-json-web-key-41 - 8.2
JWKUseRegistry = {'sig': 'Digital Signature or MAC',
'enc': 'Encryption'}
# draft-ietf-jose-json-web-key-41 - 8.2
JWKOperationsRegistry = {'sign': 'Compute digital Signature or MAC',
'verify': 'Verify digital signature or MAC',
'encrypt': 'Encrypt content',
'decrypt': 'Decrypt content and validate'
' decryption, if applicable',
'wrapKey': 'Encrypt key',
'unwrapKey': 'Decrypt key and validate'
' decryption, if applicable',
'deriveKey': 'Derive key',
'deriveBits': 'Derive bits not to be used as a key'}
class InvalidJWKType(Exception):
def __init__(self, value=None):
super(InvalidJWKType, self).__init__()
self.value = value
def __str__(self):
return 'Unknown type "%s", valid types are: %s' % (
self.value, JWKTypesRegistry.keys())
class InvalidJWKUsage(Exception):
def __init__(self, use, value):
super(InvalidJWKUsage, self).__init__()
self.value = value
self.use = use
def __str__(self):
if self.use in JWKUseRegistry.keys():
usage = JWKUseRegistry[self.use]
else:
usage = 'Unknown(%s)' % self.use
if self.value in JWKUseRegistry.keys():
valid = JWKUseRegistry[self.value]
else:
valid = 'Unknown(%s)' % self.value
return 'Invalid usage requested: "%s". Valid for: "%s"' % (usage,
valid)
class InvalidJWKOperation(Exception):
def __init__(self, operation, values):
super(InvalidJWKOperation, self).__init__()
self.op = operation
self.values = values
def __str__(self):
if self.op in JWKOperationsRegistry.keys():
op = JWKOperationsRegistry[self.op]
else:
op = 'Unknown(%s)' % self.op
valid = list()
for v in self.values:
if v in JWKOperationsRegistry.keys():
valid.append(JWKOperationsRegistry[v])
else:
valid.append('Unknown(%s)' % v)
return 'Invalid operation requested: "%s". Valid for: "%s"' % (op,
valid)
class InvalidJWKValue(Exception):
pass
class JWK(object):
def __init__(self, **kwargs):
names = kwargs.keys()
self._params = dict()
for name in JWKParamsRegistry.keys():
if name in kwargs:
self._params[name] = kwargs[name]
while name in names:
names.remove(name)
kty = self._params.get('kty', None)
if kty not in JWKTypesRegistry:
raise InvalidJWKType(kty)
self._key = dict()
for name in JWKValuesRegistry[kty].keys():
if name in kwargs:
self._key[name] = kwargs[name]
while name in names:
names.remove(name)
if len(names) != 0:
raise InvalidJWKValue('Unknown key parameters: %s' % names)
if len(self._key) == 0:
raise InvalidJWKValue('No Key Values found')
def export(self):
d = dict()
d.update(self._params)
d.update(self._key)
return json.dumps(d)
@property
def key_id(self):
return self._params.get('kid', None)
def get_curve(self, arg):
k = self._key
if self._params['kty'] != 'EC':
raise InvalidJWKType('Not an EC key')
if arg and k['crv'] != arg:
raise InvalidJWKValue('Curve requested is "%s", but '
'key curve is "%s"' % (arg, k['crv']))
if k['crv'] == 'P-256':
return ec.SECP256R1()
elif k['crv'] == 'P-384':
return ec.SECP384R1()
elif k['crv'] == 'P-521':
return ec.SECP521R1()
else:
raise InvalidJWKValue('Unknown Elliptic Curve Type')
def _check_constraints(self, usage, operation):
use = self._params.get('use', None)
if use and use != usage:
raise InvalidJWKUsage(usage, use)
ops = self._params.get('key_ops', None)
if ops:
if not isinstance(ops, list):
ops = [ops]
if operation not in ops:
raise InvalidJWKOperation(operation, ops)
# TODO: check alg ?
def _decode_int(self, n):
return int(base64url_decode(n).encode('hex'), 16)
def _rsa_pub(self, k):
return rsa.RSAPublicNumbers(self._decode_int(k['e']),
self._decode_int(k['n']))
def _rsa_pri(self, k):
return rsa.RSAPrivateNumbers(self._decode_int(k['p']),
self._decode_int(k['q']),
self._decode_int(k['d']),
self._decode_int(k['dp']),
self._decode_int(k['dq']),
self._decode_int(k['qi']),
self._rsa_pub(k))
def _ec_pub(self, k, curve):
return ec.EllipticCurvePublicNumbers(self._decode_int(k['x']),
self._decode_int(k['y']),
self.get_curve(curve))
def _ec_pri(self, k, curve):
return ec.EllipticCurvePrivateNumbers(self._decode_int(k['d']),
self._ec_pub(k, curve))
def sign_key(self, arg=None):
self._check_constraints('sig', 'sign')
if self._params['kty'] == 'oct':
return self._key['k']
elif self._params['kty'] == 'RSA':
return self._rsa_pri(self._key).private_key(default_backend())
elif self._params['kty'] == 'EC':
return self._ec_pri(self._key, arg).private_key(default_backend())
else:
raise NotImplementedError
def verify_key(self, arg=None):
self._check_constraints('sig', 'verify')
if self._params['kty'] == 'oct':
return self._key['k']
elif self._params['kty'] == 'RSA':
return self._rsa_pub(self._key).public_key(default_backend())
elif self._params['kty'] == 'EC':
return self._ec_pub(self._key, arg).public_key(default_backend())
else:
raise NotImplementedError
def encrypt_key(self, arg=None):
self._check_constraints('enc', 'encrypt')
if self._params['kty'] == 'oct':
return self._key['k']
elif self._params['kty'] == 'RSA':
return self._rsa_pub(self._key).public_key(default_backend())
elif self._params['kty'] == 'EC':
return self._ec_pub(self._key, arg).public_key(default_backend())
else:
raise NotImplementedError
def decrypt_key(self, arg=None):
self._check_constraints('enc', 'decrypt')
if self._params['kty'] == 'oct':
return self._key['k']
elif self._params['kty'] == 'RSA':
return self._rsa_pri(self._key).private_key(default_backend())
elif self._params['kty'] == 'EC':
return self._ec_pri(self._key, arg).private_key(default_backend())
else:
raise NotImplementedError
class JWKSet(set):
def add(self, elem):
if not isinstance(elem, JWK):
raise TypeError('Only JWK objects are valid elements')
set.add(self, elem)
def export(self):
keys = list()
for jwk in self:
keys.append(json.loads(jwk.export()))
return json.dumps({'keys': keys})
def get_key(self, kid):
for jwk in self:
if jwk.key_id == kid:
return jwk
return None
| lgpl-3.0 | -4,073,168,078,696,465,000 | 36.40942 | 78 | 0.495787 | false |
DataTorrent/koya | koya-slider-package/package/scripts/kafka.py | 1 | 2964 | import logging
import sys
import os
import inspect
import pprint
import util
from resource_management import *
logger = logging.getLogger()
class Kafka(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
def start(self, env):
import params
env.set_params(params)
self.configure(env)
# log the component configuration
ppp = pprint.PrettyPrinter(indent=4)
logger.info("broker component config: " + ppp.pformat(params.broker_config))
# log the environment variables
logger.info("Env Variables:")
for key in os.environ.keys():
logger.info("%10s %s \n" % (key,os.environ[key]))
pass
# This updating thing is changing files in-place and it really
# should not (static cache)
# For kafka 0.8.1.1, there is no way to set the log dir to location other than params.app_root + "/logs"
if(params.kafka_version.find("0.8.1.1") != -1):
os.symlink(params.app_root + "/logs", params.app_log_dir + "/kafka")
else:
kafkaLogConfig = {"kafka.logs.dir" : params.app_log_dir + "/kafka"}
util.updating(params.app_root + "/config/log4j.properties", kafkaLogConfig)
# File(format("{params.app_root}/conf/log4j.properties"),
# owner=params.app_user,
# content=InlineTemplate(param.log4j_prop))
pass
# update the broker properties for different brokers
server_conf=format("{params.conf_dir}/server.slider.properties")
PropertiesFile(server_conf, properties = params.broker_config, owner=params.app_user)
# execute the process
process_cmd = format("{app_root}/bin/kafka-server-start.sh {server_conf}")
os.environ['LOG_DIR'] = params.app_log_dir + "/kafka"
HEAP_OPT = ""
if params.xmx:
HEAP_OPT = HEAP_OPT + " -Xmx" + params.xmx
pass
if params.xms:
HEAP_OPT = HEAP_OPT + " -Xms" + params.xms
pass
if HEAP_OPT:
os.environ['KAFKA_HEAP_OPTS'] = HEAP_OPT
pass
Execute(process_cmd,
user=params.app_user,
logoutput=True,
wait_for_finish=False,
pid_file=params.pid_file
)
def stop(self, env):
import params
env.set_params(params)
pid = format("`cat {pid_file}` >/dev/null 2>&1")
Execute(format("kill {pid}"),
user=params.app_user
)
Execute(format("kill -9 {pid}"),
ignore_failures=True,
user=params.app_user
)
Execute(format("rm -f {pid_file}"),
user=params.app_user)
def status(self, env):
import status_params
env.set_params(status_params)
## jps_cmd = format("{java64_home}/bin/jps")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
## cmd = format("echo `{jps_cmd} | grep Kafka | cut -d' ' -f1` > {pid_file}")
## Execute(cmd, not_if=no_op_test)
check_process_status(status_params.pid_file)
if __name__ == "__main__":
Kafka().execute()
| apache-2.0 | -186,110,986,597,655,680 | 29.875 | 108 | 0.634615 | false |
nerdvegas/rez | src/rez/utils/sourcecode.py | 1 | 11114 | from rez.utils.formatting import indent
from rez.utils.data_utils import cached_property
from rez.utils.logging_ import print_debug
from rez.utils import py23
from inspect import getsourcelines
from textwrap import dedent
from glob import glob
import traceback
import os.path
def early():
"""Used by functions in package.py to harden to the return value at build time.
The term 'early' refers to the fact these package attribute are evaluated
early, ie at build time and before a package is installed.
"""
def decorated(fn):
setattr(fn, "_early", True)
return fn
return decorated
def late():
"""Used by functions in package.py that are evaluated lazily.
The term 'late' refers to the fact these package attributes are evaluated
late, ie when the attribute is queried for the first time.
If you want to implement a package.py attribute as a function, you MUST use
this decorator - otherwise it is understood that you want your attribute to
be a function, not the return value of that function.
"""
from rez.package_resources import package_rex_keys
def decorated(fn):
# this is done here rather than in standard schema validation because
# the latter causes a very obfuscated error message
if fn.__name__ in package_rex_keys:
raise ValueError("Cannot use @late decorator on function '%s'"
% fn.__name__)
setattr(fn, "_late", True)
_add_decorator(fn, "late")
return fn
return decorated
def include(module_name, *module_names):
"""Used by functions in package.py to have access to named modules.
See the 'package_definition_python_path' config setting for more info.
"""
def decorated(fn):
_add_decorator(fn, "include", nargs=[module_name] + list(module_names))
return fn
return decorated
def _add_decorator(fn, name, **kwargs):
if not hasattr(fn, "_decorators"):
setattr(fn, "_decorators", [])
kwargs.update({"name": name})
fn._decorators.append(kwargs)
class SourceCodeError(Exception):
def __init__(self, msg, short_msg):
super(SourceCodeError, self).__init__(msg)
self.short_msg = short_msg
class SourceCodeCompileError(SourceCodeError):
pass
class SourceCodeExecError(SourceCodeError):
pass
class SourceCode(object):
"""Wrapper for python source code.
This object is aware of the decorators defined in this sourcefile (such as
'include') and deals with them appropriately.
"""
def __init__(self, source=None, func=None, filepath=None,
eval_as_function=True):
self.source = (source or '').rstrip()
self.func = func
self.filepath = filepath
self.eval_as_function = eval_as_function
self.package = None
self.funcname = None
self.decorators = []
if self.func is not None:
self._init_from_func()
def copy(self):
other = SourceCode.__new__(SourceCode)
other.source = self.source
other.func = self.func
other.filepath = self.filepath
other.eval_as_function = self.eval_as_function
other.package = self.package
other.funcname = self.funcname
other.decorators = self.decorators
return other
def _init_from_func(self):
self.funcname = self.func.__name__
self.decorators = getattr(self.func, "_decorators", [])
# get txt of function body. Skips sig and any decorators. Assumes that
# only the decorators in this file (such as 'include') are used.
loc = getsourcelines(self.func)[0][len(self.decorators) + 1:]
code = dedent(''.join(loc))
# align lines that start with a comment (#)
codelines = code.split('\n')
linescount = len(codelines)
for i, line in enumerate(codelines):
if line.startswith('#'):
nextindex = i + 1 if i < linescount else i - 1
nextline = codelines[nextindex]
while nextline.startswith('#'):
nextline = codelines[nextindex]
nextindex = (nextindex + 1 if nextindex < linescount
else nextindex - 1)
firstchar = len(nextline) - len(nextline.lstrip())
codelines[i] = '%s%s' % (nextline[:firstchar], line)
code = '\n'.join(codelines).rstrip()
code = dedent(code)
self.source = code
@cached_property
def includes(self):
info = self._get_decorator_info("include")
if not info:
return None
return set(info.get("nargs", []))
@cached_property
def late_binding(self):
info = self._get_decorator_info("late")
return bool(info)
@cached_property
def evaluated_code(self):
if self.eval_as_function:
funcname = self.funcname or "_unnamed"
code = indent(self.source)
code = (
"def %s():\n" % funcname
+ code
+ "\n_result = %s()" % funcname
)
else:
code = "if True:\n" + indent(self.source)
return code
@property
def sourcename(self):
if self.filepath:
filename = self.filepath
else:
filename = "string"
if self.funcname:
filename += ":%s" % self.funcname
return "<%s>" % filename
@cached_property
def compiled(self):
try:
pyc = compile(self.evaluated_code, self.sourcename, 'exec')
except Exception as e:
stack = traceback.format_exc()
raise SourceCodeCompileError(
"Failed to compile %s:\n%s" % (self.sourcename, stack),
short_msg=str(e))
return pyc
def set_package(self, package):
# this is needed to load @included modules
self.package = package
def exec_(self, globals_={}):
# bind import modules
if self.package is not None and self.includes:
for name in self.includes:
module = include_module_manager.load_module(name, self.package)
globals_[name] = module
# exec
pyc = self.compiled
try:
exec(pyc, globals_)
except Exception as e:
stack = traceback.format_exc()
raise SourceCodeExecError(
"Failed to execute %s:\n%s" % (self.sourcename, stack),
short_msg=str(e))
return globals_.get("_result")
def to_text(self, funcname):
# don't indent code if already indented
if self.source[0] in (' ', '\t'):
source = self.source
else:
source = indent(self.source)
txt = "def %s():\n%s" % (funcname, source)
for entry in self.decorators:
nargs_str = ", ".join(map(repr, entry.get("nargs", [])))
name_str = entry.get("name")
sig = "@%s(%s)" % (name_str, nargs_str)
txt = sig + '\n' + txt
return txt
def _get_decorator_info(self, name):
matches = [x for x in self.decorators if x.get("name") == name]
if not matches:
return None
return matches[0]
def __getstate__(self):
return {
"source": self.source,
"filepath": self.filepath,
"funcname": self.funcname,
"eval_as_function": self.eval_as_function,
"decorators": self.decorators
}
def __setstate__(self, state):
self.source = state["source"]
self.filepath = state["filepath"]
self.funcname = state["funcname"]
self.eval_as_function = state["eval_as_function"]
self.decorators = state["decorators"]
self.func = None
self.package = None
def __eq__(self, other):
return (
isinstance(other, SourceCode)
and other.source == self.source
)
def __ne__(self, other):
return not (other == self)
def __str__(self):
return self.source
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.source)
class IncludeModuleManager(object):
"""Manages a cache of modules imported via '@include' decorator.
"""
# subdirectory under package 'base' path where we expect to find copied
# sourcefiles referred to by the '@include' function decorator.
#
include_modules_subpath = ".rez/include"
def __init__(self):
self.modules = {}
def load_module(self, name, package):
from hashlib import sha1
from rez.config import config # avoiding circular import
from rez.developer_package import DeveloperPackage
# in rare cases, a @late bound function may get called before the
# package is built. An example is 'requires' and the other requires-like
# functions. These need to be evaluated before a build, but it does also
# make sense to sometimes implement these as late-bound functions. We
# detect this case here, and load the modules from the original (pre-
# copied into package payload) location.
#
if isinstance(package, DeveloperPackage):
# load sourcefile from original location
path = config.package_definition_python_path
filepath = os.path.join(path, "%s.py" % name)
if not os.path.exists(filepath):
return None
with open(filepath, "rb") as f:
hash_str = sha1(f.read().strip()).hexdigest()
else:
# load sourcefile that's been copied into package install payload
path = os.path.join(package.base, self.include_modules_subpath)
pathname = os.path.join(path, "%s.py" % name)
hashname = os.path.join(path, "%s.sha1" % name)
if os.path.isfile(pathname) and os.path.isfile(hashname):
with open(hashname, "r") as f:
hash_str = f.readline()
filepath = pathname
else:
# Fallback for backward compat
pathname = os.path.join(path, "%s-*.py" % name)
hashnames = glob(pathname)
if not hashnames:
return None
filepath = hashnames[0]
hash_str = filepath.rsplit('-', 1)[-1].split('.', 1)[0]
# End, for details of backward compat,
# see https://github.com/nerdvegas/rez/issues/934
# and https://github.com/nerdvegas/rez/pull/935
module = self.modules.get(hash_str)
if module is not None:
return module
if config.debug("file_loads"):
print_debug("Loading include sourcefile: %s" % filepath)
module = py23.load_module_from_file(name, filepath)
self.modules[hash_str] = module
return module
# singleton
include_module_manager = IncludeModuleManager()
| lgpl-3.0 | -5,989,282,987,858,868,000 | 30.044693 | 83 | 0.580619 | false |
mmasaki/trove | trove/guestagent/datastore/experimental/postgresql/service/access.py | 1 | 3285 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.postgresql import pgutil
from trove.guestagent.db import models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class PgSqlAccess(object):
"""Mixin implementing the user-access API calls."""
def grant_access(self, context, username, hostname, databases):
"""Give a user permission to use a given database.
The username and hostname parameters are strings.
The databases parameter is a list of strings representing the names of
the databases to grant permission on.
"""
for database in databases:
LOG.info(
_("{guest_id}: Granting user ({user}) access to database "
"({database}).").format(
guest_id=CONF.guest_id,
user=username,
database=database,)
)
pgutil.psql(
pgutil.AccessQuery.grant(
user=username,
database=database,
),
timeout=30,
)
def revoke_access(self, context, username, hostname, database):
"""Revoke a user's permission to use a given database.
The username and hostname parameters are strings.
The database parameter is a string representing the name of the
database.
"""
LOG.info(
_("{guest_id}: Revoking user ({user}) access to database"
"({database}).").format(
guest_id=CONF.guest_id,
user=username,
database=database,)
)
pgutil.psql(
pgutil.AccessQuery.revoke(
user=username,
database=database,
),
timeout=30,
)
def list_access(self, context, username, hostname):
"""List database for which the given user as access.
Return a list of serialized Postgres databases.
"""
if self.user_exists(username):
return [db.serialize() for db in self._get_databases_for(username)]
raise exception.UserNotFound(username)
def _get_databases_for(self, username):
"""Return all Postgres databases accessible by a given user."""
results = pgutil.query(
pgutil.AccessQuery.list(user=username),
timeout=30,
)
return [models.PostgreSQLSchema(
row[0].strip(), character_set=row[1], collate=row[2])
for row in results]
| apache-2.0 | -4,512,601,558,100,570,600 | 34.322581 | 79 | 0.598478 | false |
Connor-R/nba_shot_charts | processing/table_exporter.py | 1 | 5599 | import argparse
from time import time
import csv
import os
from py_db import db
db = db("nba_shots")
def initiate():
start_time = time()
print "\nexporting to .csv"
for statType in ('Player', 'Team', 'PlayerCareer'):
for rangeType in ('Reg', 'Pre', 'Post'):
print '\t', statType, rangeType
if statType == 'PlayerCareer':
isCareer=True
dataType = 'Player'
else:
isCareer=False
dataType = statType
export_table(dataType, rangeType, isCareer=isCareer)
end_time = time()
elapsed_time = float(end_time - start_time)
print "\n\nNBA table_exporter.py"
print "time elapsed (in seconds): " + str(elapsed_time)
print "time elapsed (in minutes): " + str(elapsed_time/60.0)
def export_table(dataType, rangeType, isCareer):
if dataType == "Player":
qry_join = "JOIN players pl USING (player_id) WHERE 1"
fname = "fname"
lname = "lname"
elif dataType == "Team":
qry_join = "JOIN teams t USING (team_id) WHERE LEFT(season_id,4) > start_year AND LEFT(season_id,4) <= end_year"
fname = "city"
lname = "tname"
if isCareer is False:
careerText = ""
qry = """SELECT
CONCAT(%s, ' ', %s) as 'Name',
season_type as 'Season Type',
%s_id as 'NBA ID',
season_id as 'Year(s)',
b.games as 'Games',
b.makes as 'FG',
b.attempts as 'FGA',
b.points as 'Points',
ROUND(efg*100,1) as 'EFG_Perc',
ROUND(efg_plus,1) as 'EFG+',
ROUND(PAA,1) as 'PAA',
ROUND(PAA_per_game,1) as 'PAA/Game',
ROUND(PAR,1) as 'PAR',
ROUND(PAR_per_game,1) as 'PAR/Game',
ROUND(ShotSkillPlus,1) as 'ShotSkill+',
AttemptsPerGame_percentile as 'Volume Percentile',
EFG_percentile as 'EFG Percentile',
PAAperGame_percentile as 'PAA/Game Percentile',
PARperGame_percentile as 'PAR/Game Percentile',
shotSkill_percentile as 'ShotSkill Percentile'
FROM shots_%s_Relative_Year r
JOIN shots_%s_Distribution_Year d USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
JOIN shots_%s_Breakdown b USING (%s_id, season_id, season_type, shot_zone_basic, shot_zone_area)
JOIN shot_skill_plus_%s_Year s USING (%s_id, season_id, season_type)
JOIN percentiles_%s_Year p USING (%s_id, season_id, season_type)
%s
AND shot_zone_basic = 'all'
AND season_type = '%s';"""
query = qry % (fname, lname, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, dataType, qry_join, rangeType)
# raw_input(query)
elif isCareer is True:
careerText = "_Career"
qry = """SELECT
CONCAT(fname, ' ', lname) as 'Name',
season_type as 'Season Type',
player_id as 'NBA ID',
season_id as 'Year(s)',
b.games as 'Games',
b.makes as 'FG',
b.attempts as 'FGA',
b.points as 'Points',
ROUND(efg*100,1) as 'EFG_Perc',
ROUND(efg_plus,1) as 'EFG+',
ROUND(PAA,1) as 'PAA',
ROUND(PAA_per_game,1) as 'PAA/Game',
ROUND(PAR,1) as 'PAR',
ROUND(PAR_per_game,1) as 'PAR/Game',
ROUND(ShotSkillPlus,1) as 'ShotSkill+',
AttemptsPerGame_percentile as 'Volume Percentile',
EFG_percentile as 'EFG Percentile',
PAAperGame_percentile as 'PAA/Game Percentile',
PARperGame_percentile as 'PAR/Game Percentile',
shotSkill_percentile as 'ShotSkill Percentile'
FROM shots_player_Relative_Career r
JOIN shots_player_Distribution_Career d USING (player_id, season_id, season_type, shot_zone_basic, shot_zone_area)
JOIN(
SELECT
player_id, season_type, shot_zone_basic, shot_zone_area,
SUM(games) AS games,
SUM(attempts) AS attempts,
SUM(makes) AS makes,
SUM(points) AS points
FROM shots_player_Breakdown
GROUP BY player_id, season_type, shot_zone_area, shot_zone_basic, season_type
) b USING (player_id, season_type, shot_zone_basic, shot_zone_area)
JOIN shot_skill_plus_player_Career s USING (player_id, season_id, season_type)
JOIN percentiles_player_Career p USING (player_id, season_id, season_type)
JOIN players pl USING (player_id)
WHERE shot_zone_basic = 'all'
AND season_type = '%s';"""
query = qry % (rangeType)
# raw_input(query)
res = db.query(query)
file_name = "%s_%s%s" % (dataType, rangeType, careerText)
csv_title = "/Users/connordog/Dropbox/Desktop_Files/Work_Things/CodeBase/Python_Scripts/Python_Projects/nba_shot_charts/csvs/leaderboards/%s.csv" % (file_name)
csv_file = open(csv_title, "wb")
append_csv = csv.writer(csv_file)
csv_header = ["Name", "Season Type", "NBA ID", "Year(s)", "Games", "FG", "FGA", "FG Points", "EFG%", "EFG+", "PAA", "PAA/Game", "PAR", "PAR/Game", "ShotSkill+", "Volume Percentile", "EFG Percentile", "PAA/Game Percentile", "PAR/Game Percentile", "ShotSkill Percentile"]
append_csv.writerow(csv_header)
for row in res:
row = list(row[0:])
for i, val in enumerate(row):
if type(val) in (str,):
row[i] = "".join([l if ord(l) < 128 else "" for l in val])
append_csv.writerow(row)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = parser.parse_args()
initiate()
| mit | 7,872,525,690,449,116,000 | 36.831081 | 273 | 0.58957 | false |
kawashiro/dewyatochka2 | src/dewyatochka/core/log/output.py | 1 | 4059 | # -*- coding: UTF-8
""" Log output handlers implementation
Classes
=======
Handler -- Abstract output handler
STDOUTHandler -- Console output handler
FileHandler -- Text file output handler
NullHandler -- Empty handler (stub)
"""
import sys
import logging
import fcntl
import termios
import struct
from abc import ABCMeta, abstractmethod
from threading import Lock
from .service import LEVEL_PROGRESS, LEVEL_NAME_PROGRESS
__all__ = ['Handler', 'STDOUTHandler', 'FileHandler', 'NullHandler']
class Handler(metaclass=ABCMeta):
""" Abstract output handler
Wrapper over log handler to be sure
to use single custom output format
for any output stream
"""
def __init__(self, log_format: str):
""" Set logging service
:param str log_format:
"""
self._handler = self._create_handler()
self._handler.setFormatter(logging.Formatter(log_format))
self._lock = Lock()
def __getattr__(self, item):
""" Inherit inner handler methods/properties
:returns: Depending on inner method attributes
"""
return getattr(self.handler, item)
@property
def __in_cr_mode(self) -> bool:
""" Check if logger is in \r mode now
:return bool:
"""
return self.handler.terminator == '\r'
def __enable_cr_mode(self):
""" Enable \r mode
:return None:
"""
self.handler.terminator = '\r'
def __disable_cr_mode(self):
""" Return to \n mode
:return None:
"""
self.handler.terminator = '\n'
@property
def __terminal_width(self) -> int:
""" Get terminal width
:return int:
"""
winsize = fcntl.ioctl(self.handler.stream.fileno(), termios.TIOCGWINSZ, struct.pack('HH', 0, 0))
return struct.unpack('HH', winsize)[1]
def handle(self, record: logging.LogRecord):
""" Do whatever it takes to actually log the specified logging record.
:param logging.LogRecord record: Log record instance to emit
:return None:
"""
with self._lock:
if record.levelno == LEVEL_PROGRESS:
record.levelname = LEVEL_NAME_PROGRESS
self.__enable_cr_mode()
try:
padding = ' ' * (self.__terminal_width - len(self.handler.format(record)))
except:
padding = ''
record.msg += padding
elif self.__in_cr_mode:
self.__disable_cr_mode()
self.handler.stream.write(self.handler.terminator)
self.handler.handle(record)
@property
def handler(self):
""" Get inner handler instance
:return logging.Handler:
"""
return self._handler
@abstractmethod
def _create_handler(self) -> logging.StreamHandler: # pragma: no cover
""" Create new inner handler instance
:return logging.Handler:
"""
pass
class STDOUTHandler(Handler):
""" Console output handler """
def _create_handler(self) -> logging.Handler:
""" Create new inner handler instance
:return logging.Handler:
"""
return logging.StreamHandler(stream=sys.stdout)
class FileHandler(Handler):
""" Text file output handler """
def __init__(self, log_format: str, file_path: str):
""" Set logging service
:param str log_format:
:param str file_path:
"""
self._file = file_path
super().__init__(log_format)
def _create_handler(self) -> logging.StreamHandler:
""" Create new inner handler instance
:return logging.Handler:
"""
return logging.FileHandler(self._file, delay=True)
class NullHandler(Handler):
""" Empty handler (stub) """
def _create_handler(self) -> logging.StreamHandler:
""" Create new inner handler instance
:return logging.Handler:
"""
return logging.NullHandler()
| gpl-3.0 | 4,817,703,408,906,691,000 | 24.36875 | 104 | 0.58438 | false |
ArturGaspar/w3lib | tests/test_url.py | 1 | 37051 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import unittest
from w3lib.url import (is_url, safe_url_string, safe_download_url,
url_query_parameter, add_or_replace_parameter, url_query_cleaner,
file_uri_to_path, parse_data_uri, path_to_file_uri, any_to_uri,
urljoin_rfc, canonicalize_url, parse_url)
from six.moves.urllib.parse import urlparse
class UrlTests(unittest.TestCase):
def test_safe_url_string(self):
# Motoko Kusanagi (Cyborg from Ghost in the Shell)
motoko = u'\u8349\u8599 \u7d20\u5b50'
self.assertEqual(safe_url_string(motoko), # note the %20 for space
'%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90')
self.assertEqual(safe_url_string(motoko),
safe_url_string(safe_url_string(motoko)))
self.assertEqual(safe_url_string(u'©'), # copyright symbol
'%C2%A9')
# page-encoding does not affect URL path
self.assertEqual(safe_url_string(u'©', 'iso-8859-1'),
'%C2%A9')
# path_encoding does
self.assertEqual(safe_url_string(u'©', path_encoding='iso-8859-1'),
'%A9')
self.assertEqual(safe_url_string("http://www.example.org/"),
'http://www.example.org/')
alessi = u'/ecommerce/oggetto/Te \xf2/tea-strainer/1273'
self.assertEqual(safe_url_string(alessi),
'/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273')
self.assertEqual(safe_url_string("http://www.example.com/test?p(29)url(http://www.another.net/page)"),
"http://www.example.com/test?p(29)url(http://www.another.net/page)")
self.assertEqual(safe_url_string("http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"),
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200")
# page-encoding does not affect URL path
# we still end up UTF-8 encoding characters before percent-escaping
safeurl = safe_url_string(u"http://www.example.com/£")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string(u"http://www.example.com/£", encoding='utf-8')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string(u"http://www.example.com/£", encoding='latin-1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string(u"http://www.example.com/£", path_encoding='latin-1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3")
self.assertTrue(isinstance(safe_url_string(b'http://example.com/'), str))
def test_safe_url_string_with_query(self):
safeurl = safe_url_string(u"http://www.example.com/£?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string(u"http://www.example.com/£?unit=µ", encoding='utf-8')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string(u"http://www.example.com/£?unit=µ", encoding='latin-1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
safeurl = safe_url_string(u"http://www.example.com/£?unit=µ", path_encoding='latin-1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%C2%B5")
safeurl = safe_url_string(u"http://www.example.com/£?unit=µ", encoding='latin-1', path_encoding='latin-1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
def test_safe_url_string_misc(self):
# mixing Unicode and percent-escaped sequences
safeurl = safe_url_string(u"http://www.example.com/£?unit=%C2%B5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string(u"http://www.example.com/%C2%A3?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
def test_safe_url_string_bytes_input(self):
safeurl = safe_url_string(b"http://www.example.com/")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/")
# bytes input is assumed to be UTF-8
safeurl = safe_url_string(b"http://www.example.com/\xc2\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
# page-encoding encoded bytes still end up as UTF-8 sequences in path
safeurl = safe_url_string(b"http://www.example.com/\xb5", encoding='latin1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5", encoding='latin1')
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
def test_safe_url_string_bytes_input_nonutf8(self):
# latin1
safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
# cp1251
# >>> u'Россия'.encode('cp1251')
# '\xd0\xee\xf1\xf1\xe8\xff'
safeurl = safe_url_string(b"http://www.example.com/country/\xd0\xee\xf1\xf1\xe8\xff")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/country/%D0%EE%F1%F1%E8%FF")
def test_safe_url_idna(self):
# adapted from:
# https://ssl.icu-project.org/icu-bin/idnbrowser
# http://unicode.org/faq/idn.html
# + various others
websites = (
(u'http://www.färgbolaget.nu/färgbolaget', 'http://www.xn--frgbolaget-q5a.nu/f%C3%A4rgbolaget'),
(u'http://www.räksmörgås.se/?räksmörgås=yes', 'http://www.xn--rksmrgs-5wao1o.se/?r%C3%A4ksm%C3%B6rg%C3%A5s=yes'),
(u'http://www.brændendekærlighed.com/brændende/kærlighed', 'http://www.xn--brndendekrlighed-vobh.com/br%C3%A6ndende/k%C3%A6rlighed'),
(u'http://www.예비교사.com', 'http://www.xn--9d0bm53a3xbzui.com'),
(u'http://理容ナカムラ.com', 'http://xn--lck1c3crb1723bpq4a.com'),
(u'http://あーるいん.com', 'http://xn--l8je6s7a45b.com'),
# --- real websites ---
# in practice, this redirect (301) to http://www.buecher.de/?q=b%C3%BCcher
(u'http://www.bücher.de/?q=bücher', 'http://www.xn--bcher-kva.de/?q=b%C3%BCcher'),
# Japanese
(u'http://はじめよう.みんな/?query=サ&maxResults=5', 'http://xn--p8j9a0d9c9a.xn--q9jyb4c/?query=%E3%82%B5&maxResults=5'),
# Russian
(u'http://кто.рф/', 'http://xn--j1ail.xn--p1ai/'),
(u'http://кто.рф/index.php?domain=Что', 'http://xn--j1ail.xn--p1ai/index.php?domain=%D0%A7%D1%82%D0%BE'),
# Korean
(u'http://내도메인.한국/', 'http://xn--220b31d95hq8o.xn--3e0b707e/'),
(u'http://맨체스터시티축구단.한국/', 'http://xn--2e0b17htvgtvj9haj53ccob62ni8d.xn--3e0b707e/'),
# Arabic
(u'http://nic.شبكة', 'http://nic.xn--ngbc5azd'),
# Chinese
(u'https://www.贷款.在线', 'https://www.xn--0kwr83e.xn--3ds443g'),
(u'https://www2.xn--0kwr83e.在线', 'https://www2.xn--0kwr83e.xn--3ds443g'),
(u'https://www3.贷款.xn--3ds443g', 'https://www3.xn--0kwr83e.xn--3ds443g'),
)
for idn_input, safe_result in websites:
safeurl = safe_url_string(idn_input)
self.assertEqual(safeurl, safe_result)
# make sure the safe URL is unchanged when made safe a 2nd time
for _, safe_result in websites:
safeurl = safe_url_string(safe_result)
self.assertEqual(safeurl, safe_result)
def test_safe_url_idna_encoding_failure(self):
# missing DNS label
self.assertEqual(
safe_url_string(u"http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
# DNS label too long
self.assertEqual(
safe_url_string(
u"http://www.{label}.com/résumé?q=résumé".format(
label=u"example"*11)),
"http://www.{label}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9".format(
label=u"example"*11))
def test_safe_download_url(self):
self.assertEqual(safe_download_url('http://www.example.org'),
'http://www.example.org/')
self.assertEqual(safe_download_url('http://www.example.org/../'),
'http://www.example.org/')
self.assertEqual(safe_download_url('http://www.example.org/../../images/../image'),
'http://www.example.org/image')
self.assertEqual(safe_download_url('http://www.example.org/dir/'),
'http://www.example.org/dir/')
def test_is_url(self):
self.assertTrue(is_url('http://www.example.org'))
self.assertTrue(is_url('https://www.example.org'))
self.assertTrue(is_url('file:///some/path'))
self.assertFalse(is_url('foo://bar'))
self.assertFalse(is_url('foo--bar'))
def test_url_query_parameter(self):
self.assertEqual(url_query_parameter("product.html?id=200&foo=bar", "id"),
'200')
self.assertEqual(url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault"),
'mydefault')
self.assertEqual(url_query_parameter("product.html?id=", "id"),
None)
self.assertEqual(url_query_parameter("product.html?id=", "id", keep_blank_values=1),
'')
def test_url_query_parameter_2(self):
"""
        This problem was seen several times in the feeds. Sometimes affiliate URLs contain
        a nested, encoded affiliate URL with the direct URL as a parameter. For example:
        aff_url1 = 'http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1'
        the typical code to extract the needed URL from it is:
        aff_url2 = url_query_parameter(aff_url1, 'url')
        after this aff_url2 is:
        'http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN&params=adref%3DGarden and DIY->Garden furniture->Children&#39;s garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1'
        the direct URL extraction is:
        url = url_query_parameter(aff_url2, 'referredURL')
        but this will not work, because aff_url2 contains &#39; (an apostrophe encoded
        as an HTML entity in the feed): its '#' is parsed as a fragment delimiter and
        truncates the query string, so the extraction fails. The current workaround
        was made in the spider: just replacing &#39; with %27 before extracting
        (see the sketch in the following test method).
        return  # FIXME: this test should pass but currently doesn't
# correct case
aff_url1 = "http://www.anrdoezrs.net/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EGarden+table+and+chair+sets%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357199%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, 'url')
self.assertEqual(aff_url2, "http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Garden table and chair sets&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357199%26langId%3D-1")
prod_url = url_query_parameter(aff_url2, 'referredURL')
self.assertEqual(prod_url, "http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357199&langId=-1")
# weird case
aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, 'url')
self.assertEqual(aff_url2, "http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1")
prod_url = url_query_parameter(aff_url2, 'referredURL')
# fails, prod_url is None now
self.assertEqual(prod_url, "http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1")
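
    def test_url_query_parameter_2_workaround(self):
        # A minimal sketch of the workaround described in the docstring of
        # test_url_query_parameter_2 above; the method name and the
        # replace-based approach are illustrative, not part of the w3lib
        # API. The decoded affiliate URL contains the HTML entity "&#39;"
        # (an encoded apostrophe): its "#" is parsed as a fragment
        # delimiter, truncating the query string, so "referredURL" cannot
        # be extracted. Replacing the entity with %27 first restores a
        # parseable query string.
        aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
        aff_url2 = url_query_parameter(aff_url1, 'url')
        aff_url2 = aff_url2.replace('&#39;', '%27')
        prod_url = url_query_parameter(aff_url2, 'referredURL')
        self.assertEqual(prod_url, "http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1")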
def test_add_or_replace_parameter(self):
url = 'http://domain/test'
self.assertEqual(add_or_replace_parameter(url, 'arg', 'v'),
'http://domain/test?arg=v')
url = 'http://domain/test?arg1=v1&arg2=v2&arg3=v3'
self.assertEqual(add_or_replace_parameter(url, 'arg4', 'v4'),
'http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4')
self.assertEqual(add_or_replace_parameter(url, 'arg3', 'nv3'),
'http://domain/test?arg1=v1&arg2=v2&arg3=nv3')
url = 'http://domain/test?arg1=v1;arg2=v2'
self.assertEqual(add_or_replace_parameter(url, 'arg1', 'v3'),
'http://domain/test?arg1=v3&arg2=v2')
self.assertEqual(add_or_replace_parameter("http://domain/moreInfo.asp?prodID=", 'prodID', '20'),
'http://domain/moreInfo.asp?prodID=20')
url = 'http://rmc-offers.co.uk/productlist.asp?BCat=2%2C60&CatID=60'
self.assertEqual(add_or_replace_parameter(url, 'BCat', 'newvalue'),
'http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60')
url = 'http://rmc-offers.co.uk/productlist.asp?BCat=2,60&CatID=60'
self.assertEqual(add_or_replace_parameter(url, 'BCat', 'newvalue'),
'http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60')
url = 'http://rmc-offers.co.uk/productlist.asp?'
self.assertEqual(add_or_replace_parameter(url, 'BCat', 'newvalue'),
'http://rmc-offers.co.uk/productlist.asp?BCat=newvalue')
url = "http://example.com/?version=1&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2"
self.assertEqual(add_or_replace_parameter(url, 'version', '2'),
'http://example.com/?version=2&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2')
self.assertEqual(add_or_replace_parameter(url, 'pageurl', 'test'),
'http://example.com/?version=1&pageurl=test¶m2=value2')
def test_url_query_cleaner(self):
self.assertEqual('product.html?id=200',
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id']))
self.assertEqual('product.html?id=200',
url_query_cleaner("product.html?&id=200&&foo=bar&name=wired", ['id']))
self.assertEqual('product.html',
url_query_cleaner("product.html?foo=bar&name=wired", ['id']))
self.assertEqual('product.html?id=200&name=wired',
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id', 'name']))
self.assertEqual('product.html?id',
url_query_cleaner("product.html?id&other=3&novalue=", ['id']))
# default is to remove duplicate keys
self.assertEqual('product.html?d=1',
url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ['d']))
# unique=False disables duplicate keys filtering
self.assertEqual('product.html?d=1&d=2&d=3',
url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ['d'], unique=False))
self.assertEqual('product.html?id=200&foo=bar',
url_query_cleaner("product.html?id=200&foo=bar&name=wired#id20", ['id', 'foo']))
self.assertEqual('product.html?foo=bar&name=wired',
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ['id'], remove=True))
self.assertEqual('product.html?name=wired',
url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'foo'], remove=True))
self.assertEqual('product.html?foo=bar&name=wired',
url_query_cleaner("product.html?id=2&foo=bar&name=wired", ['id', 'footo'], remove=True))
self.assertEqual('product.html?foo=bar',
url_query_cleaner("product.html?foo=bar&name=wired", 'foo'))
self.assertEqual('product.html?foobar=wired',
url_query_cleaner("product.html?foo=bar&foobar=wired", 'foobar'))
def test_path_to_file_uri(self):
if os.name == 'nt':
self.assertEqual(path_to_file_uri("C:\\windows\clock.avi"),
"file:///C:/windows/clock.avi")
else:
self.assertEqual(path_to_file_uri("/some/path.txt"),
"file:///some/path.txt")
fn = "test.txt"
x = path_to_file_uri(fn)
        self.assertTrue(x.startswith('file:///'))
self.assertEqual(file_uri_to_path(x).lower(), os.path.abspath(fn).lower())
def test_file_uri_to_path(self):
if os.name == 'nt':
self.assertEqual(file_uri_to_path("file:///C:/windows/clock.avi"),
"C:\\windows\clock.avi")
uri = "file:///C:/windows/clock.avi"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
else:
self.assertEqual(file_uri_to_path("file:///path/to/test.txt"),
"/path/to/test.txt")
self.assertEqual(file_uri_to_path("/path/to/test.txt"),
"/path/to/test.txt")
uri = "file:///path/to/test.txt"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
self.assertEqual(file_uri_to_path("test.txt"),
"test.txt")
def test_any_to_uri(self):
if os.name == 'nt':
self.assertEqual(any_to_uri("C:\\windows\clock.avi"),
"file:///C:/windows/clock.avi")
else:
self.assertEqual(any_to_uri("/some/path.txt"),
"file:///some/path.txt")
self.assertEqual(any_to_uri("file:///some/path.txt"),
"file:///some/path.txt")
self.assertEqual(any_to_uri("http://www.example.com/some/path.txt"),
"http://www.example.com/some/path.txt")
def test_urljoin_rfc_deprecated(self):
jurl = urljoin_rfc("http://www.example.com/", "/test")
self.assertEqual(jurl, b"http://www.example.com/test")
class CanonicalizeUrlTest(unittest.TestCase):
def test_canonicalize_url(self):
# simplest case
self.assertEqual(canonicalize_url("http://www.example.com/"),
"http://www.example.com/")
def test_return_str(self):
assert isinstance(canonicalize_url(u"http://www.example.com"), str)
assert isinstance(canonicalize_url(b"http://www.example.com"), str)
def test_append_missing_path(self):
self.assertEqual(canonicalize_url("http://www.example.com"),
"http://www.example.com/")
def test_typical_usage(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1")
self.assertEqual(canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1")
def test_sorting(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3")
def test_keep_blank_values(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=")
self.assertEqual(canonicalize_url(u'http://www.example.com/do?1750,4'),
'http://www.example.com/do?1750%2C4=')
def test_spaces(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
def test_canonicalize_url_unicode_path(self):
self.assertEqual(canonicalize_url(u"http://www.example.com/résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9")
def test_canonicalize_url_unicode_query_string(self):
# default encoding for path and query is UTF-8
self.assertEqual(canonicalize_url(u"http://www.example.com/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
# passed encoding will affect query string
self.assertEqual(canonicalize_url(u"http://www.example.com/résumé?q=résumé", encoding='latin1'),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%E9sum%E9")
self.assertEqual(canonicalize_url(u"http://www.example.com/résumé?country=Россия", encoding='cp1251'),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%EE%F1%F1%E8%FF")
def test_canonicalize_url_unicode_query_string_wrong_encoding(self):
# trying to encode with wrong encoding
# fallback to UTF-8
self.assertEqual(canonicalize_url(u"http://www.example.com/résumé?currency=€", encoding='latin1'),
"http://www.example.com/r%C3%A9sum%C3%A9?currency=%E2%82%AC")
self.assertEqual(canonicalize_url(u"http://www.example.com/résumé?country=Россия", encoding='latin1'),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%A0%D0%BE%D1%81%D1%81%D0%B8%D1%8F")
def test_normalize_percent_encoding_in_paths(self):
self.assertEqual(canonicalize_url("http://www.example.com/r%c3%a9sum%c3%a9"),
"http://www.example.com/r%C3%A9sum%C3%A9")
# non-UTF8 encoded sequences: they should be kept untouched, only upper-cased
# 'latin1'-encoded sequence in path
self.assertEqual(canonicalize_url("http://www.example.com/a%a3do"),
"http://www.example.com/a%A3do")
# 'latin1'-encoded path, UTF-8 encoded query string
self.assertEqual(canonicalize_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9"),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9")
# 'latin1'-encoded path and query string
self.assertEqual(canonicalize_url("http://www.example.com/a%a3do?q=r%e9sum%e9"),
"http://www.example.com/a%A3do?q=r%E9sum%E9")
def test_normalize_percent_encoding_in_query_arguments(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?k=b%a3"),
"http://www.example.com/do?k=b%A3")
self.assertEqual(canonicalize_url("http://www.example.com/do?k=r%c3%a9sum%c3%a9"),
"http://www.example.com/do?k=r%C3%A9sum%C3%A9")
def test_non_ascii_percent_encoding_in_paths(self):
self.assertEqual(canonicalize_url("http://www.example.com/a do?a=1"),
"http://www.example.com/a%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a %20do?a=1"),
"http://www.example.com/a%20%20do?a=1"),
self.assertEqual(canonicalize_url(u"http://www.example.com/a do£.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1")
self.assertEqual(canonicalize_url(b"http://www.example.com/a do\xc2\xa3.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1")
def test_non_ascii_percent_encoding_in_query_arguments(self):
self.assertEqual(canonicalize_url(u"http://www.example.com/do?price=£500&a=5&z=3"),
u"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url(b"http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url(b"http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
"http://www.example.com/do?a=1&price%28%C2%A3%29=500")
def test_urls_with_auth_and_ports(self):
self.assertEqual(canonicalize_url(u"http://user:[email protected]:81/do?now=1"),
u"http://user:[email protected]:81/do?now=1")
def test_remove_fragments(self):
self.assertEqual(canonicalize_url(u"http://user:[email protected]/do?a=1#frag"),
u"http://user:[email protected]/do?a=1")
self.assertEqual(canonicalize_url(u"http://user:[email protected]/do?a=1#frag", keep_fragments=True),
u"http://user:[email protected]/do?a=1#frag")
def test_dont_convert_safe_characters(self):
# dont convert safe characters to percent encoding representation
self.assertEqual(canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html")
def test_safe_characters_unicode(self):
# urllib.quote uses a mapping cache of encoded characters. when parsing
# an already percent-encoded url, it will fail if that url was not
# percent-encoded as utf-8, that's why canonicalize_url must always
# convert the urls to string. the following test asserts that
# functionality.
self.assertEqual(canonicalize_url(u'http://www.example.com/caf%E9-con-leche.htm'),
'http://www.example.com/caf%E9-con-leche.htm')
def test_domains_are_case_insensitive(self):
self.assertEqual(canonicalize_url("http://www.EXAMPLE.com/"),
"http://www.example.com/")
def test_canonicalize_idns(self):
self.assertEqual(canonicalize_url(u'http://www.bücher.de?q=bücher'),
'http://www.xn--bcher-kva.de/?q=b%C3%BCcher')
# Japanese (+ reordering query parameters)
self.assertEqual(canonicalize_url(u'http://はじめよう.みんな/?query=サ&maxResults=5'),
'http://xn--p8j9a0d9c9a.xn--q9jyb4c/?maxResults=5&query=%E3%82%B5')
def test_quoted_slash_and_question_sign(self):
self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
"http://foo.com/AC%2FDC+rocks%3F/?yeah=1")
self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC/"),
"http://foo.com/AC%2FDC/")
def test_canonicalize_urlparsed(self):
# canonicalize_url() can be passed an already urlparse'd URL
self.assertEqual(canonicalize_url(urlparse(u"http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
self.assertEqual(canonicalize_url(urlparse('http://www.example.com/caf%e9-con-leche.htm')),
'http://www.example.com/caf%E9-con-leche.htm')
self.assertEqual(canonicalize_url(urlparse("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9")
def test_canonicalize_parse_url(self):
# parse_url() wraps urlparse and is used in link extractors
self.assertEqual(canonicalize_url(parse_url(u"http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
self.assertEqual(canonicalize_url(parse_url('http://www.example.com/caf%e9-con-leche.htm')),
'http://www.example.com/caf%E9-con-leche.htm')
self.assertEqual(canonicalize_url(parse_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9")
def test_canonicalize_url_idempotence(self):
for url, enc in [(u'http://www.bücher.de/résumé?q=résumé', 'utf8'),
(u'http://www.example.com/résumé?q=résumé', 'latin1'),
(u'http://www.example.com/résumé?country=Россия', 'cp1251'),
(u'http://はじめよう.みんな/?query=サ&maxResults=5', 'iso2022jp')]:
canonicalized = canonicalize_url(url, encoding=enc)
            # if we canonicalize again, we get the same result
self.assertEqual(canonicalize_url(canonicalized, encoding=enc), canonicalized)
# without encoding, already canonicalized URL is canonicalized identically
self.assertEqual(canonicalize_url(canonicalized), canonicalized)
def test_canonicalize_url_idna_exceptions(self):
# missing DNS label
self.assertEqual(
canonicalize_url(u"http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9")
# DNS label too long
self.assertEqual(
canonicalize_url(
u"http://www.{label}.com/résumé?q=résumé".format(
label=u"example"*11)),
"http://www.{label}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9".format(
label=u"example"*11))
class DataURITests(unittest.TestCase):
def test_default_mediatype_charset(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "US-ASCII"})
self.assertEqual(result.data, b"A brief note")
def test_text_uri(self):
result = parse_data_uri(u"data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_bytes_uri(self):
result = parse_data_uri(b"data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_unicode_uri(self):
result = parse_data_uri(u"data:,é")
self.assertEqual(result.data, u"é".encode('utf-8'))
def test_default_mediatype(self):
result = parse_data_uri("data:;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters,
{"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_text_charset(self):
result = parse_data_uri("data:text/plain;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters,
{"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_mediatype_parameters(self):
result = parse_data_uri('data:text/plain;'
'foo=%22foo;bar%5C%22%22;'
'charset=utf-8;'
'bar=%22foo;%5C%22foo%20;/%20,%22,'
'%CE%8E%CE%A3%CE%8E')
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters,
{"charset": "utf-8",
"foo": 'foo;bar"',
"bar": 'foo;"foo ;/ ,'})
self.assertEqual(result.data, b"\xce\x8e\xce\xa3\xce\x8e")
def test_base64(self):
result = parse_data_uri("data:text/plain;base64,"
"SGVsbG8sIHdvcmxkLg%3D%3D")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_base64_spaces(self):
result = parse_data_uri("data:text/plain;base64,SGVsb%20G8sIH%0A%20%20"
"dvcm%20%20%20xk%20Lg%3D%0A%3D")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
result = parse_data_uri("data:text/plain;base64,SGVsb G8sIH\n "
"dvcm xk Lg%3D\n%3D")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_wrong_base64_param(self):
with self.assertRaises(ValueError):
parse_data_uri("data:text/plain;baes64,SGVsbG8sIHdvcmxkLg%3D%3D")
def test_missing_comma(self):
with self.assertRaises(ValueError):
parse_data_uri("data:A%20brief%20note")
def test_missing_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("text/plain,A%20brief%20note")
def test_wrong_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("http://example.com/")
def test_scheme_case_insensitive(self):
result = parse_data_uri("DATA:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
result = parse_data_uri("DaTa:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 1,854,225,113,798,313,700 | 55.941176 | 497 | 0.599391 | false |
googleads/google-ads-python | google/ads/googleads/v8/enums/types/user_list_combined_rule_operator.py | 1 | 1166 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.enums",
marshal="google.ads.googleads.v8",
manifest={"UserListCombinedRuleOperatorEnum",},
)
class UserListCombinedRuleOperatorEnum(proto.Message):
r"""Logical operator connecting two rules. """
class UserListCombinedRuleOperator(proto.Enum):
r"""Enum describing possible user list combined rule operators."""
UNSPECIFIED = 0
UNKNOWN = 1
AND = 2
AND_NOT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -7,409,470,100,416,270,000 | 30.513514 | 74 | 0.707547 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/contrib/gis/management/commands/inspectdb.py | 1 | 1530 | from django.core.management.commands.inspectdb import \
Command as InspectDBCommand
class Command(InspectDBCommand):
db_module = 'django.contrib.gis.db'
gis_tables = {}
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
field_params.update(geo_params)
# Adding the table name and column to the `gis_tables` dictionary, this
# allows us to track which tables need a GeoManager.
if table_name in self.gis_tables:
self.gis_tables[table_name].append(geo_col)
else:
self.gis_tables[table_name] = [geo_col]
return field_type, field_params, field_notes
def get_meta(self, table_name, constraints):
meta_lines = super(Command, self).get_meta(table_name, constraints)
if table_name in self.gis_tables:
# If the table is a geographic one, then we need make
# GeoManager the default manager for the model.
meta_lines.insert(0, ' objects = models.GeoManager()')
return meta_lines
| mit | -9,104,450,396,811,775,000 | 47.354839 | 112 | 0.626797 | false |
elkeschaper/tral | tral/repeat/test/repeat_test.py | 1 | 1564 | import os
import pytest
from tral.repeat import repeat
TEST_MSA_O = ['OCC', 'OOO']
TEST_MSA_K = ['KCC', 'KKK']
TEST_SCORE = "phylo_gap01"
# defaultdict(<class 'int'>, {'pSim': 0.66666666669999997, 'parsimony': 0.66666666669999997,
# 'entropy': 0.66666666666666663, 'phylo': 0.11368675605567802})
# pvalue 'phylo': 0.3821
@pytest.mark.no_external_software_required
def test_standardize_amino_acids():
assert repeat.standardize("ABDEF-G", "AA") == "ADDEF-G"
@pytest.mark.no_external_software_required
def test_repeat_ambiguous():
myTR_O = repeat.Repeat(msa=TEST_MSA_O)
myTR_K = repeat.Repeat(msa=TEST_MSA_K)
assert myTR_O.msaTD_standard_aa == myTR_K.msaTD
assert myTR_O.msaTD_standard_aa == myTR_K.msaTD_standard_aa
assert myTR_O.score(TEST_SCORE) == myTR_K.score(TEST_SCORE)
assert myTR_O.divergence(TEST_SCORE) == myTR_K.divergence(TEST_SCORE)
assert myTR_O.pvalue(TEST_SCORE) == myTR_K.pvalue(TEST_SCORE)
assert myTR_O.divergence(TEST_SCORE) == 2.095947265625
assert myTR_K.pvalue(TEST_SCORE) == 0.3507
@pytest.mark.no_external_software_required
def test_repeat_pickle(tmpdir):
myTR_O = repeat.Repeat(msa=TEST_MSA_O)
test_pickle = tmpdir.join("test.pickle")
myTR_O.write(test_pickle, 'pickle')
myTR_O_new = repeat.Repeat.create(test_pickle, 'pickle')
assert myTR_O.msa == myTR_O_new.msa
assert myTR_O.sequence_type == myTR_O_new.sequence_type
assert myTR_O.text == myTR_O_new.text
if os.path.exists(test_pickle):
os.remove(test_pickle)
| gpl-2.0 | 8,622,698,513,205,656,000 | 28.509434 | 92 | 0.680307 | false |
ini-bdds/ermrest | ermrest/sanepg2.py | 1 | 6608 |
#
# Copyright 2013 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrapper for sane access to psycopg2 with transactional data streaming.
This module provides a customized psycopg2 connection class that can
be used as a connection_factory parameter to the normal
psycopg2.connect() factory. Also provided is a convenience pool()
factory to create a ThreadedConnectionPool that will use this
customized connection class.
The purpose of the customized connection class is to make it easier to
use a sane combination of psycopg2 features:
1. server-side named cursors to allow streaming of large results
without consuming lots of Python memory
2. cursors with 'withhold=True' mode to allow results to be fetched
before and/or after a transaction is committed
This combination allows a transaction to be committed and then to
serialize results, without any risk of commencing serialization on a
transaction that will fail nor with any need to buffer the entire
result before serialization commences.
"""
import psycopg2
import psycopg2.pool
import web
import sys
import traceback
import datetime
import math
class connection (psycopg2.extensions.connection):
"""Customized psycopg2 connection factory with per-execution() cursor support.
"""
def __init__(self, dsn):
psycopg2.extensions.connection.__init__(self, dsn)
self._curnumber = 1
def execute(self, stmt, vars=None):
"""Name and create a server-side cursor with withhold=True and run statement in it.
You can iterate over the resulting cursor to efficiently
fetch rows from the server, and you may do this before or
after committing the transaction. The entire result set
need not exist in Python memory if you dispose of your old
rows as you go.
Please remember to close these per-statement cursors to avoid
wasting resources on the Postgres server session.
"""
curname = 'cursor%d' % self._curnumber
self._curnumber += 1
cur = self.cursor(curname, withhold=True)
cur.execute(stmt, vars=vars)
return cur
def pool(minconn, maxconn, dsn):
"""Open a thread-safe connection pool with minconn <= N <= maxconn connections to database.
The connections are from the customized connection factory in this module.
"""
return psycopg2.pool.ThreadedConnectionPool(minconn, maxconn, dsn=dsn, connection_factory=connection)
class PoolManager (object):
"""Manage a set of database connection pools keyed by database name.
"""
def __init__(self):
# map dsn -> [pool, timestamp]
self.pools = dict()
self.max_idle_seconds = 60 * 60 # 1 hour
def __getitem__(self, dsn):
"""Lookup existing or create new pool for database on demand.
May fail transiently and caller should retry.
"""
# abandon old pools so they can be garbage collected
for key in self.pools.keys():
try:
pair = self.pools.pop(key)
delta = (datetime.datetime.now() - pair[1])
try:
delta_seconds = delta.total_seconds()
except:
delta_seconds = delta.seconds + delta.microseconds * math.pow(10,-6)
if delta_seconds < self.max_idle_seconds:
# this pool is sufficiently active so put it back!
boundpair = self.pools.setdefault(key, pair)
# if pair is still removed at this point, let garbage collector deal with it
except KeyError:
# another thread could have purged key before we got to it
pass
try:
pair = self.pools[dsn]
pair[1] = datetime.datetime.now() # update timestamp
return pair[0]
except KeyError:
# atomically get/set pool
newpool = pool(1, 4, dsn)
boundpair = self.pools.setdefault(dsn, [newpool, datetime.datetime.now()])
if boundpair[0] is not newpool:
# someone beat us to it
newpool.closeall()
return boundpair[0]
pools = PoolManager()
class PooledConnection (object):
def __init__(self, dsn):
self.used_pool = pools[dsn]
self.conn = self.used_pool.getconn()
self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_REPEATABLE_READ)
self.cur = self.conn.cursor()
def perform(self, bodyfunc, finalfunc=lambda x: x, verbose=False):
"""Run bodyfunc(conn, cur) using pooling, commit, transform with finalfunc, clean up.
Automates handling of errors.
"""
assert self.conn is not None
try:
result = bodyfunc(self.conn, self.cur)
self.conn.commit()
result = finalfunc(result)
if hasattr(result, 'next'):
# need to defer cleanup to after result is drained
for d in result:
yield d
else:
yield result
except psycopg2.InterfaceError, e:
# reset bad connection
self.used_pool.putconn(self.conn, close=True)
self.conn = None
raise e
except GeneratorExit, e:
# happens normally at end of result yielding sequence
raise
except:
if self.conn is not None:
self.conn.rollback()
if verbose:
et, ev, tb = sys.exc_info()
web.debug(u'got exception "%s" during sanepg2.PooledConnection.perform()' % unicode(ev),
traceback.format_exception(et, ev, tb))
raise
def final(self):
if self.conn is not None:
self.cur.close()
try:
self.conn.commit()
except:
pass
self.used_pool.putconn(self.conn)
self.conn = None
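# --- Usage sketch (illustrative; the DSN and query below are invented) -------
# Minimal driver for PooledConnection.perform(): bodyfunc runs inside the
# transaction and returns a withhold cursor, finalfunc may transform the
# result after commit, and final() releases the pooled connection.
if __name__ == '__main__':
    def _body(conn, cur):
        return conn.execute("SELECT 1")

    pc = PooledConnection('dbname=example')  # hypothetical DSN
    try:
        for rows in pc.perform(_body, finalfunc=lambda c: list(c)):
            print rows
    finally:
        pc.final()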
| apache-2.0 | -7,061,611,559,697,385,000 | 35.508287 | 105 | 0.623184 | false |
desion/terminal | web/http_server.py | 1 | 7072 | #!/usr/bin/env python
#coding:utf-8
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import socket
import struct
import json
from conf import terminal_servers_conf
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class ControlHandler(tornado.web.RequestHandler):
def command(self):
address = (self.host, self.port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
timeout_usecs = 1500000
s.settimeout(timeout_usecs / 1000000.0)
success = {'err_code': 0, 'msg' : 'success'}
fail = {'err_code': 1, 'msg' : 'fail'}
try:
s.connect(address)
head = self.command_line + '\r\n'
ret = s.send(head)
if ret != len(head) :
s.close()
return json.dumps(fail)
#recv response
data = s.recv(4096)
s.close()
if len(data) <= 0:
fail["msg"] = "receive error"
return json.dumps(fail)
if head.startswith('close'):
if data.startswith('CLOSED'):
return json.dumps(success)
else:
fail["msg"] = data
return json.dumps(fail)
elif head.startswith('open'):
if data.startswith('OPENED'):
return json.dumps(success)
else:
fail["msg"] = data
return json.dumps(fail)
elif head.startswith('append'):
if data.startswith('OPENED'):
return json.dumps(success)
else:
fail["msg"] = data
return json.dumps(fail)
else:
fail["msg"] = "command error"
return json.dumps(fail)
except Exception, e:
fail["msg"] = str(e)
return json.dumps(fail)
def post(self):
self.host = self.get_argument("host")
self.port = int(self.get_argument("port"))
self.command_line = self.get_argument("command")
ret = self.command()
self.write(ret)
class IndexHandler(tornado.web.RequestHandler):
def server_status(self):
servers_status = []
for server in terminal_servers_conf:
host = server['host']
port = server['port']
address = (host, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
timeout_usecs = 1500000
s.settimeout(timeout_usecs / 1000000.0)
try:
s.connect(address)
head = "stats\r\n"
ret = s.send(head)
if ret != len(head) :
s.close()
continue
stats_dict = {}
data = s.recv(4096)
for item in data.split("\r\n"):
if item == "END":
break
else:
stats = item.split(" ")
key = stats[1]
val = stats[2]
stats_dict[key] = val
stats_dict['tag'] = server['tag']
stats_dict['host'] = server['host']
stats_dict['port'] = server['port']
s.close()
servers_status.append(stats_dict)
except Exception, e:
print e
pass
return servers_status
def get(self):
status_infos = self.server_status()
self.render("index.html", servers=terminal_servers_conf, stats = status_infos)
class DBHandler(tornado.web.RequestHandler):
def db_info(self, host, port):
servers_status = []
address = (host, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
timeout_usecs = 1500000
s.settimeout(timeout_usecs / 1000000.0)
try:
s.connect(address)
head = "info\r\n"
ret = s.send(head)
if ret != len(head) :
s.close()
return servers_status
data = s.recv(4096)
info_lst = data.split("\n")[2:]
for info in info_lst:
items = filter(lambda x: x, info.split(' '))
if len(items) < 2:
continue
stats_dict = {}
stats_dict['dbid'] = items[0]
stats_dict['tag'] = items[1]
stats_dict['version'] = items[2]
stats_dict['status'] = items[3]
stats_dict['ref'] = items[4]
stats_dict['query_num'] = items[5]
stats_dict['idx_num'] = items[6]
stats_dict['open_time'] = items[7]
stats_dict['path'] = items[8]
servers_status.append(stats_dict)
s.close()
except Exception, e:
pass
return servers_status
def get(self):
host = self.get_argument("host")
port = int(self.get_argument("port"))
tag = self.get_argument("tag")
db_infos = self.db_info(host, port)
self.render("dbinfo.html", servers=terminal_servers_conf, infos = db_infos, host = host, port = port, tag = tag)
class AjaxHandler(tornado.web.RequestHandler):
def db_info(self, host, port):
query_num = 0
address = (host, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
timeout_usecs = 1500000
s.settimeout(timeout_usecs / 1000000.0)
try:
s.connect(address)
head = "info\r\n"
ret = s.send(head)
if ret != len(head) :
s.close()
return 0
data = s.recv(4096)
info_lst = data.split("\n")[2:]
for info in info_lst:
items = filter(lambda x: x, info.split(' '))
if len(items) < 2:
continue
query_num += int(items[5])
s.close()
except Exception, e:
pass
return query_num
def get(self):
host = self.get_argument("host")
port = int(self.get_argument("port"))
query_num = self.db_info(host, port)
out_str = "%d" %(query_num)
self.write(out_str)
handlers = [
(r"/", IndexHandler),
(r"/dbinfo", DBHandler),
(r"/ajax", AjaxHandler),
(r"/controler", ControlHandler)
]
setting = dict(
template_path=os.path.join(os.path.dirname(__file__),"pages"),
static_path=os.path.join(os.path.dirname(__file__),"asserts"),
)
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(handlers, **setting)
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
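# --- Protocol sketch (illustrative; the values below are invented) -----------
# The handlers above speak a memcached-style text protocol to each terminal
# server: one CRLF-terminated command, answered by "STAT <key> <value>" lines
# plus a closing "END" (for "stats"), or by status words such as OPENED/CLOSED
# (for the ControlHandler commands):
#
#   send: "stats\r\n"
#   recv: "STAT query_num 42\r\nSTAT uptime 3600\r\nEND\r\n"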
| bsd-3-clause | 7,699,235,526,345,135,000 | 32.67619 | 120 | 0.49293 | false |
xaxa89/mitmproxy | test/mitmproxy/tools/console/test_master.py | 1 | 1876 | from mitmproxy.test import tflow
from mitmproxy.test import tutils
from mitmproxy.tools import console
from mitmproxy import proxy
from mitmproxy import options
from mitmproxy.tools.console import common
from ... import tservers
import urwid
def test_format_keyvals():
assert common.format_keyvals(
[
("aa", "bb"),
None,
("cc", "dd"),
(None, "dd"),
(None, "dd"),
]
)
def test_options():
assert options.Options(replay_kill_extra=True)
class TestMaster(tservers.MasterTest):
def mkmaster(self, **opts):
if "verbosity" not in opts:
opts["verbosity"] = 1
o = options.Options(**opts)
m = console.master.ConsoleMaster(o, proxy.DummyServer())
m.addons.configure_all(o, o.keys())
return m
def test_basic(self):
m = self.mkmaster()
for i in (1, 2, 3):
try:
self.dummy_cycle(m, 1, b"")
except urwid.ExitMainLoop:
pass
assert len(m.view) == i
def test_run_script_once(self):
m = self.mkmaster()
f = tflow.tflow(resp=True)
m.run_script_once("nonexistent", [f])
assert any("Input error" in str(l) for l in m.logbuffer)
def test_intercept(self):
"""regression test for https://github.com/mitmproxy/mitmproxy/issues/1605"""
m = self.mkmaster(intercept="~b bar")
f = tflow.tflow(req=tutils.treq(content=b"foo"))
m.addons.handle_lifecycle("request", f)
assert not m.view[0].intercepted
f = tflow.tflow(req=tutils.treq(content=b"bar"))
m.addons.handle_lifecycle("request", f)
assert m.view[1].intercepted
f = tflow.tflow(resp=tutils.tresp(content=b"bar"))
m.addons.handle_lifecycle("request", f)
assert m.view[2].intercepted
| mit | -7,520,414,474,566,767,000 | 29.258065 | 84 | 0.588486 | false |
arkem/texthole | common.py | 1 | 3695 | import models
import base64
import json
import logging
import os
import time
from google.appengine.ext import db
from google.appengine.api import memcache
def generate_id():
return base64.b64encode(os.urandom(6), '-_')
def error_message(user, data, message = ""):
return json.dumps({'status': False,
'error': message})
def decode_and_validate(data):
if not data:
return None
try:
decoded = json.loads(data)
except:
return None
if 'body' not in decoded and 'delete' not in decoded:
return None
if ('delete' in decoded or 'overwrite' in decoded) and\
'authenticated' not in decoded:
return None
return decoded
def fetch_message(message_id, cache = False):
message = memcache.get(message_id)
if not message:
q = db.GqlQuery('SELECT * FROM Message ' +
'WHERE deleted = False ' +
'AND message_id = :1 ' +
'AND expiry < :2 ' +
'ORDER BY expiry DESC',
message_id, int(time.time()))
message = q.get()
if cache and message:
memcache.add(message_id, message)
if message and message.expiry < int(time.time()):
message = None
memcache.delete(message_id)
return message
def new_message(data, user, message_id = None, ip = None):
if not message_id:
message_id = generate_id()
message = models.Message(message_id=generate_id(), body="<EMPTY>")
message.body = data.get("body", "<EMPTY>")
if len(message.body) > 2**24:
return error_message(data, user, message = "Message too long")
if "authenticated" in data and user:
message.user = user
username = user.email()
else:
username = "None"
expiry = min(int(data.get('expiry', '60000')), 31557600)
message.expiry = int(time.time()+expiry)
if ip:
message.ip_addr = ip
message.put()
memcache.set(message.message_id, message)
output = {'message_id': message.message_id,
'expiry': message.expiry,
'user': username,
'status': True}
return json.dumps(output)
def overwrite_message(data, user, ip):
old_id = data.get('overwrite', None)
old_message = fetch_message(old_id)
if not old_message:
return error_message(data, user, message = "Overwrite error")
if old_message.user.user_id() != user.user_id():
return error_message(data, user, message = "Authentication error")
old_message.deleted = True
old_message.put()
return new_message(data, user, message_id = old_id, ip = ip)
def delete_message(data, user, ip):
old_id = data.get('delete', None)
old_message = fetch_message(old_id)
if not old_message:
return error_message(data, user, message = "Delete error")
if old_message.user.user_id() != user.user_id():
return error_message(data, user, message = "Authentication error")
old_message.deleted = True
old_message.put()
memcache.delete(old_id)
return json.dumps({'status': True, 'message_id': old_id})
def process_command(data, user = None, ip = None):
if 'body' in data and\
'authenticated' in data and\
'overwrite' in data and\
user:
return overwrite_message(data, user, ip=ip)
    if 'authenticated' in data and\
       'delete' in data and\
       user:
        return delete_message(data, user, ip=ip)
if 'body' in data:
return new_message(data, user, ip=ip)
return error_message(data, user, message = "Unknown command")
| bsd-2-clause | -8,369,444,276,560,896,000 | 27.867188 | 74 | 0.598106 | false |
trilan/django-sitesutils | tests/test_context_processors/tests.py | 1 | 1156 | from django.contrib.sites.models import Site, RequestSite
from django.test import TestCase, RequestFactory
from sitesutils.context_processors import site
from sitesutils.middleware import RequestSiteMiddleware
def create_request(host):
request_factory = RequestFactory(HTTP_HOST=host)
return request_factory.get('/')
class SiteContextProcessorTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=1)
self.middleware = RequestSiteMiddleware()
def test_is_lazy(self):
request = create_request('example.com')
self.middleware.process_request(request)
with self.assertNumQueries(0):
context = site(request)
with self.assertNumQueries(1):
context['site'].domain
    def test_returns_current_site(self):
request = create_request('example.com')
self.middleware.process_request(request)
context = site(request)
self.assertEqual(context['site'], self.site)
def test_returns_request_site(self):
request = create_request('example.com')
context = site(request)
self.assertIsInstance(context['site'], RequestSite)
| isc | 994,248,313,722,007,300 | 31.111111 | 59 | 0.688581 | false |
SUNET/eduid-webapp | src/eduid_webapp/idp/sso_session.py | 1 | 9142 | #
# Copyright (c) 2014 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author : Fredrik Thulin <[email protected]>
#
from __future__ import annotations
import uuid
from dataclasses import asdict, dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Mapping, NewType, Optional, Type
import bson
from bson import ObjectId
from eduid_common.misc.timeutil import utc_now
from eduid_common.session.logindata import ExternalMfaData
from eduid_userdb.idp import IdPUser, IdPUserDb
from eduid_webapp.idp.idp_authn import AuthnData
# A distinct type for session ids
SSOSessionId = NewType('SSOSessionId', bytes)
@dataclass
class SSOSession:
"""
Single Sign On sessions are used to remember a previous authentication
performed, to avoid re-authenticating users for every Service Provider
they visit.
The references to 'authn' here are strictly about what kind of Authn
the user has performed. The resulting SAML AuthnContext is a product
of this, as well as other policy decisions (such as what ID-proofing
has taken place, what AuthnContext the SP requested and so on).
:param user_id: User id, typically MongoDB _id
:param authn_request_id: SAML request id of request that caused authentication
:param authn_credentials: Data about what credentials were used to authn
:param authn_timestamp: Authentication timestamp, in UTC
# These fields are from the 'outer' scope of the session, and are
# duplicated here for now. Can't be changed here, since they are removed in to_dict.
:param created_ts: When the database document was created
:param eppn: User eduPersonPrincipalName
"""
user_id: bson.ObjectId # move away from this - use the eppn instead
authn_request_id: str
authn_credentials: List[AuthnData]
eppn: str
idp_user: IdPUser = field(repr=False) # extra info - not serialised
_id: Optional[ObjectId] = None
session_id: SSOSessionId = field(default_factory=lambda: create_session_id())
created_ts: datetime = field(default_factory=utc_now)
external_mfa: Optional[ExternalMfaData] = None
authn_timestamp: datetime = field(default_factory=utc_now)
def __str__(self) -> str:
return f'<{self.__class__.__name__}: eppn={self.eppn}, ts={self.authn_timestamp.isoformat()}>'
def to_dict(self) -> Dict[str, Any]:
""" Return the object in dict format (serialized for storing in MongoDB).
For legacy reasons, some of the attributes are stored in an 'inner' scope called 'data':
{
'_id': ObjectId('5fcde44d56cf512b51f1ac4e'),
'session_id': b'ZjYzOTcwNWItYzUyOS00M2U1LWIxODQtODMxYTJhZjQ0YzA1',
'username': 'hubba-bubba',
'data': {
'user_id': ObjectId('5fd09748c07041072b237ae0')
'authn_request_id': 'id-IgHyGTmxBEORfx5NJ',
'authn_credentials': [
{
'cred_id': '5fc8b78cbdaa0bf337490db1',
'authn_ts': datetime.fromisoformat('2020-09-13T12:26:40+00:00'),
}
],
'authn_timestamp': 1600000000,
'external_mfa': None,
},
'created_ts': datetime.fromisoformat('2020-12-07T08:14:05.744+00:00'),
}
"""
res = asdict(self)
res['authn_credentials'] = [x.to_dict() for x in self.authn_credentials]
if self.external_mfa is not None:
res['external_mfa'] = self.external_mfa.to_session_dict()
# Remove extra fields
del res['idp_user']
# Use integer format for this in the database until this code (from_dict() below) has been
# deployed everywhere so we can switch to datetime.
# TODO: Switch over to datetime.
res['authn_timestamp'] = int(self.authn_timestamp.timestamp())
# Store these attributes in an 'inner' scope (called 'data')
_data = {}
for this in ['user_id', 'authn_request_id', 'authn_credentials', 'authn_timestamp', 'external_mfa']:
_data[this] = res.pop(this)
res['data'] = _data
# rename 'eppn' to 'username' in the database, for legacy reasons
res['username'] = res.pop('eppn')
return res
@classmethod
def from_dict(cls: Type[SSOSession], data: Mapping[str, Any], userdb: IdPUserDb) -> SSOSession:
""" Construct element from a data dict in database format. """
_data = dict(data) # to not modify callers data
if 'data' in _data:
# move contents from 'data' to top-level of dict
_data.update(_data.pop('data'))
_data['authn_credentials'] = [AuthnData.from_dict(x) for x in _data['authn_credentials']]
if 'external_mfa' in _data and _data['external_mfa'] is not None:
            _data['external_mfa'] = ExternalMfaData.from_session_dict(_data['external_mfa'])
if 'user_id' in _data:
_data['idp_user'] = userdb.lookup_user(_data['user_id'])
if not _data['idp_user']:
raise RuntimeError(f'User with id {repr(_data["user_id"])} not found')
# Compatibility code to convert integer format to datetime format. Keep this until nothing writes
# authn_timestamp as integers, and all the existing sessions have expired.
# TODO: Remove this code when all sessions in the database have datetime authn_timestamps.
if isinstance(_data.get('authn_timestamp'), int):
_data['authn_timestamp'] = datetime.fromtimestamp(_data['authn_timestamp'], tz=timezone.utc)
# rename 'username' to 'eppn'
if 'eppn' not in _data:
_data['eppn'] = _data.pop('username')
return cls(**_data)
@property
def public_id(self) -> str:
"""
Return a identifier for this session that can't be used to hijack sessions
if leaked through a log file etc.
"""
return f'{self.user_id}.{self.authn_timestamp.timestamp()}'
@property
def minutes_old(self) -> int:
""" Return the age of this SSO session, in minutes. """
age = (utc_now() - self.authn_timestamp).total_seconds()
return int(age) // 60
def add_authn_credential(self, authn: AuthnData) -> None:
""" Add information about a credential successfully used in this session. """
if not isinstance(authn, AuthnData):
raise ValueError(f'data should be AuthnData (not {type(authn)})')
# Store only the latest use of a particular credential.
_creds: Dict[str, AuthnData] = {x.cred_id: x for x in self.authn_credentials}
_existing = _creds.get(authn.cred_id)
# TODO: remove this in the future - don't have to set tz when all SSO sessions without such have expired
if _existing and _existing.timestamp.tzinfo is None:
_existing.timestamp = _existing.timestamp.replace(tzinfo=timezone.utc)
# only replace if newer
if not _existing or authn.timestamp > _existing.timestamp:
_creds[authn.cred_id] = authn
# sort on cred_id to have deterministic order in tests
_list = list(_creds.values())
self.authn_credentials = sorted(_list, key=lambda x: x.cred_id)
def create_session_id() -> SSOSessionId:
"""
Create a unique value suitable for use as session identifier.
The uniqueness and inability to guess is security critical!
:return: session_id as bytes (to match what cookie decoding yields)
"""
return SSOSessionId(bytes(str(uuid.uuid4()), 'ascii'))
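# --- Illustrative sketch (not part of the module API) ------------------------
# Reproduces the "latest use of a credential wins" rule from
# SSOSession.add_authn_credential() with a minimal stand-in carrying the two
# attributes that rule relies on; the real AuthnData class has more fields.
def _sketch_latest_credential_wins() -> None:
    @dataclass
    class _FakeAuthn:  # hypothetical stand-in, not the real AuthnData
        cred_id: str
        timestamp: datetime

    older = _FakeAuthn('abc', datetime(2020, 1, 1, tzinfo=timezone.utc))
    newer = _FakeAuthn('abc', datetime(2020, 6, 1, tzinfo=timezone.utc))
    creds: Dict[str, '_FakeAuthn'] = {}
    for authn in (older, newer):
        existing = creds.get(authn.cred_id)
        if not existing or authn.timestamp > existing.timestamp:
            creds[authn.cred_id] = authn
    assert creds['abc'] is newer  # only the most recent use is kept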
| bsd-3-clause | 2,594,995,407,846,799,400 | 44.257426 | 112 | 0.662656 | false |
devopshq/crosspm | crosspm/helpers/exceptions.py | 1 | 1087 | # -*- coding: utf-8 -*-
CROSSPM_ERRORCODES = (
CROSSPM_ERRORCODE_SUCCESS,
CROSSPM_ERRORCODE_UNKNOWN_ERROR,
CROSSPM_ERRORCODE_WRONG_ARGS,
CROSSPM_ERRORCODE_FILE_DEPS_NOT_FOUND,
CROSSPM_ERRORCODE_WRONG_SYNTAX,
CROSSPM_ERRORCODE_MULTIPLE_DEPS,
CROSSPM_ERRORCODE_NO_FILES_TO_PACK,
CROSSPM_ERRORCODE_SERVER_CONNECT_ERROR,
CROSSPM_ERRORCODE_PACKAGE_NOT_FOUND,
CROSSPM_ERRORCODE_PACKAGE_BRANCH_NOT_FOUND,
CROSSPM_ERRORCODE_VERSION_PATTERN_NOT_MATCH,
CROSSPM_ERRORCODE_UNKNOWN_OUT_TYPE,
CROSSPM_ERRORCODE_FILE_IO,
CROSSPM_ERRORCODE_CONFIG_NOT_FOUND,
CROSSPM_ERRORCODE_CONFIG_IO_ERROR,
CROSSPM_ERRORCODE_CONFIG_FORMAT_ERROR,
CROSSPM_ERRORCODE_ADAPTER_ERROR,
CROSSPM_ERRORCODE_UNKNOWN_ARCHIVE,
) = range(18)
class CrosspmException(Exception):
def __init__(self, error_code, msg=''):
super().__init__(msg)
self.error_code = error_code
self.msg = msg
class CrosspmExceptionWrongArgs(CrosspmException):
def __init__(self, msg=''):
super().__init__(CROSSPM_ERRORCODE_WRONG_ARGS, msg)
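# --- Usage sketch (illustrative; the message text is invented) ---------------
if __name__ == '__main__':
    try:
        raise CrosspmExceptionWrongArgs('unknown option: --frobnicate')
    except CrosspmException as exc:
        # every CrosspmException carries one of the CROSSPM_ERRORCODES values
        assert exc.error_code == CROSSPM_ERRORCODE_WRONG_ARGS
        print(exc.msg)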
| mit | 1,808,988,960,025,678,600 | 30.970588 | 59 | 0.701012 | false |
gpennington/PyMarvel | marvel/event.py | 1 | 6417 | # -*- coding: utf-8 -*-
__author__ = 'Garrett Pennington'
__date__ = '02/07/14'
from .core import (MarvelObject, DataWrapper, DataContainer, Summary, List,
                   Image, str_to_datetime)
class EventDataWrapper(DataWrapper):
@property
def data(self):
return EventDataContainer(self.marvel, self.dict['data'])
class EventDataContainer(DataContainer):
@property
def results(self):
return self.list_to_instance_list(self.dict['results'], Event)
class Event(MarvelObject):
"""
Event object
Takes a dict of character attrs
"""
_resource_url = 'events'
@property
def id(self):
return self.dict['id']
@property
def title(self):
return self.dict['title']
@property
def description(self):
"""
:returns: str -- The preferred description of the comic.
"""
return self.dict['description']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def urls(self):
return self.dict['urls']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def start(self):
return str_to_datetime(self.dict['start'])
@property
def start_raw(self):
return self.dict['start']
@property
def end(self):
return str_to_datetime(self.dict['end'])
@property
def end_raw(self):
return self.dict['end']
@property
def thumbnail(self):
return Image(self.marvel, self.dict['thumbnail'])
@property
def comics(self):
from .comic import ComicList
return ComicList(self.marvel, self.dict['comics'])
@property
def stories(self):
from .story import StoryList
return StoryList(self.marvel, self.dict['stories'])
@property
def series(self):
"""
Returns SeriesList object
"""
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def characters(self):
from .character import CharacterList
return CharacterList(self.marvel, self.dict['characters'])
@property
def creators(self):
from .creator import CreatorList
return CreatorList(self.marvel, self.dict['creators'])
@property
def next(self):
return EventSummary(self.marvel, self.dict['next'])
@property
    def previous(self):
return EventSummary(self.marvel, self.dict['previous'])
def get_creators(self, *args, **kwargs):
"""
Returns a full CreatorDataWrapper object for this event.
/events/{eventId}/creators
:returns: CreatorDataWrapper -- A new request to API. Contains full results set.
"""
from .creator import Creator, CreatorDataWrapper
return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)
def get_characters(self, *args, **kwargs):
"""
Returns a full CharacterDataWrapper object for this event.
/events/{eventId}/characters
:returns: CreatorDataWrapper -- A new request to API. Contains full results set.
"""
from .character import Character, CharacterDataWrapper
return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs)
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object for this event.
/events/{eventId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_series(self, *args, **kwargs):
"""
Returns a full SeriesDataWrapper object for this event.
/events/{eventId}/series
:returns: SeriesDataWrapper -- A new request to API. Contains full results set.
"""
from .series import Series, SeriesDataWrapper
return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
def get_stories(self, *args, **kwargs):
"""
Returns a full StoryDataWrapper object for this event.
/events/{eventId}/stories
:returns: StoriesDataWrapper -- A new request to API. Contains full results set.
"""
from .story import Story, StoryDataWrapper
return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
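# --- Usage sketch (illustrative; only the get_*() calls are defined here) ----
# Typical flow once an Event instance has been obtained from the API wrapper:
#
#   event = ...                      # an Event fetched via the Marvel client
#   comics = event.get_comics()      # fresh API request -> ComicDataWrapper
#   for comic in comics.data.results:
#       print(comic.title)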
"""
Event {
id (int, optional): The unique ID of the event resource.,
title (string, optional): The title of the event.,
description (string, optional): A description of the event.,
resourceURI (string, optional): The canonical URL identifier for this resource.,
urls (Array[Url], optional): A set of public web site URLs for the event.,
modified (Date, optional): The date the resource was most recently modified.,
start (Date, optional): The date of publication of the first issue in this event.,
end (Date, optional): The date of publication of the last issue in this event.,
thumbnail (Image, optional): The representative image for this event.,
comics (ComicList, optional): A resource list containing the comics in this event.,
stories (StoryList, optional): A resource list containing the stories in this event.,
series (SeriesList, optional): A resource list containing the series in this event.,
characters (CharacterList, optional): A resource list containing the characters which appear in this event.,
creators (CreatorList, optional): A resource list containing creators whose work appears in this event.,
next (EventSummary, optional): A summary representation of the event which follows this event.,
previous (EventSummary, optional): A summary representation of the event which preceded this event.
}
"""
class EventList(List):
"""
EventList object
"""
@property
def items(self):
"""
Returns List of EventSummary objects
"""
return self.list_to_instance_list(self.dict['items'], EventSummary)
class EventSummary(Summary):
"""
EventSummary object
""" | mit | -519,258,431,733,276,000 | 29.273585 | 112 | 0.651239 | false |
thumbor-community/redis-tornado | tc_redis_tornado/storages/redis_tornado_storage.py | 1 | 2895 | # -*- coding: utf-8 -*-
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2015 Thumbor Community
import tornadis
import tornado
from thumbor.storages import BaseStorage
class Storage(BaseStorage):
pool = None
def __init__(self, context):
'''Initialize the RedisStorage
:param thumbor.context.Context shared_client: Current context
:param boolean shared_client: When set to True a singleton client will
be used.
'''
BaseStorage.__init__(self, context)
if not Storage.pool:
Storage.pool = tornadis.ClientPool(
max_size=self.context.config.get(
'REDIS_TORNADO_STORAGE_POOL_MAX_SIZE',
-1
),
client_timeout=self.context.config.get(
'REDIS_TORNADO_STORAGE_CLIENT_TIMEOUT',
-1
),
port=self.context.config.REDIS_STORAGE_SERVER_PORT,
host=self.context.config.REDIS_STORAGE_SERVER_HOST,
# db=self.context.config.REDIS_STORAGE_SERVER_DB,
password=self.context.config.REDIS_STORAGE_SERVER_PASSWORD
)
@tornado.gen.coroutine
def put(self, path, bytes):
ttl = self.context.config.STORAGE_EXPIRATION_SECONDS
with (yield Storage.pool.connected_client()) as client:
if ttl:
yield client.call('SETEX', path, ttl, bytes)
else:
yield client.call('SET', path, bytes)
@tornado.gen.coroutine
def exists(self, path):
with (yield Storage.pool.connected_client()) as client:
exists = yield client.call('EXISTS', path)
raise tornado.gen.Return(exists)
@tornado.gen.coroutine
def remove(self, path):
with (yield Storage.pool.connected_client()) as client:
yield client.call('DEL', path)
@tornado.gen.coroutine
def get(self, path):
with (yield Storage.pool.connected_client()) as client:
buffer = yield client.call('GET', path)
raise tornado.gen.Return(buffer)
@tornado.gen.coroutine
def put_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return
if not self.context.server.security_key:
raise RuntimeError(
"STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no "
"SECURITY_KEY specified"
)
key = self.__key_for(path)
with (yield Storage.pool.connected_client()) as client:
yield client.call('SET', key, self.context.server.security_key)
def __key_for(self, url):
return 'thumbor-crypto-%s' % url
def __detector_key_for(self, url):
return 'thumbor-detector-%s' % url
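# --- Configuration sketch (illustrative; every value is an example, not a
# default). These are the thumbor config keys the storage reads:
#
#   REDIS_STORAGE_SERVER_HOST = 'localhost'
#   REDIS_STORAGE_SERVER_PORT = 6379
#   REDIS_STORAGE_SERVER_PASSWORD = None
#   REDIS_TORNADO_STORAGE_POOL_MAX_SIZE = 50     # -1 keeps the pool unbounded
#   REDIS_TORNADO_STORAGE_CLIENT_TIMEOUT = 20    # -1 disables the timeout
#   STORAGE_EXPIRATION_SECONDS = 86400           # used as the SETEX ttl in put()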
| mit | 5,562,602,277,216,316,000 | 32.275862 | 78 | 0.584111 | false |
rock-planning/planning-lama | lama/translate/invariant_finder.py | 1 | 5900 | #! /usr/bin/env python
# -*- coding: latin-1 -*-
from collections import deque, defaultdict
import itertools
import time
import invariants
import pddl
import timers
class BalanceChecker(object):
def __init__(self, task, reachable_action_params):
self.predicates_to_add_actions = defaultdict(set)
self.action_name_to_heavy_action = {}
for act in task.actions:
action = self.add_inequality_preconds(act, reachable_action_params)
too_heavy_effects = []
create_heavy_act = False
heavy_act = action
for eff in action.effects:
too_heavy_effects.append(eff)
if eff.parameters: # universal effect
create_heavy_act = True
too_heavy_effects.append(eff.copy())
if not eff.literal.negated:
predicate = eff.literal.predicate
self.predicates_to_add_actions[predicate].add(action)
if create_heavy_act:
heavy_act = pddl.Action(action.name, action.parameters,
action.precondition, too_heavy_effects,
action.cost)
# heavy_act: duplicated universal effects and assigned unique names
# to all quantified variables (implicitly in constructor)
self.action_name_to_heavy_action[action.name] = heavy_act
def get_threats(self, predicate):
return self.predicates_to_add_actions.get(predicate, set())
def get_heavy_action(self, action_name):
return self.action_name_to_heavy_action[action_name]
def add_inequality_preconds(self, action, reachable_action_params):
if reachable_action_params is None or len(action.parameters) < 2:
return action
inequal_params = []
combs = itertools.combinations(list(range(len(action.parameters))), 2)
for pos1, pos2 in combs:
inequality = True
for params in reachable_action_params[action.name]:
if params[pos1] == params[pos2]:
inequality = False
break
if inequality:
inequal_params.append((pos1, pos2))
if inequal_params:
precond_parts = list(action.precondition.parts)
for pos1, pos2 in inequal_params:
param1 = action.parameters[pos1].name
param2 = action.parameters[pos2].name
new_cond = pddl.NegatedAtom("=", (param1, param2))
precond_parts.append(new_cond)
precond = action.precondition.change_parts(precond_parts)
return pddl.Action(action.name, action.parameters, precond,
action.effects, action.cost)
else:
return action
def get_fluents(task):
fluent_names = set()
for action in task.actions:
for eff in action.effects:
fluent_names.add(eff.literal.predicate)
return [pred for pred in task.predicates if pred.name in fluent_names]
def get_initial_invariants(task):
for predicate in get_fluents(task):
all_args = list(range(len(predicate.arguments)))
for omitted_arg in [-1] + all_args:
order = [i for i in all_args if i != omitted_arg]
part = invariants.InvariantPart(predicate.name, order, omitted_arg)
yield invariants.Invariant((part,))
# Input file might be grounded, beware of too many invariant candidates
MAX_CANDIDATES = 100000
MAX_TIME = 300
def find_invariants(task, reachable_action_params):
candidates = deque(get_initial_invariants(task))
print(len(candidates), "initial candidates")
seen_candidates = set(candidates)
balance_checker = BalanceChecker(task, reachable_action_params)
def enqueue_func(invariant):
if len(seen_candidates) < MAX_CANDIDATES and invariant not in seen_candidates:
candidates.append(invariant)
seen_candidates.add(invariant)
start_time = time.clock()
while candidates:
candidate = candidates.popleft()
if time.clock() - start_time > MAX_TIME:
print("Time limit reached, aborting invariant generation")
return
if candidate.check_balance(balance_checker, enqueue_func):
yield candidate
def useful_groups(invariants, initial_facts):
predicate_to_invariants = defaultdict(list)
for invariant in invariants:
for predicate in invariant.predicates:
predicate_to_invariants[predicate].append(invariant)
nonempty_groups = set()
overcrowded_groups = set()
for atom in initial_facts:
if isinstance(atom, pddl.Assign):
continue
for invariant in predicate_to_invariants.get(atom.predicate, ()):
group_key = (invariant, tuple(invariant.get_parameters(atom)))
if group_key not in nonempty_groups:
nonempty_groups.add(group_key)
else:
overcrowded_groups.add(group_key)
useful_groups = nonempty_groups - overcrowded_groups
for (invariant, parameters) in useful_groups:
yield [part.instantiate(parameters) for part in invariant.parts]
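# --- Illustrative note (example predicates invented) --------------------------
# A group key is "useful" when exactly one initial fact instantiates it: for an
# invariant over at(?obj, ?loc) that omits ?loc, the facts at(truck1, depot)
# and at(truck2, depot) yield one group per truck, while two at() facts for the
# same truck would mark that truck's group overcrowded and discard it.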
def get_groups(task, reachable_action_params=None):
with timers.timing("Finding invariants"):
invariants = list(find_invariants(task, reachable_action_params))
with timers.timing("Checking invariant weight"):
result = list(useful_groups(invariants, task.init))
return result
if __name__ == "__main__":
import pddl
print("Parsing...")
task = pddl.open()
print("Finding invariants...")
    for invariant in find_invariants(task, None):
print(invariant)
print("Finding fact groups...")
groups = get_groups(task)
for group in groups:
print("[%s]" % ", ".join(map(str, group)))
| gpl-3.0 | 1,017,633,145,280,532,500 | 38.597315 | 86 | 0.622542 | false |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/io/fits/util.py | 1 | 31000 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
from __future__ import division
import gzip as _system_gzip
import itertools
import io
import mmap
import os
import platform
import signal
import string
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from distutils.version import LooseVersion
import numpy as np
try:
from StringIO import StringIO
except ImportError:
# Use for isinstance test only
class StringIO(object):
pass
from ...extern import six
from ...extern.six import (string_types, integer_types, text_type,
binary_type, next)
from ...extern.six.moves import zip
from ...utils import wraps
from ...utils.compat import ignored
from ...utils.compat import gzip as _astropy_gzip
from ...utils.exceptions import AstropyUserWarning
_GZIP_FILE_TYPES = (_astropy_gzip.GzipFile, _system_gzip.GzipFile)
if six.PY3:
cmp = lambda a, b: (a > b) - (a < b)
elif six.PY2:
cmp = cmp
class NotifierMixin(object):
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener(object):
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with ignored(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = '_update_{0}'.format(notification)
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super(NotifierMixin, self).__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state['_listeners'] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
itersubclasses(cls)
Generator over all subclasses of a given class, in depth first order.
>>> class A(object): pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL (new-style) classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if not isinstance(cls, type):
raise TypeError('itersubclasses must be called with '
'new-style classes, not %.100r' % cls)
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=lambda s: s.__name__):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.currentThread()
single_thread = (threading.activeCount() == 1 and
curr_thread.getName() == 'MainThread')
class SigintHandler(object):
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn('KeyboardInterrupt ignored until %s is '
'complete!' % func.__name__, AstropyUserWarning)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
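# --- Usage sketch (the function name below is invented for illustration) -----
#
#   @ignore_sigint
#   def flush_critical_data(hdulist):
#       ...  # a Ctrl-C arriving while this runs is deferred until it returns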
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
# StopIter if b happens to be empty
break
return zip(a, b)
def encode_ascii(s):
"""
In Python 2 this is a no-op. Strings are left alone. In Python 3 this
will be replaced with a function that actually encodes unicode strings to
ASCII bytes.
"""
return s
def decode_ascii(s):
"""
In Python 2 this is a no-op. Strings are left alone. In Python 3 this
will be replaced with a function that actually decodes ascii bytes to
unicode.
"""
return s
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if six.PY3 and hasattr(f, 'readable'):
return f.readable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'read'):
return False
if hasattr(f, 'mode') and not any((c in f.mode for c in 'r+')):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if six.PY3 and hasattr(f, 'writable'):
return f.writable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'write'):
return False
if hasattr(f, 'mode') and not any((c in f.mode for c in 'wa+')):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
if six.PY3:
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False
elif six.PY2:
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
return isinstance(f, file)
if six.PY3:
def fileobj_open(filename, mode):
"""
A wrapper around the `open()` builtin.
This exists because in Python 3, `open()` returns an
`io.BufferedReader` by default. This is bad, because
`io.BufferedReader` doesn't support random access, which we need in
some cases. In the Python 3 case (implemented in the py3compat module)
we must call open with buffering=0 to get a raw random-access file
reader.
"""
return open(filename, mode, buffering=0)
elif six.PY2:
def fileobj_open(filename, mode):
"""
A wrapper around the `open()` builtin.
This exists because in Python 3, `open()` returns an
`io.BufferedReader` by default. This is bad, because
`io.BufferedReader` doesn't support random access, which we need in
some cases. In the Python 3 case (implemented in the py3compat module)
we must call open with buffering=0 to get a raw random-access file
reader.
"""
return open(filename, mode)
def fileobj_name(f):
"""
Returns the 'name' of file-like object f, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, string_types):
return f
elif isinstance(f, _GZIP_FILE_TYPES):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
return f.filename
elif hasattr(f, '__class__'):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if f is not a
file-like object.
"""
if hasattr(f, 'closed'):
return f.closed
elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):
return f.fileobj.closed
elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
fileobj = f.fileobj
elif hasattr(f, 'fileobj_mode'):
# Specifically for astropy.io.fits.file._File objects
return f.fileobj_mode
elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
fileobj = f.fp
elif hasattr(f, 'mode'):
fileobj = f
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
# I've noticed that sometimes Python can produce modes like 'r+b' which I
# would consider kind of a bug--mode strings should be normalized. Let's
# normalize it for them:
mode = f.mode
if isinstance(f, _GZIP_FILE_TYPES):
# GzipFiles can be either readonly or writeonly
if mode == _system_gzip.READ:
return 'rb'
elif mode == _system_gzip.WRITE:
return 'wb'
else:
# This shouldn't happen?
return None
if '+' in mode:
mode = mode.replace('+', '')
mode += '+'
if _fileobj_is_append_mode(f) and 'a' not in mode:
mode = mode.replace('r', 'a').replace('w', 'a')
return mode
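# Example of the normalization above (values illustrative): a raw mode of
# 'r+b' is returned as 'rb+', and a mode of 'w' on a descriptor the OS
# reports as append-only is returned as 'a'.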
def _fileobj_is_append_mode(f):
"""Normally the way to tell if a file is in append mode is if it has
'a' in the mode string. However on Python 3 (or in particular with
the io module) this can't be relied on. See
http://bugs.python.org/issue18876.
"""
if 'a' in f.mode:
# Take care of the obvious case first
return True
# We might have an io.FileIO in which case the only way to know for sure
# if the file is in append mode is to ask the file descriptor
if not hasattr(f, 'fileno'):
# Who knows what this is?
return False
# Call platform-specific _is_append_mode
# If this file is already closed this can result in an error
try:
return _is_append_mode_platform(f.fileno())
except (ValueError, IOError):
return False
if sys.platform.startswith('win32'):
# This global variable is used in _is_append_mode to cache the computed
# size of the ioinfo struct from msvcrt which may have a different size
# depending on the version of the library and how it was compiled
_sizeof_ioinfo = None
def _make_is_append_mode():
# We build the platform-specific _is_append_mode function for Windows
# inside a function factory in order to avoid cluttering the local
# namespace with ctypes stuff
from ctypes import (cdll, c_size_t, c_void_p, c_int, c_char,
Structure, POINTER, cast)
try:
from ctypes.util import find_msvcrt
except ImportError:
        # find_msvcrt is not available on Python 2.5 so we have to provide
        # it ourselves anyway
from distutils.msvccompiler import get_build_version
def find_msvcrt():
version = get_build_version()
if version is None:
# better be safe than sorry
return None
if version <= 6:
clibname = 'msvcrt'
else:
clibname = 'msvcr%d' % (version * 10)
            # If Python was built in debug mode
import imp
if imp.get_suffixes()[0][0] == '_d.pyd':
clibname += 'd'
return clibname+'.dll'
def _dummy_is_append_mode(fd):
warnings.warn(
'Could not find appropriate MS Visual C Runtime '
'library or library is corrupt/misconfigured; cannot '
'determine whether your file object was opened in append '
'mode. Please consider using a file object opened in write '
'mode instead.')
return False
msvcrt_dll = find_msvcrt()
if msvcrt_dll is None:
# If for some reason the C runtime can't be located then we're dead
# in the water. Just return a dummy function
return _dummy_is_append_mode
msvcrt = cdll.LoadLibrary(msvcrt_dll)
# Constants
IOINFO_L2E = 5
IOINFO_ARRAY_ELTS = 1 << IOINFO_L2E
IOINFO_ARRAYS = 64
FAPPEND = 0x20
_NO_CONSOLE_FILENO = -2
# Types
intptr_t = POINTER(c_int)
class my_ioinfo(Structure):
_fields_ = [('osfhnd', intptr_t),
('osfile', c_char)]
# Functions
_msize = msvcrt._msize
_msize.argtypes = (c_void_p,)
_msize.restype = c_size_t
# Variables
# Since we don't know how large the ioinfo struct is just treat the
# __pioinfo array as an array of byte pointers
__pioinfo = cast(msvcrt.__pioinfo, POINTER(POINTER(c_char)))
# Determine size of the ioinfo struct; see the comment above where
# _sizeof_ioinfo = None is set
global _sizeof_ioinfo
if __pioinfo[0] is not None:
_sizeof_ioinfo = _msize(__pioinfo[0]) // IOINFO_ARRAY_ELTS
if not _sizeof_ioinfo:
# This shouldn't happen, but I suppose it could if one is using a
# broken msvcrt, or just happened to have a dll of the same name
# lying around.
return _dummy_is_append_mode
def _is_append_mode(fd):
global _sizeof_ioinfo
if fd != _NO_CONSOLE_FILENO:
idx1 = fd >> IOINFO_L2E # The index into the __pioinfo array
# The n-th ioinfo pointer in __pioinfo[idx1]
idx2 = fd & ((1 << IOINFO_L2E) - 1)
if 0 <= idx1 < IOINFO_ARRAYS and __pioinfo[idx1] is not None:
# Doing pointer arithmetic in ctypes is irritating
pio = c_void_p(cast(__pioinfo[idx1], c_void_p).value +
idx2 * _sizeof_ioinfo)
ioinfo = cast(pio, POINTER(my_ioinfo)).contents
return bool(ord(ioinfo.osfile) & FAPPEND)
return False
return _is_append_mode
_is_append_mode_platform = _make_is_append_mode()
del _make_is_append_mode
else:
import fcntl
def _is_append_mode_platform(fd):
return bool(fcntl.fcntl(fd, fcntl.F_GETFL) & os.O_APPEND)
def fileobj_is_binary(f):
"""
Returns True if the give file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, 'binary'):
return f.binary
if io is not None and isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return 'b' in mode
else:
return True
if six.PY3:
maketrans = str.maketrans
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
elif six.PY2:
maketrans = string.maketrans
def translate(s, table, deletechars):
"""
This is a version of string/unicode.translate() that can handle string
or unicode strings the same way using a translation table made with
`string.maketrans`.
"""
if isinstance(s, str):
return s.translate(table, deletechars)
elif isinstance(s, text_type):
table = dict((x, ord(table[x])) for x in range(256)
if ord(table[x]) != x)
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, *args, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split('\n\n')
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, *args, **kwargs)
return '\n\n'.join(maybe_fill(p) for p in paragraphs)
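# Sketch of the behavior (inputs assumed): fill(text, 70) wraps each
# paragraph of `text` independently, so blank lines separating paragraphs
# survive, unlike a single textwrap.fill over the whole string.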
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count, sep):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if (sys.platform == 'darwin' and
LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count, sep=sep)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg, sep=sep)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count, sep=sep)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
return np.fromstring(s, dtype=dtype, count=count, sep=sep)
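# Note (illustrative): for a float64 dtype the chunked path above reads at
# most 1024 ** 3 // 8 = 134217728 elements (~1 GB) per np.fromfile call.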
_OSX_WRITE_LIMIT = (2 ** 32) - 1
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : `~numpy.ndarray`
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
if isfile(outfile):
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
arr.nbytes % 4096 == 0):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith('win'):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx:idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface, which
# unfortunately there's no simple way to check)
fileobj.write(arr.data)
elif hasattr(np, 'nditer'):
# nditer version for non-contiguous arrays
for item in np.nditer(arr):
fileobj.write(item.tostring())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if ((sys.byteorder == 'little' and byteorder == '>')
or (sys.byteorder == 'big' and byteorder == '<')):
for item in arr.flat:
fileobj.write(item.byteswap().tostring())
else:
for item in arr.flat:
fileobj.write(item.tostring())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, text_type):
s = encode_ascii(s)
    elif not binmode and not isinstance(s, text_type):
s = decode_ascii(s)
elif isinstance(f, StringIO) and isinstance(s, np.ndarray):
# Workaround for StringIO/ndarray incompatibility
s = s.data
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
    the same as the old dtype and the types are not both numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif (array.dtype.itemsize == dtype.itemsize and not
(np.issubdtype(array.dtype, np.number) and
np.issubdtype(dtype, np.number))):
# Includes a special case when both dtypes are at least numeric to
# account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218
return array.view(dtype)
else:
return array.astype(dtype)
def _unsigned_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1)
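# For instance, _unsigned_zero(np.dtype('uint16')) == 32768 (1 << 15), the
# exact midpoint of the uint16 range.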
def _is_pseudo_unsigned(dtype):
return dtype.kind == 'u' and dtype.itemsize >= 2
def _is_int(val):
return isinstance(val, integer_types + (np.integer,))
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(input, strlen):
"""
    Split a long string into parts where each part is no longer
    than `strlen` and no word is cut into two pieces. But if
    there is a single word longer than `strlen`, it will be
    split in the middle of the word.
"""
words = []
nblanks = input.count(' ')
nmax = max(nblanks, len(input) // strlen + 1)
arr = np.fromstring((input + ' '), dtype=(binary_type, 1))
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
for idx in range(nmax):
try:
loc = np.nonzero(blank_loc >= strlen + offset)[0][0]
offset = blank_loc[loc - 1] + 1
if loc == 0:
offset = -1
        except IndexError:
offset = len(input)
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = xoffset + strlen
# collect the pieces in a list
words.append(input[xoffset:offset])
if len(input) == offset:
break
xoffset = offset
return words
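# Illustrative example (worked through the algorithm above):
# _words_group('The quick brown fox', 10) returns ['The quick ', 'brown fox'].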
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the base name of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
| mit | 5,024,605,963,164,226,000 | 30.600408 | 95 | 0.601613 | false |
pattisdr/osf.io | api/base/urls.py | 1 | 4679 | from django.conf.urls import include, url
from django.views.generic.base import RedirectView
from . import views
from . import settings
from . import versioning
default_version = versioning.decimal_version_to_url_path(settings.REST_FRAMEWORK['DEFAULT_VERSION'])
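# For example, a DEFAULT_VERSION of '2.0' is assumed to map to the URL path
# fragment 'v2', matching the '^(?P<version>(v2))/' pattern below.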
# Please keep URLs alphabetized for auto-generated documentation
urlpatterns = [
url(
r'^_/',
include(
[
url(r'^', include('waffle.urls')),
url(r'^wb/', include('api.wb.urls', namespace='wb')),
url(r'^banners/', include('api.banners.urls', namespace='banners')),
url(r'^crossref/', include('api.crossref.urls', namespace='crossref')),
url(r'^chronos/', include('api.chronos.urls', namespace='chronos')),
url(r'^meetings/', include('api.meetings.urls', namespace='meetings')),
url(r'^metrics/', include('api.metrics.urls', namespace='metrics')),
],
),
),
url(
'^(?P<version>(v2))/',
include(
[
url(r'^$', views.root, name='root'),
url(r'^status/', views.status_check, name='status_check'),
url(r'^actions/', include('api.actions.urls', namespace='actions')),
url(r'^addons/', include('api.addons.urls', namespace='addons')),
url(r'^applications/', include('api.applications.urls', namespace='applications')),
url(r'^citations/', include('api.citations.urls', namespace='citations')),
url(r'^collections/', include('api.collections.urls', namespace='collections')),
url(r'^comments/', include('api.comments.urls', namespace='comments')),
url(r'^docs/', RedirectView.as_view(pattern_name=views.root), name='redirect-to-root', kwargs={'version': default_version}),
url(r'^files/', include('api.files.urls', namespace='files')),
url(r'^guids/', include('api.guids.urls', namespace='guids')),
url(r'^identifiers/', include('api.identifiers.urls', namespace='identifiers')),
url(r'^institutions/', include('api.institutions.urls', namespace='institutions')),
url(r'^licenses/', include('api.licenses.urls', namespace='licenses')),
url(r'^logs/', include('api.logs.urls', namespace='logs')),
url(r'^metaschemas/', include('api.metaschemas.urls', namespace='metaschemas')),
url(r'^schemas/', include('api.schemas.urls', namespace='schemas')),
url(r'^nodes/', include('api.nodes.urls', namespace='nodes')),
url(r'^preprints/', include('api.preprints.urls', namespace='preprints')),
url(r'^preprint_providers/', include('api.preprint_providers.urls', namespace='preprint_providers')),
url(r'^regions/', include('api.regions.urls', namespace='regions')),
url(r'^providers/', include('api.providers.urls', namespace='providers')),
url(r'^registrations/', include('api.registrations.urls', namespace='registrations')),
url(r'^requests/', include('api.requests.urls', namespace='requests')),
url(r'^scopes/', include('api.scopes.urls', namespace='scopes')),
url(r'^search/', include('api.search.urls', namespace='search')),
url(r'^subscriptions/', include('api.subscriptions.urls', namespace='subscriptions')),
url(r'^taxonomies/', include('api.taxonomies.urls', namespace='taxonomies')),
url(r'^test/', include('api.test.urls', namespace='test')),
url(r'^tokens/', include('api.tokens.urls', namespace='tokens')),
url(r'^users/', include('api.users.urls', namespace='users')),
url(r'^view_only_links/', include('api.view_only_links.urls', namespace='view-only-links')),
url(r'^_waffle/', include('api.waffle.urls', namespace='waffle')),
url(r'^wikis/', include('api.wikis.urls', namespace='wikis')),
url(r'^alerts/', include('api.alerts.urls', namespace='alerts')),
],
),
),
url(r'^$', RedirectView.as_view(pattern_name=views.root), name='redirect-to-root', kwargs={'version': default_version}),
]
# Add django-silk URLs if it's in INSTALLED_APPS
if 'silk' in settings.INSTALLED_APPS:
urlpatterns += [
url(r'^silk/', include('silk.urls', namespace='silk')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
handler404 = views.error_404
| apache-2.0 | -1,306,424,025,450,056,000 | 53.406977 | 140 | 0.580893 | false |
mgit-at/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_scaleset.py | 1 | 38539 | #!/usr/bin/python
#
# Copyright (c) 2016 Sertac Ozercan, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine_scaleset
version_added: "2.4"
short_description: Manage Azure virtual machine scale sets.
description:
- Create and update a virtual machine scale set.
options:
resource_group:
description:
- Name of the resource group containing the virtual machine scale set.
required: true
name:
description:
- Name of the virtual machine.
required: true
state:
description:
- Assert the state of the virtual machine scale set.
            - State 'present' will check that the machine exists with the requested configuration. If the configuration
              of the existing machine does not match, the machine will be updated.
- State 'absent' will remove the virtual machine scale set.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
- Short host name
vm_size:
description:
- A valid Azure VM size value. For example, 'Standard_D4'. The list of choices varies depending on the
subscription and location. Check your subscription for available choices.
required: true
capacity:
description:
- Capacity of VMSS.
required: true
default: 1
tier:
description:
- SKU Tier.
choices:
- Basic
- Standard
upgrade_policy:
description:
- Upgrade policy.
choices:
- Manual
- Automatic
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username. Not required if the os_type is Linux and SSH password authentication
is disabled by setting ssh_password_enabled to false.
ssh_password_enabled:
description:
- When the os_type is Linux, setting ssh_password_enabled to false will disable SSH password authentication
and require use of SSH keys.
type: bool
default: true
ssh_public_keys:
description:
- "For os_type Linux provide a list of SSH keys. Each item in the list should be a dictionary where the
dictionary contains two keys: path and key_data. Set the path to the default location of the
authorized_keys files. On an Enterprise Linux host, for example, the path will be
/home/<admin username>/.ssh/authorized_keys. Set key_data to the actual value of the public key."
image:
description:
- Specifies the image used to build the VM.
- If a string, the image is sourced from a custom image based on the
name.
- 'If a dict with the keys C(publisher), C(offer), C(sku), and
C(version), the image is sourced from a Marketplace image. NOTE:
set image.version to C(latest) to get the most recent version of a
given image.'
- 'If a dict with the keys C(name) and C(resource_group), the image
is sourced from a custom image based on the C(name) and
C(resource_group) set. NOTE: the key C(resource_group) is optional
and if omitted, all images in the subscription will be searched for
by C(name).'
- Custom image support was added in Ansible 2.5
required: true
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default: Linux
managed_disk_type:
description:
- Managed disk type.
choices:
- Standard_LRS
- Premium_LRS
data_disks:
description:
- Describes list of data disks.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk.
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks.
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type.
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
virtual_network_resource_group:
description:
- When creating a virtual machine, if a specific virtual network from another resource group should be
used, use this parameter to specify the resource group to use.
version_added: "2.5"
virtual_network_name:
description:
- Virtual Network name.
aliases:
- virtual_network
subnet_name:
description:
- Subnet name.
aliases:
- subnet
load_balancer:
description:
- Load balancer name.
version_added: "2.5"
remove_on_absent:
description:
- When removing a VM using state 'absent', also remove associated resources.
- "It can be 'all' or a list with any of the following: ['network_interfaces', 'virtual_storage', 'public_ips']."
- Any other input will be ignored.
default: ['all']
enable_accelerated_networking:
description:
            - Indicates whether the user wants to allow accelerated networking for virtual machines in the scale set being created.
version_added: "2.7"
type: bool
security_group:
description:
- Existing security group with which to associate the subnet.
- It can be the security group name which is in the same resource group.
- It can be the resource Id.
- It can be a dict which contains C(name) and C(resource_group) of the security group.
version_added: "2.7"
aliases:
- security_group_name
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Sertac Ozercan (@sozercan)"
'''
EXAMPLES = '''
- name: Create VMSS
azure_rm_virtualmachine_scaleset:
resource_group: Testing
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
subnet_name: testsubnet
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
managed_disk_type: Standard_LRS
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
caching: ReadWrite
managed_disk_type: Standard_LRS
- name: Create a VMSS with a custom image
azure_rm_virtualmachine_scaleset:
resource_group: Testing
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image: customimage001
- name: Create a VMSS with a custom image from a particular resource group
azure_rm_virtualmachine_scaleset:
resource_group: Testing
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image:
name: customimage001
resource_group: Testing
'''
RETURN = '''
azure_vmss:
description: Facts about the current state of the object. Note that facts are not part of the registered output but available directly.
returned: always
type: complex
contains: {
"properties": {
"overprovision": true,
"singlePlacementGroup": true,
"upgradePolicy": {
"mode": "Manual"
},
"virtualMachineProfile": {
"networkProfile": {
"networkInterfaceConfigurations": [
{
"name": "testvmss",
"properties": {
"dnsSettings": {
"dnsServers": []
},
"enableAcceleratedNetworking": false,
"ipConfigurations": [
{
"name": "default",
"properties": {
"privateIPAddressVersion": "IPv4",
"subnet": {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet"
}
}
}
],
"primary": true
}
}
]
},
"osProfile": {
"adminUsername": "testuser",
"computerNamePrefix": "testvmss",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"keyData": "",
"path": "/home/testuser/.ssh/authorized_keys"
}
]
}
},
"secrets": []
},
"storageProfile": {
"dataDisks": [
{
"caching": "ReadWrite",
"createOption": "empty",
"diskSizeGB": 64,
"lun": 0,
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
],
"imageReference": {
"offer": "CoreOS",
"publisher": "CoreOS",
"sku": "Stable",
"version": "899.17.0"
},
"osDisk": {
"caching": "ReadWrite",
"createOption": "fromImage",
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
}
}
},
"sku": {
"capacity": 2,
"name": "Standard_DS1_v2",
"tier": "Standard"
},
"tags": null,
"type": "Microsoft.Compute/virtualMachineScaleSets"
}
''' # NOQA
import random
import re
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, format_resource_id
AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineScaleSet(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present', type='str'),
location=dict(type='str'),
short_hostname=dict(type='str'),
vm_size=dict(type='str', required=True),
tier=dict(type='str', choices=['Basic', 'Standard']),
capacity=dict(type='int', default=1),
upgrade_policy=dict(type='str', choices=['Automatic', 'Manual']),
admin_username=dict(type='str'),
admin_password=dict(type='str', no_log=True),
ssh_password_enabled=dict(type='bool', default=True),
ssh_public_keys=dict(type='list'),
image=dict(type='raw'),
os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
default='ReadOnly'),
os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS']),
data_disks=dict(type='list'),
subnet_name=dict(type='str', aliases=['subnet']),
load_balancer=dict(type='str'),
virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
remove_on_absent=dict(type='list', default=['all']),
enable_accelerated_networking=dict(type='bool'),
security_group=dict(type='raw', aliases=['security_group_name'])
)
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.short_hostname = None
self.vm_size = None
self.capacity = None
self.tier = None
self.upgrade_policy = None
self.admin_username = None
self.admin_password = None
self.ssh_password_enabled = None
self.ssh_public_keys = None
self.image = None
self.os_disk_caching = None
self.managed_disk_type = None
self.data_disks = None
self.os_type = None
self.subnet_name = None
self.virtual_network_resource_group = None
self.virtual_network_name = None
self.tags = None
self.differences = None
self.load_balancer = None
self.enable_accelerated_networking = None
self.security_group = None
self.results = dict(
changed=False,
actions=[],
ansible_facts=dict(azure_vmss=None)
)
super(AzureRMVirtualMachineScaleSet, self).__init__(
derived_arg_spec=self.module_arg_spec,
supports_check_mode=True
)
def exec_module(self, **kwargs):
nsg = None
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
# make sure options are lower case
self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
# default virtual_network_resource_group to resource_group
if not self.virtual_network_resource_group:
self.virtual_network_resource_group = self.resource_group
changed = False
results = dict()
vmss = None
disable_ssh_password = None
vmss_dict = None
virtual_network = None
subnet = None
image_reference = None
custom_image = False
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.state == 'present':
# Verify parameters and resolve any defaults
if self.vm_size and not self.vm_size_is_valid():
self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
self.vm_size
))
# if self.virtual_network_name:
# virtual_network = self.get_virtual_network(self.virtual_network_name)
if self.ssh_public_keys:
msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
"each dict contains keys: path, key_data."
for key in self.ssh_public_keys:
if not isinstance(key, dict):
self.fail(msg)
if not key.get('path') or not key.get('key_data'):
self.fail(msg)
if self.image and isinstance(self.image, dict):
if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
marketplace_image = self.get_marketplace_image_version()
if self.image['version'] == 'latest':
self.image['version'] = marketplace_image.name
self.log("Using image version {0}".format(self.image['version']))
image_reference = self.compute_models.ImageReference(
publisher=self.image['publisher'],
offer=self.image['offer'],
sku=self.image['sku'],
version=self.image['version']
)
elif self.image.get('name'):
custom_image = True
image_reference = self.get_custom_image_reference(
self.image.get('name'),
self.image.get('resource_group'))
else:
self.fail("parameter error: expecting image to contain [publisher, offer, sku, version] or [name, resource_group]")
elif self.image and isinstance(self.image, str):
custom_image = True
image_reference = self.get_custom_image_reference(self.image)
elif self.image:
self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
disable_ssh_password = not self.ssh_password_enabled
try:
self.log("Fetching virtual machine scale set {0}".format(self.name))
vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
self.check_provisioning_state(vmss, self.state)
vmss_dict = self.serialize_vmss(vmss)
if self.state == 'present':
differences = []
results = vmss_dict
if self.os_disk_caching and \
self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']:
self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name))
differences.append('OS Disk caching')
changed = True
vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
if self.capacity and \
self.capacity != vmss_dict['sku']['capacity']:
self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name))
differences.append('Capacity')
changed = True
vmss_dict['sku']['capacity'] = self.capacity
if self.data_disks and \
len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])):
self.log('CHANGED: virtual machine scale set {0} - Data Disks'.format(self.name))
differences.append('Data Disks')
changed = True
if self.upgrade_policy and \
self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']:
self.log('CHANGED: virtual machine scale set {0} - Upgrade Policy'.format(self.name))
differences.append('Upgrade Policy')
changed = True
vmss_dict['properties']['upgradePolicy']['mode'] = self.upgrade_policy
update_tags, vmss_dict['tags'] = self.update_tags(vmss_dict.get('tags', dict()))
if update_tags:
differences.append('Tags')
changed = True
self.differences = differences
elif self.state == 'absent':
self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name))
results = dict()
changed = True
except CloudError:
self.log('Virtual machine scale set {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name))
changed = True
self.results['changed'] = changed
self.results['ansible_facts']['azure_vmss'] = results
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not vmss:
# Create the VMSS
self.log("Create virtual machine scale set {0}".format(self.name))
self.results['actions'].append('Created VMSS {0}'.format(self.name))
# Validate parameters
if not self.admin_username:
self.fail("Parameter error: admin_username required when creating a virtual machine scale set.")
if self.os_type == 'Linux':
if disable_ssh_password and not self.ssh_public_keys:
self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
if not self.virtual_network_name:
default_vnet = self.create_default_vnet()
virtual_network = default_vnet.id
self.virtual_network_name = default_vnet.name
if self.subnet_name:
subnet = self.get_subnet(self.virtual_network_name, self.subnet_name)
load_balancer_backend_address_pools = None
load_balancer_inbound_nat_pools = None
if self.load_balancer:
load_balancer = self.get_load_balancer(self.load_balancer)
load_balancer_backend_address_pools = ([self.compute_models.SubResource(id=resource.id)
for resource in load_balancer.backend_address_pools]
if load_balancer.backend_address_pools else None)
load_balancer_inbound_nat_pools = ([self.compute_models.SubResource(id=resource.id)
for resource in load_balancer.inbound_nat_pools]
if load_balancer.inbound_nat_pools else None)
if not self.short_hostname:
self.short_hostname = self.name
if not image_reference:
self.fail("Parameter error: an image is required when creating a virtual machine.")
managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type)
if self.security_group:
nsg = self.parse_nsg()
if nsg:
self.security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id'))
vmss_resource = self.compute_models.VirtualMachineScaleSet(
location=self.location,
tags=self.tags,
upgrade_policy=self.compute_models.UpgradePolicy(
mode=self.upgrade_policy
),
sku=self.compute_models.Sku(
name=self.vm_size,
capacity=self.capacity,
tier=self.tier,
),
virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile(
os_profile=self.compute_models.VirtualMachineScaleSetOSProfile(
admin_username=self.admin_username,
computer_name_prefix=self.short_hostname,
),
storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile(
os_disk=self.compute_models.VirtualMachineScaleSetOSDisk(
managed_disk=managed_disk,
create_option=self.compute_models.DiskCreateOptionTypes.from_image,
caching=self.os_disk_caching,
),
image_reference=image_reference,
),
network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile(
network_interface_configurations=[
self.compute_models.VirtualMachineScaleSetNetworkConfiguration(
name=self.name,
primary=True,
ip_configurations=[
self.compute_models.VirtualMachineScaleSetIPConfiguration(
name='default',
subnet=self.compute_models.ApiEntityReference(
id=subnet.id
),
primary=True,
load_balancer_backend_address_pools=load_balancer_backend_address_pools,
load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools
)
],
enable_accelerated_networking=self.enable_accelerated_networking,
network_security_group=self.security_group
)
]
)
)
)
if self.admin_password:
vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password
if self.os_type == 'Linux':
vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
disable_password_authentication=disable_ssh_password
)
if self.ssh_public_keys:
ssh_config = self.compute_models.SshConfiguration()
ssh_config.public_keys = \
[self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config
if self.data_disks:
data_disks = []
for data_disk in self.data_disks:
data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
storage_account_type=data_disk.get('managed_disk_type', None)
)
data_disk['caching'] = data_disk.get(
'caching',
self.compute_models.CachingTypes.read_only
)
data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
lun=data_disk.get('lun', None),
caching=data_disk.get('caching', None),
create_option=self.compute_models.DiskCreateOptionTypes.empty,
disk_size_gb=data_disk.get('disk_size_gb', None),
managed_disk=data_disk_managed_disk,
))
vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
self.log("Create virtual machine with parameters:")
self.create_or_update_vmss(vmss_resource)
elif self.differences and len(self.differences) > 0:
self.log("Update virtual machine scale set {0}".format(self.name))
self.results['actions'].append('Updated VMSS {0}'.format(self.name))
vmss_resource = self.get_vmss()
vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching
vmss_resource.sku.capacity = self.capacity
if self.data_disks is not None:
data_disks = []
for data_disk in self.data_disks:
data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
lun=data_disk['lun'],
caching=data_disk['caching'],
create_option=self.compute_models.DiskCreateOptionTypes.empty,
disk_size_gb=data_disk['disk_size_gb'],
managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
storage_account_type=data_disk['managed_disk_type']
),
))
vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
self.log("Update virtual machine with parameters:")
self.create_or_update_vmss(vmss_resource)
self.results['ansible_facts']['azure_vmss'] = self.serialize_vmss(self.get_vmss())
elif self.state == 'absent':
# delete the VM
self.log("Delete virtual machine scale set {0}".format(self.name))
self.results['ansible_facts']['azure_vmss'] = None
self.delete_vmss(vmss)
# until we sort out how we want to do this globally
del self.results['actions']
return self.results
def get_vmss(self):
'''
Get the VMSS
:return: VirtualMachineScaleSet object
'''
try:
vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
return vmss
except CloudError as exc:
self.fail("Error getting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
def get_virtual_network(self, name):
try:
vnet = self.network_client.virtual_networks.get(self.virtual_network_resource_group, name)
return vnet
except CloudError as exc:
self.fail("Error fetching virtual network {0} - {1}".format(name, str(exc)))
def get_subnet(self, vnet_name, subnet_name):
self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name))
try:
subnet = self.network_client.subnets.get(self.virtual_network_resource_group, vnet_name, subnet_name)
except CloudError as exc:
self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(
subnet_name,
vnet_name,
str(exc)))
return subnet
def get_load_balancer(self, id):
id_dict = parse_resource_id(id)
try:
return self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name'))
except CloudError as exc:
self.fail("Error fetching load balancer {0} - {1}".format(id, str(exc)))
def serialize_vmss(self, vmss):
'''
Convert a VirtualMachineScaleSet object to dict.
:param vm: VirtualMachineScaleSet object
:return: dict
'''
result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
result['id'] = vmss.id
result['name'] = vmss.name
result['type'] = vmss.type
result['location'] = vmss.location
result['tags'] = vmss.tags
return result
def delete_vmss(self, vmss):
self.log("Deleting virtual machine scale set {0}".format(self.name))
self.results['actions'].append("Deleted virtual machine scale set {0}".format(self.name))
try:
poller = self.compute_client.virtual_machine_scale_sets.delete(self.resource_group, self.name)
# wait for the poller to finish
self.get_poller_result(poller)
except CloudError as exc:
self.fail("Error deleting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
return True
def get_marketplace_image_version(self):
try:
versions = self.compute_client.virtual_machine_images.list(self.location,
self.image['publisher'],
self.image['offer'],
self.image['sku'])
except CloudError as exc:
self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
str(exc)))
if versions and len(versions) > 0:
if self.image['version'] == 'latest':
return versions[len(versions) - 1]
for version in versions:
if version.name == self.image['version']:
return version
self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
self.image['offer'],
self.image['sku'],
self.image['version']))
def get_custom_image_reference(self, name, resource_group=None):
try:
if resource_group:
vm_images = self.compute_client.images.list_by_resource_group(resource_group)
else:
vm_images = self.compute_client.images.list()
except Exception as exc:
self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
for vm_image in vm_images:
if vm_image.name == name:
self.log("Using custom image id {0}".format(vm_image.id))
return self.compute_models.ImageReference(id=vm_image.id)
self.fail("Error could not find image with name {0}".format(name))
def create_or_update_vmss(self, params):
try:
poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, params)
self.get_poller_result(poller)
except CloudError as exc:
self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
def vm_size_is_valid(self):
'''
Validate self.vm_size against the list of virtual machine sizes available for the account and location.
:return: boolean
'''
try:
sizes = self.compute_client.virtual_machine_sizes.list(self.location)
except CloudError as exc:
self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
for size in sizes:
if size.name == self.vm_size:
return True
return False
def parse_nsg(self):
nsg = self.security_group
resource_group = self.resource_group
if isinstance(self.security_group, dict):
nsg = self.security_group.get('name')
resource_group = self.security_group.get('resource_group', self.resource_group)
id = format_resource_id(val=nsg,
subscription_id=self.subscription_id,
namespace='Microsoft.Network',
types='networkSecurityGroups',
resource_group=resource_group)
name = azure_id_to_dict(id).get('name')
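        # Illustrative result (subscription and group values assumed): for
        # security_group='my-nsg' in resource group 'Testing', name is
        # 'my-nsg' and id has the form /subscriptions/<sub>/resourceGroups/
        # Testing/providers/Microsoft.Network/networkSecurityGroups/my-nsg.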
return dict(id=id, name=name)
def main():
AzureRMVirtualMachineScaleSet()
if __name__ == '__main__':
main()
| gpl-3.0 | -3,767,819,410,370,121,000 | 41.58453 | 199 | 0.516386 | false |
abhinavsingh/proxy.py | proxy/http/websocket/client.py | 1 | 3712 | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import base64
import selectors
import socket
import secrets
import ssl
from typing import Optional, Union, Callable
from .frame import WebsocketFrame
from ..parser import httpParserTypes, HttpParser
from ...common.constants import DEFAULT_BUFFER_SIZE
from ...common.utils import new_socket_connection, build_websocket_handshake_request, text_
from ...core.connection import tcpConnectionTypes, TcpConnection
class WebsocketClient(TcpConnection):
def __init__(self,
hostname: bytes,
port: int,
path: bytes = b'/',
on_message: Optional[Callable[[WebsocketFrame], None]] = None) -> None:
super().__init__(tcpConnectionTypes.CLIENT)
self.hostname: bytes = hostname
self.port: int = port
self.path: bytes = path
self.sock: socket.socket = new_socket_connection(
(socket.gethostbyname(text_(self.hostname)), self.port))
self.on_message: Optional[Callable[[
WebsocketFrame], None]] = on_message
self.selector: selectors.DefaultSelector = selectors.DefaultSelector()
@property
def connection(self) -> Union[ssl.SSLSocket, socket.socket]:
return self.sock
def handshake(self) -> None:
self.upgrade()
self.sock.setblocking(False)
def upgrade(self) -> None:
key = base64.b64encode(secrets.token_bytes(16))
self.sock.send(
build_websocket_handshake_request(
key,
url=self.path,
host=self.hostname))
response = HttpParser(httpParserTypes.RESPONSE_PARSER)
response.parse(self.sock.recv(DEFAULT_BUFFER_SIZE))
accept = response.header(b'Sec-Websocket-Accept')
assert WebsocketFrame.key_to_accept(key) == accept
def ping(self, data: Optional[bytes] = None) -> None:
pass
def pong(self, data: Optional[bytes] = None) -> None:
pass
def shutdown(self, _data: Optional[bytes] = None) -> None:
"""Closes connection with the server."""
super().close()
def run_once(self) -> bool:
ev = selectors.EVENT_READ
if self.has_buffer():
ev |= selectors.EVENT_WRITE
self.selector.register(self.sock.fileno(), ev)
events = self.selector.select(timeout=1)
self.selector.unregister(self.sock)
for _, mask in events:
if mask & selectors.EVENT_READ and self.on_message:
raw = self.recv()
if raw is None or raw.tobytes() == b'':
self.closed = True
return True
frame = WebsocketFrame()
# TODO(abhinavsingh): Remove .tobytes after parser is
# memoryview compliant
frame.parse(raw.tobytes())
self.on_message(frame)
elif mask & selectors.EVENT_WRITE:
self.flush()
return False
def run(self) -> None:
try:
while not self.closed:
teardown = self.run_once()
if teardown:
break
except KeyboardInterrupt:
pass
finally:
if not self.closed:
self.selector.unregister(self.sock)
self.sock.shutdown(socket.SHUT_WR)
self.sock.close()
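# Minimal usage sketch; the host, port and path are assumed values and the
# server is hypothetical, not part of this module:
#
#     client = WebsocketClient(b'localhost', 8899, b'/ws', on_message=print)
#     client.handshake()
#     client.run()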
| bsd-3-clause | 4,140,007,883,464,853,500 | 33 | 91 | 0.593362 | false |
dkhavari/open-source-investing | bitcoin-sentiment/analyze-and-trade.py | 1 | 1202 | import datetime
from datetime import timedelta as td
import pymongo as pymongo
from pymongo import MongoClient
from movingavg import compute_moving_averages
import math
import sys
# Constants.
MAX_ARTICLES = 250
WINDOW_SIZE = 3 # hours
# --------------------------------------------------
# Connect with the remote Mongo and prepare NLP API.
# --------------------------------------------------
client = MongoClient('ds047571.mongolab.com:47571')
db = client.coinage
db.authenticate('bit', 'coin')
collection = db['articles']
# Get a healthy number of articles to ensure we have enough.
articles = collection.find().sort("date",pymongo.DESCENDING).limit(MAX_ARTICLES)
# Get the current time.
current_time = datetime.datetime.now()
# --------------------------------------------------
# Call a function to get the average sentiment for
# time periods of any specified size / time delta.
# --------------------------------------------------
time_delta = td(hours=float(sys.argv[1]))
results = compute_moving_averages(articles, collection, time_delta)
# --------------------------------------------------
# Time to work with the Bollinger Bands.
# -------------------------------------------------- | mit | 8,012,289,797,478,050,000 | 31.513514 | 80 | 0.566556 | false |
queeno/network-simulator | MR_JobGenerator/MR_JobGenerator.py | 1 | 4366 | '''
Copyright (c) 2012, Simon Aquino
All rights reserved.
Made available under the BSD license - see the LICENSE file
'''
#Imports
import sys
import random
# File specs
INPUT_PATH = '../inputs/'
NO_FILE = 'input%d.mr'
#Number specs
MAX_NO_FILES = 5
MAX_NUMS_IN_FILES = 100
MAX_RANGE_IN_FILES = 1000
WELCOME_MESSAGE='''
Hello and welcome to Simon's awesome Python script to generate random inputs
for a MapReduce problem.
Please select what you'd like to do.
'''
LIST_MAIN='''
1. Generate list of random numbers as input for a MapReduce problem.
2. Exit
'''
NO_FILES = '''
Please, specify how many files you want me to generate.
In case you select random, no more than 5 files will be generated.
Enter x for random or an integer:
'''
THANKS = 'Thanks! :)'
NUMS_IN_FILES = '''
Please, now specify how many numbers you wish each file to contain.
In case you select random, no more than 100 numbers will be generated.
Enter x for random or an integer:
'''
RANGE_IN_FILES = '''
Now specify what is the range (+r,-r) in which the numbers should be generated.
In case you select random, the range won't be larger than (+1000, -1000).
Please give me r: '''
GREETINGS = 'Thanks for having used this program. Good bye!'
GENERATING_FILES = '''
Generating %d files, %d numbers per file and range(-%d,%d).
Please wait.....
'''
ALL_DONE = 'All done! You can find your files in %s' % (INPUT_PATH)
CHOICE = 'Please, make your choice here: '
def GenerateFile(no_files='x', nums_in_files='x', range_in_files='x'):
    '''Takes parameters and generates input files for a MapReduce problem.
    Args:
        no_files : Number of files the user wishes to generate.
        nums_in_files : How many numbers each file should contain.
        range_in_files : The range (-r,+r) that includes the numbers
                         generated.
'''
# Initialise randomiser using sys time
random.seed()
if (no_files == 'x'):
# Generate random value between 1 and 5.
no_files = random.randint(1, MAX_NO_FILES)
else:
no_files = int (no_files)
if (nums_in_files == 'x'):
# Generate random value between 1 and 100.
nums_in_files = random.randint(1, MAX_NUMS_IN_FILES)
else:
        # Use the integer count supplied by the user.
        nums_in_files = int(nums_in_files)
if (range_in_files == 'x'):
range_in_files = random.randint(1,MAX_RANGE_IN_FILES)
else:
range_in_files = int (range_in_files)
print GENERATING_FILES % \
(no_files, nums_in_files, range_in_files, range_in_files)
for job in range(0, no_files):
content = GenerateNumbers(nums_in_files, range_in_files)
filename = NO_FILE % (job)
file_path = INPUT_PATH + filename
# Open file.
f = open(file_path, 'w')
# Write content
f.write(content)
# Close file
f.close()
print ALL_DONE
def GenerateNumbers(nums_in_file, range_in_file):
'''Takes parameters and generates a string of random numbers
separated by a comma ','
Args:
        nums_in_file : How many numbers to generate.
range_in_file : The range (-r,+r) that includes the numbers
generated.
'''
string = ''
for i in range(0, nums_in_file):
# Don't append comma at the end of file!
if (i==nums_in_file-1):
string += str(random.randint(-range_in_file,range_in_file))
else:
string += str(random.randint(-range_in_file,range_in_file)) + ','
return string
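# For example (illustrative call): GenerateNumbers(3, 10) might return
# '-7,2,9' -- three comma-separated integers drawn from [-10, 10].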
def UserDecides():
# Ask user what to do first
print LIST_MAIN
user_choice = input(CHOICE)
if not (user_choice == 1):
print GREETINGS
sys.exit(0)
no_files = raw_input(NO_FILES)
print THANKS
nums_in_files = raw_input(NUMS_IN_FILES)
print THANKS
range_in_files = raw_input(RANGE_IN_FILES)
print THANKS
return no_files, nums_in_files, range_in_files
def main():
print WELCOME_MESSAGE
nf, nif, rif = UserDecides()
GenerateFile(nf, nif, rif)
print GREETINGS
if __name__ == '__main__':
main() | bsd-3-clause | -5,966,355,612,059,816,000 | 24.389535 | 79 | 0.60055 | false |
a4a881d4/gosse | rewrite/memlayout.py | 1 | 4785 |
class type:
def __init__(self,name,size,ctypename,gotypename='default'):
self.name = name
self.size = size
self.ctypename = ctypename
if gotypename=='default':
self.gotypename = name
else:
self.gotypename = gotypename
def cgen(self,v):
print self.name,'%s;'%v.name
def gogen(self,v):
print v.name,self.gotypename
def carray(self,v):
print self.name,'%s[%d];'%(v.name,v.l)
def goarray(self,v):
print v.name,'[%d]%s'%(v.l,self.gotypename)
class struct:
def __init__(self,types,name):
self.types = types
self.name = name
self.items = []
self.size = 0
def fromList(self,list):
l = 0
self.m = genMap(self.types)
for x in list:
if x.name!='userdef':
self.items.append(x)
if self.m[x.type].size>=4 and (l%4)!=0:
print "//",x.name,"align to",l
l += self.m[x.type].size*x.l
else:
self.items.append(item(x.name,'byte',x.l-l))
l = x.l
self.types.append(type(self.name,l,"struct %s_s"%self.name))
self.size = l
return self.types
def ctypeDec(self):
print '//',self.name,self.size
print 'typedef','struct',self.name+'_s','{'
for x in self.items:
print '\t',
if x.l==1:
self.m[x.type].cgen(x)
else:
self.m[x.type].carray(x)
print '}','%s;'%self.name
def gotypeDec(self):
print '//',self.name,self.size
print 'type',self.name,'struct','{'
for x in self.items:
print '\t',
if x.l==1:
self.m[x.type].gogen(x)
else:
self.m[x.type].goarray(x)
print '}'
def genTypes():
types = []
types.append(type('int8',1,'char'))
types.append(type('uint8',1,'unsigned char'))
types.append(type('int16',2,'short'))
types.append(type('uint16',2,'unsigned short'))
types.append(type('int32',4,'int'))
types.append(type('int64',8,'long long int'))
types.append(type('uint32',4,'unsigned int'))
types.append(type('uint64',8,'unsigned long long int'))
types.append(type('float32',4,'float','uint32'))
types.append(type('float64',8,'double'))
types.append(type('byte',1,'unsigned char'))
types.append(type('char',1,'char','byte'))
return types
def ctypeDec(types):
for x in types:
if x.name!=x.ctypename:
print 'typedef',x.ctypename,'%s;'%x.name
def genMap(types):
m = {}
for x in types:
m[x.name]=x
return m
class item:
def __init__(self,name,t,l):
self.name = name
self.type = t
self.l =l
def str2list(s):
    # parse a block of "name type count" lines into item objects,
    # skipping any line that does not have exactly three fields
    l = []
    for line in s.split('\n'):
        line = line.replace('\n','')
        it = line.split(' ')
        if len(it)!=3:
            continue
        l.append(item(it[0],it[1],int(it[2])))
    return l
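# Example (sketch): str2list('name int32 1\nbuf byte 16\n') yields two item
# objects, ('name', 'int32', 1) and ('buf', 'byte', 16); lines without
# exactly three fields are silently skipped.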
raw_spin_lock_t_str = """
lock uint32 1
"""
CpInfo_str = """
resLen int64 1
dataLen int64 1
cpLen int64 1
"""
Version_str = """
build uint16 1
minor uint8 1
major uint8 1
"""
NumInApp_str = """
num uint16 1
fun uint8 1
app uint8 1
"""
CpMeta_str = """
name char 32
app char 32
info CpInfo 1
ver Version 1
salt int32 1
"""
ResBlk_str = """
offset int32 1
len int32 1
"""
IndexItem_str = """
blk ResBlk 1
captype int64 1
md5 byte 16
"""
IndexHead_str = """
index IndexItem 32
"""
CPBuffer_str = """
meta CpMeta 1
userdef byte 1024
"""
SMMem_str = """
_brk int32 1
_free int32 1
_wr int32 1
_rd int32 1
"""
LMMem_str = """
_brk int64 1
_free int64 1
_wr int64 1
_rd int64 1
"""
ClkTrans_str = """
cpuoff int64 1
sysoff int64 1
clkr float64 1
clks float64 1
"""
CapMeta_str = """
name char 32
entity char 32
blk ResBlk
ver Version 1
num NumInApp 1
"""
CapDefault_str = """
lock raw_spin_lock_t 4
volatileMem LMMem 1
clk ClkTrans 1
preAllocMem SMMem 1
"""
Capability_str = """
meta CapMeta 1
cap CapDefault 1
userdef byte 2048
"""
BufHead_str = """
index IndexHead 1
cpbuf CPBuffer 1
Caps Capability 31
"""
ResMem_str = """
head BufHead 1
userdef byte 1048576
"""
structs = [ 'raw_spin_lock_t'
, 'CpInfo'
, 'Version'
, 'CpMeta'
, 'IndexItem'
, 'Sector00'
, 'Sector01'
]
def genStructList(lmap):
s = []
for k in lmap:
if k[-4:]=='_str':
s.append(k[:-4])
m = {}
for x in s:
m[x]=[]
for y in s:
if y!=x:
if y in lmap[x+'_str']:
m[x].append(y)
ss = []
while m != {}:
for k in m:
if len(m[k])==0:
ss.append(k)
for x in ss:
if x in m:
del m[x]
for k in m:
if x in m[k]:
m[k].remove(x)
#print ss
#print m
return ss
def cDec(structs,lmap):
types = genTypes()
for n in structs:
struct_item = struct(types,n)
s = lmap[n+'_str']
types = struct_item.fromList(str2list(s))
struct_item.ctypeDec()
def goDec(structs,lmap):
types = genTypes()
for n in structs:
struct_item = struct(types,n)
s = lmap[n+'_str']
types = struct_item.fromList(str2list(s))
struct_item.gotypeDec()
if __name__=='__main__':
print "package cpbuf"
print "/*"
types = genTypes()
ctypeDec(types)
s = genStructList(locals())
cDec(s,locals())
print "*/"
print 'import "C"'
goDec(s,locals())
| gpl-2.0 | -153,453,880,150,630,100 | 17.916996 | 62 | 0.621317 | false |
Yukarumya/Yukarum-Redfoxes | testing/mozbase/moznetwork/moznetwork/moznetwork.py | 1 | 5580 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import array
import re
import socket
import struct
import subprocess
import sys
import mozinfo
import mozlog
if mozinfo.isLinux:
import fcntl
class NetworkError(Exception):
"""Exception thrown when unable to obtain interface or IP."""
def _get_logger():
logger = mozlog.get_default_logger(component='moznetwork')
if not logger:
logger = mozlog.unstructured.getLogger('moznetwork')
return logger
def _get_interface_list():
"""Provides a list of available network interfaces
as a list of tuples (name, ip)"""
logger = _get_logger()
logger.debug('Gathering interface list')
    max_iface = 32  # Maximum number of interfaces (arbitrary)
bytes = max_iface * 32
is_32bit = (8 * struct.calcsize("P")) == 32 # Set Architecture
struct_size = 32 if is_32bit else 40
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
namestr = names.tostring()
return [(namestr[i:i + 32].split('\0', 1)[0],
socket.inet_ntoa(namestr[i + 20:i + 24]))
for i in range(0, outbytes, struct_size)]
except IOError:
raise NetworkError('Unable to call ioctl with SIOCGIFCONF')
def _proc_matches(args, regex):
"""Helper returns the matches of regex in the output of a process created with
the given arguments"""
output = subprocess.Popen(args=args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout.read()
return re.findall(regex, output)
def _parse_ifconfig():
"""Parse the output of running ifconfig on mac in cases other methods
have failed"""
logger = _get_logger()
logger.debug('Parsing ifconfig')
# Attempt to determine the default interface in use.
default_iface = _proc_matches(['route', '-n', 'get', 'default'],
'interface: (\w+)')
if default_iface:
addr_list = _proc_matches(['ifconfig', default_iface[0]],
                                  'inet (\d+\.\d+\.\d+\.\d+)')
if addr_list:
logger.debug('Default interface: [%s] %s' % (default_iface[0],
addr_list[0]))
if not addr_list[0].startswith('127.'):
return addr_list[0]
# Iterate over plausible interfaces if we didn't find a suitable default.
for iface in ['en%s' % i for i in range(10)]:
addr_list = _proc_matches(['ifconfig', iface],
                                  'inet (\d+\.\d+\.\d+\.\d+)')
if addr_list:
logger.debug('Interface: [%s] %s' % (iface, addr_list[0]))
if not addr_list[0].startswith('127.'):
return addr_list[0]
# Just return any that isn't localhost. If we can't find one, we have
# failed.
addrs = _proc_matches(['ifconfig'],
                          'inet (\d+\.\d+\.\d+\.\d+)')
try:
return [addr for addr in addrs if not addr.startswith('127.')][0]
except IndexError:
return None
def get_ip():
"""Provides an available network interface address, for example
"192.168.1.3".
A `NetworkError` exception is raised in case of failure."""
logger = _get_logger()
try:
hostname = socket.gethostname()
try:
logger.debug('Retrieving IP for %s' % hostname)
ips = socket.gethostbyname_ex(hostname)[2]
except socket.gaierror: # for Mac OS X
hostname += '.local'
logger.debug('Retrieving IP for %s' % hostname)
ips = socket.gethostbyname_ex(hostname)[2]
if len(ips) == 1:
ip = ips[0]
elif len(ips) > 1:
logger.debug('Multiple addresses found: %s' % ips)
# no fallback on Windows so take the first address
ip = ips[0] if mozinfo.isWin else None
else:
ip = None
except socket.gaierror:
# sometimes the hostname doesn't resolve to an ip address, in which
# case this will always fail
ip = None
if ip is None or ip.startswith("127."):
if mozinfo.isLinux:
interfaces = _get_interface_list()
for ifconfig in interfaces:
logger.debug('Interface: [%s] %s' % (ifconfig[0], ifconfig[1]))
if ifconfig[0] == 'lo':
continue
else:
return ifconfig[1]
elif mozinfo.isMac:
ip = _parse_ifconfig()
if ip is None:
raise NetworkError('Unable to obtain network address')
return ip
def get_lan_ip():
"""Deprecated. Please use get_ip() instead."""
return get_ip()
def cli(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Retrieve IP address')
mozlog.commandline.add_logging_group(
parser,
include_formatters=mozlog.commandline.TEXT_FORMATTERS
)
    args = parser.parse_args(args)
mozlog.commandline.setup_logging(
        'moznetwork', args, {'mach': sys.stdout})
_get_logger().info('IP address: %s' % get_ip())
if __name__ == '__main__':
cli()
| mpl-2.0 | 7,211,842,824,974,316,000 | 31.44186 | 82 | 0.571685 | false |
budnyjj/NLRA | tests/test_one_dim_mrt.py | 1 | 6134 | import unittest
import random
import sympy as sp
import numpy as np
import sys
import os
sys.path.append('.')
import stats.methods as methods
from stats.utils import *
class TestBasicMrt(unittest.TestCase):
def setUp(self):
self.num_vals = 20 # number of source values
def test_linear_k(self):
sym_x, sym_y = sp.symbols('x y')
sym_k = sp.symbols('k')
sym_expr = sp.sympify('k*x')
sym_expr_delta = sp.sympify('y - k*x')
min_x = 1
max_x = 20
real_k = 2 # real 'k' value of source distribution
err_y_avg = 0 # average of Y error values
err_y_std = 0.01 # std of Y error values
# real X values without errors
x = np.linspace(min_x, max_x,
self.num_vals, dtype=np.float)
# real Y values without errors
real_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs(
{sym_k: real_k}
),
'numpy'
)
)(x)
# add Y errors with current normal distribution
y = np.vectorize(
lambda v: v + random.gauss(err_y_avg, err_y_std)
)(real_y)
# find params with mrt method
mrt_k = methods.search_mrt(
delta_expression=sym_expr_delta,
parameters=(sym_k,),
values={sym_x: x, sym_y: y},
err_stds={sym_x: 0, sym_y: err_y_std}
)
mrt_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs({sym_k: mrt_k}),
'numpy'
)
)(x)
self.assertAlmostEqual(real_k, mrt_k[0], places=1)
def test_linear_b(self):
sym_x, sym_y = sp.symbols('x y')
sym_b = sp.symbols('b')
sym_expr = sp.sympify('b')
sym_expr_delta = sp.sympify('y - b')
min_x = 1
max_x = 20
real_b = 2 # real 'b' value of source distribution
err_y_avg = 0 # average of Y error values
err_y_std = 0.01 # std of Y error values
# real X values without errors
x = np.linspace(min_x, max_x,
self.num_vals, dtype=np.float)
# real Y values without errors
real_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs(
{sym_b: real_b}
),
'numpy'
)
)(x)
# add Y errors with current normal distribution
y = np.vectorize(
lambda v: v + random.gauss(err_y_avg, err_y_std)
)(real_y)
# find params with mrt method
mrt_b = methods.search_mrt(
delta_expression=sym_expr_delta,
parameters=(sym_b,),
values={sym_x: x, sym_y: y},
err_stds={sym_x: 0, sym_y: err_y_std}
)
mrt_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs({sym_b: mrt_b}),
'numpy'
)
)(x)
self.assertAlmostEqual(real_b, mrt_b[0], places=1)
def test_exponential(self):
sym_x, sym_y = sp.symbols('x y')
sym_a = sp.symbols('a')
sym_expr = sp.sympify('a*exp(x)')
sym_expr_delta = sp.sympify('y - a*exp(x)')
min_x = 1
max_x = 20
real_a = 10 # real 'a' value of source distribution
err_y_avg = 0 # average of Y error values
err_y_std = 0.01 # std of Y error values
# real X values without errors
x = np.linspace(min_x, max_x,
self.num_vals, dtype=np.float)
# real Y values without errors
real_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs(
{sym_a: real_a}
),
'numpy'
)
)(x)
# add Y errors with current normal distribution
y = np.vectorize(
lambda v: v + random.gauss(err_y_avg, err_y_std)
)(real_y)
# find params with mrt method
mrt_a = methods.search_mrt(
delta_expression=sym_expr_delta,
parameters=(sym_a,),
values={sym_x: x, sym_y: y},
err_stds={sym_x: 0, sym_y: err_y_std}
)
mrt_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs({sym_a: mrt_a}),
'numpy'
)
)(x)
self.assertAlmostEqual(real_a, mrt_a[0], places=1)
def test_sinusoidal(self):
sym_x, sym_y = sp.symbols('x y')
sym_a = sp.symbols('a')
sym_expr = sp.sympify('a*sin(x)')
sym_expr_delta = sp.sympify('y - a*sin(x)')
min_x = 1
max_x = 20
real_a = 2 # real 'a' value of source distribution
err_y_avg = 0 # average of Y error values
err_y_std = 0.01 # std of Y error values
# real X values without errors
x = np.linspace(min_x, max_x,
self.num_vals, dtype=np.float)
# real Y values without errors
real_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs(
{sym_a: real_a}
),
'numpy'
)
)(x)
# add Y errors with current normal distribution
y = np.vectorize(
lambda v: v + random.gauss(err_y_avg, err_y_std)
)(real_y)
# find params with mrt method
mrt_a = methods.search_mrt(
delta_expression=sym_expr_delta,
parameters=(sym_a,),
values={sym_x: x, sym_y: y},
err_stds={sym_x: 0, sym_y: err_y_std}
)
mrt_y = np.vectorize(
sp.lambdify(
sym_x,
sym_expr.subs({sym_a: mrt_a}),
'numpy'
)
)(x)
self.assertAlmostEqual(real_a, mrt_a[0], places=1)
| mit | -7,705,824,584,801,470,000 | 26.022026 | 70 | 0.458591 | false |
hzxsnczpku/banrinochoujou | examples/CartPole/train_CartPole_BDQN.py | 1 | 2863 | from train import *
from models.net_builder import *
from basic_utils.env_wrapper import Vec_env_wrapper
from models.agents import *
from basic_utils.options import *
from basic_utils.exploration_noise import *
def train_CartPole_DQN(load_model=False, render=False, save_every=None, double=False, prioritized=False):
torch.manual_seed(8833)
env = Vec_env_wrapper(name='CartPole-v1', consec_frames=1, running_stat=False, seed=23333)
action_space = env.action_space
observation_space = env.observation_space
net = MLPs_q(observation_space, action_space, net_topology_q_vec)
mean_net = MLPs_q(observation_space, action_space, net_topology_q_vec)
std_net = MLPs_q(observation_space, action_space, net_topology_q_vec)
target_net = MLPs_q(observation_space, action_space, net_topology_q_vec)
target_mean_net = MLPs_q(observation_space, action_space, net_topology_q_vec)
target_std_net = MLPs_q(observation_space, action_space, net_topology_q_vec)
noise = NoNoise_Exploration()
#noise = EpsilonGreedy_Exploration(action_n=action_space.n,
# explore_len=10000,
# init_epsilon=1.0,
# final_epsilon=0.01)
if use_cuda:
net.cuda()
mean_net.cuda()
std_net.cuda()
target_net.cuda()
target_mean_net.cuda()
target_std_net.cuda()
agent = Bayesian_DQN_Agent(net,
mean_net,
std_net,
target_net,
target_mean_net,
target_std_net,
alpha=1,
beta=1e-4,
gamma=0.95,
lr=5e-3,
scale=5e-4,
update_target_every=500,
get_info=True)
if prioritized:
memory = PrioritizedReplayBuffer(memory_cap=10000,
batch_size_q=64,
alpha=0.8,
beta=0.6)
else:
memory = ReplayBuffer(memory_cap=10000, batch_size_q=64)
if load_model:
agent.load_model("./save_model/" + env.name + "_" + agent.name)
t = Memory_Trainer(agent=agent,
env=env,
memory=memory,
n_worker=1,
step_num=1,
rand_explore_len=1000,
save_every=save_every,
render=render,
print_every=10,
noise=noise)
t.train()
if __name__ == '__main__':
train_CartPole_DQN(save_every=5)
| mit | 8,449,812,705,804,333,000 | 37.689189 | 105 | 0.493888 | false |
amitdeshmukh/malware-domain-detection | malware_domain_detection.py | 1 | 3283 | #!/usr/bin/python
from urlparse import urlparse
import requests
import json
import math
import re
domain = ''
domain_arr = []
path=''
path_arr = []
starts_with_http = re.compile('^http')
j = {}
tlds = []
file_name = 'angler_domains.txt'
def bigrams(input_list):
return zip(input_list, input_list[1:])
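# Example: bigrams('hello') -> [('h','e'), ('e','l'), ('l','l'), ('l','o')]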
def probability_score(word):
score = 0
# if no char pairs, return score = 0
if len(word) < 2:
return score
# else compute score
b = bigrams(word)
for char_tuple in b:
pair = char_tuple[0] + char_tuple[1]
# try to lookup tuple scores in rendered json
# if it fails, we do not have a tuple in json
# in that case assign an arbitrary high value
try:
tuple_lookup_value = j[pair]['log']
except KeyError:
tuple_lookup_value = -5
score = score + tuple_lookup_value
return score
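# Note: the score is a sum of log-probabilities looked up per character pair,
# so a word made of common English bigrams stays close to zero while rare
# pairs (as in many algorithmically generated domains) push it far below zero.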
def parse_url(url):
global domain
global domain_arr
global path
global path_arr
global starts_with_http
# ensure url includes the scheme, as urlparse doesnt work if scheme absent
http_result = starts_with_http.match(url)
if http_result:
pass
else:
url = 'http://' + url
# parse url
u = urlparse(url)
# separate url into domain and path
if u and u.netloc:
domain = u.netloc
else:
print 'Domain parse error'
# remove www and split on '.'
domain = re.sub('^www\.', '', domain)
domain_arr = domain.split('.')
# we want to eliminate as much useless info as possible, e.g tlds
tld = domain_arr[-1]
if tld in tlds:
domain_arr.pop()
domain = '.'.join(domain_arr)
# split domain again on '-' and '.'
domain_arr = re.split('[-.]', domain)
# eliminate empty strings from list
domain_arr = filter(None, domain_arr)
print 'DOMAIN ==> ', domain
# separate path components into 'words'
if u and u.path:
path = u.path
path_arr = re.split('[/_.-]', path)
# eliminate empty strings from list
path_arr = filter(None, path_arr)
print 'PATH ==> ', path
else:
print 'PATH ==> No path info in URL'
# words[] is a list containing the terms to evaluate
words = domain_arr + path_arr
return words
def main():
global domain
global domain_arr
global path
global path_arr
global file_name
global tlds
global j
# read in pre-rendered json
with open('character_pair_probabilities.json') as fi:
j = json.load(fi)
print '-- Read json --'
# fetch latest icann TLD list
r = requests.get('http://data.iana.org/TLD/tlds-alpha-by-domain.txt')
arr2 = r.text.lower().split('\n')
# obtain tld's in array for comparison
tlds = arr2[1:]
print '-- Fetched latest ICANN TLDs --\n'
# read url
with open(file_name, 'r') as fi:
for url in fi:
# ensure we reset all variables
domain = ''
path = ''
domain_arr = []
path_arr = []
url = url.rstrip().lower()
print 'URL ==> ', url
words = parse_url(url)
print 'EVALUATING WORDS ==> ', words
# calculate a score
for word in words:
score = probability_score(word)
# to nullify effect of longer words producing high scores
# calculate exp(score/len(word)) rounded to 3 decimals
malware_score = round(math.exp(abs(score)/len(word)), 3)
if malware_score > 15:
print 'POSSIBLY MALICIOUS ==> ', word, malware_score
print '\n'
if __name__ == "__main__":
main()
| apache-2.0 | 3,560,087,129,758,477,000 | 21.798611 | 75 | 0.646055 | false |
brunojulia/ultracoldUB | brightsolitons/bs_evolution/sim_2_light.py | 1 | 6761 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 25 20:40:22 2016
@author: laura18
******************************************************************************
This program creates an animation to reproduce an analogy with the modulus of a
bright soliton passing through a barrier. Given the energy and the height of
the barrier, one can estimate an approximate transmission coefficient given the
probability of finding the soliton in each side. The probability obtained in
the Interfaz_main.py (through gpe_bs_evolution.py) is read by this program and shown
in the interface. To do so, moviepy is used.
The code is fully documented. The code has the option to save the animation in a
gif file, though the user may need the ffmpeg file to be able to save it.
This program also uses a particular naming for the variables:
xp, yp are the x and y coordinates for the particles before hitting the 'wall'
xf, yf are the x and y coordinates for the particles travelling to the right once
it has arrived to 0, i.e. forward particles
xb, yb are the x and y coordinates for the particles travelling to the left once
it has hit the 'wall', i.e. backward particles.
Velocity and dt are fixed in order to see a clearer animation with a
proper spacing between particles.
"""
import matplotlib.pylab as plt
import numpy as np
import matplotlib.animation as animation
import matplotlib.image as mpimg
import moviepy.editor as mpy
from moviepy.video.io.bindings import mplfig_to_npimage
plt.rcParams['animation.ffmpeg_path'] = './ffmpegprog/bin/ffmpeg'
def update(t):
global dt,ddt,v,prob,x0f,y0f,x0p,y0p,x0b,y0b,n
    prova = t/(8.0*dt) + ddt #every 8 time steps, ddt is a small increment for the int to work properly
Npart = 1 + int((prova))
xf = np.empty([Npart]) #x-positions of the forward-particles
yf = np.empty([Npart]) #y-positions of the forward-particles
xb = np.empty([Npart]) #x-positions of the backward-particles
yb = np.empty([Npart]) #y-positions of the backward-particles
xp = np.empty([Npart]) #x-positions of the previous-particles
yp = np.empty([Npart]) #y-positions of the previous-particles
for j in range(0,Npart):
if n[j]==0: #if it's the first time that changes
xp[j] = x0p[j]
yp[j] = y0p[j]
xf[j] = -20
yf[j] = -20
xb[j] = -20
yb[j] = -20 #stop moving (out of range)
if xp[j]>=-1.1 and xp[j]<-0.5: #if it's close to 0
x0f[j] = x0f[j] + float(v*dt) #keeps going to the right
y0f[j] = y0f[j] - float(v*dt) #keeps going onwards (y=0)
x0b[j] = x0b[j] - float(v*dt) #goes to the left
y0b[j] = y0b[j] - float(v*dt) #goes down
x0p[j] = -20
y0p[j] = -20 #stop moving
n[j]=1
else: #still not in the separating region
x0f[j] = x0f[j] + float(v*dt) #keeps going to the right
y0f[j] = y0f[j] - float(v*dt) #keeps going onwards (y=0)
x0b[j] = x0b[j] + float(v*dt) #keeps going to the right
y0b[j] = y0b[j] - float(v*dt) #keeps going onwards (y=0)
x0p[j] = x0p[j] + float(v*dt) #keeps going to the right
y0p[j] = y0p[j] - float(v*dt) #keeps going onwards (y=0)
else:
xf[j] = x0f[j]
yf[j] = y0f[j]
xb[j] = x0b[j]
yb[j] = y0b[j]
xp[j] = x0p[j]
yp[j] = y0p[j]
x0f[j] = x0f[j] + float(v*dt)
y0f[j] = y0f[j] - float(v*dt)
x0b[j] = x0b[j] - float(v*dt) #goes to the left
y0b[j] = y0b[j] - float(v*dt) #goes down
x0p[j] = -20
y0p[j] = -20 #stop moving
line.set_xdata(xf)
line.set_ydata(yf)
line2.set_xdata(xf)
line2.set_ydata(yf)
line3.set_xdata(xb)
line3.set_ydata(yb)
line4.set_xdata(xb)
line4.set_ydata(yb)
line5.set_xdata(xp)
line5.set_ydata(yp)
line6.set_xdata(xp)
line6.set_ydata(yp)
return mplfig_to_npimage(fig_mpl)
global dt,ddt,v,prob,x0f,y0f,x0p,y0p,x0b,y0b,n
#some parameters
timef=12 #total time of simulation
v=10.0 #velocity of the partciles (FIXED)
t=0.0 #initial time
dt=0.05 #time step (FIXED)
##prob=0.8 #define probability == T trans. coeff.
ddt=1.0e-7 #defined for not having problems with int() and the number of particles
#reads the T coeff
datacoef=open('llum.dat','r')
coef=datacoef.readlines()
valueT=float((coef[0]).split('\t')[1])
valueR=float((coef[0]).split('\t')[0])
valueTot=valueT + valueR
coefT=valueT/valueTot
prob=coefT #define probability == T trans. coeff.
fig_mpl, ax = plt.subplots(1,facecolor='black')
ax.set_axis_bgcolor('black')
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
#plot particles
line, = ax.plot([],[], "*", ms=15*prob, color='white') #onwards (already passed)
line2, = ax.plot([],[], "o", ms=15*prob, color='white',alpha=0.4)
line3, = ax.plot([],[], "*", ms=15*(1.0-prob), color='white') #backwards (already hit)
line4, = ax.plot([],[], "o", ms=15*(1.0-prob), color='white',alpha=0.4)
line5, = ax.plot([],[], "*", ms=15, color='white') #going to the wall
line6, = ax.plot([],[], "o", ms=15, color='white',alpha=0.4)
#we configure the plot (size)
ax.set_ylim(-18.0,18.0)
ax.set_xlim(-18.0,18.0)
Ninter=10000000 #we define some arrays to be used
x0f=np.empty([Ninter])
y0f=np.empty([Ninter])
x0b=np.empty([Ninter])
y0b=np.empty([Ninter])
x0p=np.empty([Ninter])
y0p=np.empty([Ninter])
n=np.empty([Ninter]) #counter for each particle
for j in range(0,Ninter): #we define the initial position of the supposed particles
x0f[j]=-13.75
y0f[j]=13.75
x0b[j]=-13.75
y0b[j]=13.75
x0p[j]=-13.75
y0p[j]=13.75
# Set up formatting for the movie files
#FFwriter = animation.FFMpegWriter()
#let's plot like a barrier
barx=[]
bary=[]
for j in xrange(-200,200,1):
barx.append(j/100.0)
if j>=-100 and j<=100:
bary.append(20.0)
else:
bary.append(-20.0)
abx=np.array(barx)
aby=np.array(bary)
ax.fill_between(abx,aby,-20,color='blue',alpha=(1-prob))
#and now the flashlight
light=mpimg.imread('linterna4.png')
ax.imshow(light,extent=(-18,-12,12,18),aspect='auto')
#let's do the animation
anim=mpy.VideoClip(update,duration=timef)
anim.write_gif('simulation_2.gif',program='ffmpeg',fps=15)
#plt.show() | gpl-3.0 | -4,692,148,553,348,611,000 | 36.426136 | 112 | 0.589558 | false |
Donkyhotay/MoonPy | zope/dublincore/interfaces.py | 1 | 14791 | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Dublin Core interfaces
$Id: interfaces.py 69636 2006-08-18 10:02:46Z faassen $
"""
__docformat__ = 'restructuredtext'
from zope.annotation.interfaces import IAnnotatable
from zope.interface import Interface
from zope.schema import Text, TextLine, Datetime, Tuple
class IDublinCoreElementItem(Interface):
"""A qualified dublin core element"""
qualification = TextLine(
title = u"Qualification",
description = u"The element qualification"
)
value = Text(
title = u"Value",
description = u"The element value",
)
class IGeneralDublinCore(Interface):
"""Dublin-core data access interface
The Dublin Core, http://dublincore.org/, is a meta data standard
that specifies a set of standard data elements. It provides
flexibility of interpretation of these elements by providing for
element qualifiers that specialize the meaning of specific
elements. For example, a date element might have a qualifier, like
"creation" to indicate that the date is a creation date. In
addition, any element may be repeated. For some elements, like
subject, and contributor, this is obviously necessary, but for
other elements, like title and description, allowing repetitions
is not very useful and adds complexity.
This interface provides methods for retrieving data in full
generality, to be compliant with the Dublin Core standard.
Other interfaces will provide more convenient access methods
tailored to specific element usage patterns.
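
    For example (values here are illustrative), a creation date would be
    returned by getQualifiedDates() as an element item with qualification
    u'creation' and a date string value.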
"""
def getQualifiedTitles():
"""Return a sequence of Title IDublinCoreElementItem.
"""
def getQualifiedCreators():
"""Return a sequence of Creator IDublinCoreElementItem.
"""
def getQualifiedSubjects():
"""Return a sequence of Subject IDublinCoreElementItem.
"""
def getQualifiedDescriptions():
"""Return a sequence of Description IDublinCoreElementItem.
"""
def getQualifiedPublishers():
"""Return a sequence of Publisher IDublinCoreElementItem.
"""
def getQualifiedContributors():
"""Return a sequence of Contributor IDublinCoreElementItem.
"""
def getQualifiedDates():
"""Return a sequence of Date IDublinCoreElementItem.
"""
def getQualifiedTypes():
"""Return a sequence of Type IDublinCoreElementItem.
"""
def getQualifiedFormats():
"""Return a sequence of Format IDublinCoreElementItem.
"""
def getQualifiedIdentifiers():
"""Return a sequence of Identifier IDublinCoreElementItem.
"""
def getQualifiedSources():
"""Return a sequence of Source IDublinCoreElementItem.
"""
def getQualifiedLanguages():
"""Return a sequence of Language IDublinCoreElementItem.
"""
def getQualifiedRelations():
"""Return a sequence of Relation IDublinCoreElementItem.
"""
def getQualifiedCoverages():
"""Return a sequence of Coverage IDublinCoreElementItem.
"""
def getQualifiedRights():
"""Return a sequence of Rights IDublinCoreElementItem.
"""
class IWritableGeneralDublinCore(Interface):
"""Provide write access to dublin core data
This interface augments `IStandardDublinCore` with methods for
writing elements.
"""
def setQualifiedTitles(qualified_titles):
"""Set the qualified Title elements.
The argument must be a sequence of `IDublinCoreElementItem`.
"""
def setQualifiedCreators(qualified_creators):
"""Set the qualified Creator elements.
The argument must be a sequence of Creator `IDublinCoreElementItem`.
"""
def setQualifiedSubjects(qualified_subjects):
"""Set the qualified Subjects elements.
The argument must be a sequence of Subject `IDublinCoreElementItem`.
"""
def setQualifiedDescriptions(qualified_descriptions):
"""Set the qualified Descriptions elements.
The argument must be a sequence of Description `IDublinCoreElementItem`.
"""
def setQualifiedPublishers(qualified_publishers):
"""Set the qualified Publishers elements.
The argument must be a sequence of Publisher `IDublinCoreElementItem`.
"""
def setQualifiedContributors(qualified_contributors):
"""Set the qualified Contributors elements.
The argument must be a sequence of Contributor `IDublinCoreElementItem`.
"""
def setQualifiedDates(qualified_dates):
"""Set the qualified Dates elements.
The argument must be a sequence of Date `IDublinCoreElementItem`.
"""
def setQualifiedTypes(qualified_types):
"""Set the qualified Types elements.
The argument must be a sequence of Type `IDublinCoreElementItem`.
"""
def setQualifiedFormats(qualified_formats):
"""Set the qualified Formats elements.
The argument must be a sequence of Format `IDublinCoreElementItem`.
"""
def setQualifiedIdentifiers(qualified_identifiers):
"""Set the qualified Identifiers elements.
The argument must be a sequence of Identifier `IDublinCoreElementItem`.
"""
def setQualifiedSources(qualified_sources):
"""Set the qualified Sources elements.
The argument must be a sequence of Source `IDublinCoreElementItem`.
"""
def setQualifiedLanguages(qualified_languages):
"""Set the qualified Languages elements.
The argument must be a sequence of Language `IDublinCoreElementItem`.
"""
def setQualifiedRelations(qualified_relations):
"""Set the qualified Relations elements.
The argument must be a sequence of Relation `IDublinCoreElementItem`.
"""
def setQualifiedCoverages(qualified_coverages):
"""Set the qualified Coverages elements.
The argument must be a sequence of Coverage `IDublinCoreElementItem`.
"""
def setQualifiedRights(qualified_rights):
"""Set the qualified Rights elements.
The argument must be a sequence of Rights `IDublinCoreElementItem`.
"""
class IDCDescriptiveProperties(Interface):
"""Basic descriptive meta-data properties
"""
title = TextLine(
title = u'Title',
description =
u"The first unqualified Dublin Core 'Title' element value."
)
description = Text(
title = u'Description',
description =
u"The first unqualified Dublin Core 'Description' element value.",
)
class IDCTimes(Interface):
"""Time properties
"""
created = Datetime(
title = u'Creation Date',
description =
u"The date and time that an object is created. "
u"\nThis is normally set automatically."
)
modified = Datetime(
title = u'Modification Date',
description =
u"The date and time that the object was last modified in a\n"
u"meaningful way."
)
class IDCPublishing(Interface):
"""Publishing properties
"""
effective = Datetime(
title = u'Effective Date',
description =
u"The date and time that an object should be published. "
)
expires = Datetime(
title = u'Expiration Date',
description =
u"The date and time that the object should become unpublished."
)
class IDCExtended(Interface):
"""Extended properties
    This is a mixed bag of properties we want but that we probably haven't
quite figured out yet.
"""
creators = Tuple(
title = u'Creators',
description = u"The unqualified Dublin Core 'Creator' element values",
value_type = TextLine(),
)
subjects = Tuple(
title = u'Subjects',
description = u"The unqualified Dublin Core 'Subject' element values",
value_type = TextLine(),
)
publisher = Text(
title = u'Publisher',
description =
u"The first unqualified Dublin Core 'Publisher' element value.",
)
contributors = Tuple(
title = u'Contributors',
description =
u"The unqualified Dublin Core 'Contributor' element values",
value_type = TextLine(),
)
class ICMFDublinCore(Interface):
"""This interface duplicates the CMF dublin core interface.
"""
def Title():
"""Return the resource title.
The first unqualified Dublin Core `Title` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
def Creator():
"""Return the resource creators.
Return the full name(s) of the author(s) of the content
object.
The unqualified Dublin Core `Creator` element values are
returned as a sequence of unicode strings.
"""
def Subject():
"""Return the resource subjects.
The unqualified Dublin Core `Subject` element values are
returned as a sequence of unicode strings.
"""
def Description():
"""Return the resource description
Return a natural language description of this object.
The first unqualified Dublin Core `Description` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
def Publisher():
"""Dublin Core element - resource publisher
Return full formal name of the entity or person responsible
for publishing the resource.
The first unqualified Dublin Core `Publisher` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
def Contributors():
"""Return the resource contributors
Return any additional collaborators.
The unqualified Dublin Core `Contributor` element values are
returned as a sequence of unicode strings.
"""
def Date():
"""Return the default date
The first unqualified Dublin Core `Date` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned. The
string is formatted 'YYYY-MM-DD H24:MN:SS TZ'.
"""
def CreationDate():
"""Return the creation date.
The value of the first Dublin Core `Date` element qualified by
'creation' is returned as a unicode string if a qualified
element is defined, otherwise, an empty unicode string is
returned. The string is formatted 'YYYY-MM-DD H24:MN:SS TZ'.
"""
def EffectiveDate():
"""Return the effective date
The value of the first Dublin Core `Date` element qualified by
'effective' is returned as a unicode string if a qualified
element is defined, otherwise, an empty unicode string is
returned. The string is formatted 'YYYY-MM-DD H24:MN:SS TZ'.
"""
def ExpirationDate():
"""Date resource expires.
The value of the first Dublin Core `Date` element qualified by
'expiration' is returned as a unicode string if a qualified
element is defined, otherwise, an empty unicode string is
returned. The string is formatted 'YYYY-MM-DD H24:MN:SS TZ'.
"""
def ModificationDate():
"""Date resource last modified.
The value of the first Dublin Core `Date` element qualified by
'modification' is returned as a unicode string if a qualified
element is defined, otherwise, an empty unicode string is
returned. The string is formatted 'YYYY-MM-DD H24:MN:SS TZ'.
"""
def Type():
"""Return the resource type
Return a human-readable type name for the resource.
The first unqualified Dublin Core `Type` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
def Format():
"""Return the resource format.
Return the resource's MIME type (e.g., 'text/html',
'image/png', etc.).
The first unqualified Dublin Core `Format` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
def Identifier():
"""Return the URL of the resource.
This value is computed. It is included in the output of
qualifiedIdentifiers with the qualification 'url'.
"""
def Language():
"""Return the resource language.
Return the RFC language code (e.g., 'en-US', 'pt-BR')
for the resource.
The first unqualified Dublin Core `Language` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
def Rights():
"""Return the resource rights.
Return a string describing the intellectual property status,
if any, of the resource. for the resource.
The first unqualified Dublin Core `Rights` element value is
returned as a unicode string if an unqualified element is
defined, otherwise, an empty unicode string is returned.
"""
class IZopeDublinCore(
IGeneralDublinCore,
ICMFDublinCore,
IDCDescriptiveProperties,
IDCTimes,
IDCPublishing,
IDCExtended,
):
"""Zope Dublin Core properties"""
class IWriteZopeDublinCore(
IZopeDublinCore,
IWritableGeneralDublinCore,
):
"""Zope Dublin Core properties with generate update support"""
class IZopeDublinCoreAnnotatable(IAnnotatable):
"""Objects that can be annotated with Zope Dublin-Core meta data
This is a marker interface that indicates the intent to have
Zope Dublin-Core meta data associated with an object.
"""
| gpl-3.0 | -6,439,756,937,129,569,000 | 30.204641 | 80 | 0.654114 | false |
sernst/cauldron | cauldron/session/writing/components/plotly_component.py | 1 | 2008 | import typing
import os
from cauldron import cli
from cauldron import environ
from cauldron.session import projects
from cauldron.session.writing import file_io
from cauldron.session.writing.components.definitions import COMPONENT
from cauldron.session.writing.components.definitions import WEB_INCLUDE
PLOTLY_WARNING = cli.reformat(
"""
[WARNING]: Plotly library is not installed. Unable to
include library dependencies, which may result in
HTML rendering errors. To resolve this make sure
you have installed the Plotly library.
"""
)
def get_version_one_path() -> typing.Optional[str]:
try:
from plotly.offline import offline as plotly_offline
except Exception:
return None
return os.path.join(
environ.paths.clean(os.path.dirname(plotly_offline.__file__)),
'plotly.min.js'
)
def get_version_two_path() -> typing.Optional[str]:
try:
import plotly
except Exception:
return None
return os.path.join(
environ.paths.clean(os.path.dirname(plotly.__file__)),
'package_data',
'plotly.min.js'
)
def get_source_path() -> typing.Union[str, None]:
source_path = get_version_one_path()
if source_path is None:
environ.log(PLOTLY_WARNING)
return None
elif not os.path.exists(source_path):
source_path = get_version_two_path()
return source_path
def create(project: 'projects.Project') -> COMPONENT:
"""
    Assembles the plotly component for the given project by locating the
    installed plotly.min.js library.

    :param project:
        the project whose output directory will receive a copy of
        plotly.min.js
    :return:
        a COMPONENT with the web-include and file-copy entries, or an
        empty COMPONENT if the plotly library cannot be located
"""
source_path = get_source_path()
if not source_path:
return COMPONENT([], [])
output_slug = 'components/plotly/plotly.min.js'
output_path = os.path.join(project.output_directory, output_slug)
return COMPONENT(
includes=[WEB_INCLUDE(
name='plotly',
src='/{}'.format(output_slug)
)],
files=[file_io.FILE_COPY_ENTRY(
source=source_path,
destination=output_path
)]
)
| mit | -18,592,247,775,742,830 | 24.1 | 71 | 0.641434 | false |
osrf/rosbook | code/patrol/src/shapes.py | 1 | 2028 | # BEGIN ALL
#!/usr/bin/env python
import rospy
from smach import State,StateMachine
from time import sleep
class Drive(State):
def __init__(self, distance):
State.__init__(self, outcomes=['success'])
self.distance = distance
def execute(self, userdata):
print 'Driving', self.distance
sleep(1)
return 'success'
class Turn(State):
def __init__(self, angle):
State.__init__(self, outcomes=['success'])
self.angle = angle
def execute(self, userdata):
print 'Turning', self.angle
sleep(1)
return 'success'
if __name__ == '__main__':
# BEGIN PART_2
triangle = StateMachine(outcomes=['success'])
with triangle:
StateMachine.add('SIDE1', Drive(1), transitions={'success':'TURN1'})
StateMachine.add('TURN1', Turn(120), transitions={'success':'SIDE2'})
StateMachine.add('SIDE2', Drive(1), transitions={'success':'TURN2'})
StateMachine.add('TURN2', Turn(120), transitions={'success':'SIDE3'})
StateMachine.add('SIDE3', Drive(1), transitions={'success':'success'})
# END PART_2
square = StateMachine(outcomes=['success'])
with square:
StateMachine.add('SIDE1', Drive(1), transitions={'success':'TURN1'})
StateMachine.add('TURN1', Turn(90), transitions={'success':'SIDE2'})
StateMachine.add('SIDE2', Drive(1), transitions={'success':'TURN2'})
StateMachine.add('TURN2', Turn(90), transitions={'success':'SIDE3'})
StateMachine.add('SIDE3', Drive(1), transitions={'success':'TURN3'})
StateMachine.add('TURN3', Turn(90), transitions={'success':'SIDE4'})
StateMachine.add('SIDE4', Drive(1), transitions={'success':'success'})
# BEGIN PART_3
shapes = StateMachine(outcomes=['success'])
with shapes:
StateMachine.add('TRIANGLE', triangle, transitions={'success':'SQUARE'})
StateMachine.add('SQUARE', square, transitions={'success':'success'})
shapes.execute()
# END PART_3
# END ALL
| apache-2.0 | 790,602,064,692,161,200 | 32.8 | 80 | 0.623767 | false |
3DGenomes/tadbit | _pytadbit/hic_data.py | 1 | 80109 | """
December 12, 2014.
"""
import os
from sys import stderr
from collections import OrderedDict
from warnings import warn
from bisect import bisect_right as bisect
from cPickle import HIGHEST_PROTOCOL, dump, load
from numpy.linalg import LinAlgError
from numpy import corrcoef, nansum, array, isnan, mean
from numpy import meshgrid, asarray, exp, linspace, std
from numpy import nanpercentile as npperc, log as nplog
from numpy import nanmax, ma, zeros_like
from scipy.stats import ttest_ind, spearmanr
from scipy.special import gammaincc
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
from scipy.sparse.linalg import eigsh
from scipy.sparse import csr_matrix
from pytadbit.utils.extraviews import plot_compartments
from pytadbit.utils.extraviews import plot_compartments_summary
from pytadbit.utils.hic_filtering import filter_by_mean, filter_by_zero_count
from pytadbit.utils.normalize_hic import iterative, expected
from pytadbit.parsers.genome_parser import parse_fasta
from pytadbit.parsers.bed_parser import parse_bed
from pytadbit.utils.file_handling import mkdir
from pytadbit.utils.hmm import gaussian_prob, best_path, train
from pytadbit.utils.tadmaths import calinski_harabasz
try:
from pytadbit.parsers.cooler_parser import cooler_file
except ImportError:
stderr.write('WARNING: cooler output is not available. Probably ' +
'you need to install h5py\n')
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
"""
https://stackoverflow.com/questions/5595425/what-is-the-best-way-to-compare-floats-for-almost-equality-in-python/33024979#33024979
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
class HiC_data(dict):
"""
    Hi-C data object: a dictionary subclass indexed by (row * size + column).
    This may also hold the print/write-to-file matrix functions.
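
    Example (sketch; an empty 100 by 100 matrix at 10 kb resolution):

        hic = HiC_data([], 100, resolution=10000)
        hic[3, 4] = 5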
"""
def __init__(self, items, size, chromosomes=None, dict_sec=None,
resolution=1, masked=None, symmetricized=False):
super(HiC_data, self).__init__(items)
self.__size = size
self._size2 = size**2
self._symmetricize()
self.bias = None
self.bads = masked or {}
self.chromosomes = chromosomes
self.sections = dict_sec
self.section_pos = {}
self.resolution = resolution
self.expected = None
self.symmetricized = symmetricized
self.compartments = {}
if self.chromosomes:
total = 0
for crm in self.chromosomes:
self.section_pos[crm] = (total, total + self.chromosomes[crm])
total += self.chromosomes[crm]
if self.sections == {}:
self.section_pos = {None: (0, self.__size)}
self.sections = dict([((None, i), i)
for i in xrange(0, self.__size)])
def _symmetricize(self):
"""
Check if matrix is symmetric (check first 10 non-zero values) and,
if not, make it symmetric
- if matrix is half empty, copy values on one side to the other side
- if matrix is asymmetric, sum non-diagonal values
"""
to_sum = False
symmetric = True
count = 0
for n in self:
i = n / self.__size
j = n % self.__size
            if i == j or self[i, j] == self[j, i] == 0:
continue
if not isclose(self[i, j], self[j, i]):
if self[i, j] != 0 and self[j, i] != 0:
to_sum = True
symmetric = False
break
if count > 10:
return
count += 1
if symmetric: # may not reach 10 values
return
if to_sum:
for n in self.keys()[:]:
i = n / self.__size
j = n % self.__size
if i != j:
self[j, i] = self[i, j] = self[j, i] + self[i, j]
else:
for n in self.keys()[:]:
i = n / self.__size
j = n % self.__size
self[j, i] = self[i, j] = self[n]
def _update_size(self, size):
self.__size += size
self._size2 = self.__size**2
def __len__(self):
return self.__size
def __getitem__(self, row_col):
"""
slow one... for user
for fast item getting, use self.get()
"""
try:
row, col = row_col
pos = row * self.__size + col
if pos > self._size2:
raise IndexError(
'ERROR: row or column larger than %s' % self.__size)
return self.get(pos, 0)
except TypeError:
if row_col > self._size2:
raise IndexError(
'ERROR: position %d larger than %s^2' % (row_col,
self.__size))
return self.get(row_col, 0)
def __setitem__(self, row_col, val):
"""
slow one... for user
for fast item getting, use self.get()
"""
try:
row, col = row_col
pos = row * self.__size + col
if pos > self._size2:
                raise IndexError(
                    'ERROR: row or column larger than %s' % self.__size)
super(HiC_data, self).__setitem__(pos, val)
except TypeError:
if row_col > self._size2:
raise IndexError(
'ERROR: position %d larger than %s^2' % (row_col,
self.__size))
super(HiC_data, self).__setitem__(row_col, val)
def get_hic_data_as_csr(self):
"""
Returns a scipy sparse matrix in Compressed Sparse Row format of the Hi-C data in the dictionary
:returns: scipy sparse matrix in Compressed Sparse Row format
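
        Example (sketch; assumes ``hic`` is a HiC_data instance):

            csr = hic.get_hic_data_as_csr()
            dense = csr.todense()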
"""
values = []
cols = []
rows = []
for key, value in self.iteritems():
            row, col = divmod(key, self.__size)
values.append(float(value))
cols.append(col)
rows.append(row)
return csr_matrix((values, (rows, cols)), shape=(self.__size,self.__size))
def add_sections_from_fasta(self, fasta):
"""
        Add genomic coordinates to the HiC_data object by reading them from a
        FASTA file containing the chromosome sequences
:param fasta: path to a FASTA file
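
        Example (sketch; the path is hypothetical):

            hic.add_sections_from_fasta('/path/to/genome.fa')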
"""
genome = parse_fasta(fasta, verbose=False)
sections = []
genome_seq = OrderedDict()
size = 0
for crm in genome:
genome_seq[crm] = int(len(genome[crm])) / self.resolution + 1
size += genome_seq[crm]
section_sizes = {}
for crm in genome_seq:
len_crm = genome_seq[crm]
section_sizes[(crm,)] = len_crm
sections.extend([(crm, i) for i in xrange(len_crm)])
dict_sec = dict([(j, i) for i, j in enumerate(sections)])
self.chromosomes = genome_seq
self.sections = dict_sec
if self.chromosomes:
total = 0
for crm in self.chromosomes:
self.section_pos[crm] = (total, total + self.chromosomes[crm])
total += self.chromosomes[crm]
if size != self.__size:
warn('WARNING: different sizes (%d, now:%d), ' % (self.__size, size)
+ 'should adjust the resolution')
self.__size = size
self._size2 = size**2
def add_sections(self, lengths, chr_names=None, binned=False):
"""
        Add genomic coordinates to the HiC_data object from a list of
        chromosome lengths. Order matters.
:param lengths: list of chromosome lengths
:param None chr_names: list of corresponding chromosome names.
:param False binned: if True, lengths will not be divided by resolution
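
        Example (sketch; two hypothetical chromosomes of 3 Mb and 2 Mb):

            hic.add_sections([3000000, 2000000], chr_names=['chr1', 'chr2'])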
"""
sections = []
genome_seq = OrderedDict()
size = 0
resolution = 1 if binned else self.resolution
for crm, length in enumerate(lengths):
cnam = 'chr' + str(crm) if not chr_names else chr_names[crm]
genome_seq[cnam] = int(length) / resolution + 1
size += genome_seq[cnam]
section_sizes = {}
for crm in genome_seq:
len_crm = genome_seq[crm]
section_sizes[(crm,)] = len_crm
sections.extend([(crm, i) for i in xrange(len_crm)])
dict_sec = dict([(j, i) for i, j in enumerate(sections)])
self.chromosomes = genome_seq
self.sections = dict_sec
if self.chromosomes:
total = 0
for crm in self.chromosomes:
self.section_pos[crm] = (total, total + self.chromosomes[crm])
total += self.chromosomes[crm]
if size != self.__size:
warn('WARNING: different sizes (%d, now:%d), ' % (self.__size, size)
+ 'should adjust the resolution')
self.__size = size
self._size2 = size**2
def cis_trans_ratio(self, normalized=False, exclude=None, diagonal=True,
equals=None):
"""
Counts the number of interactions occurring within chromosomes (cis) with
respect to the total number of interactions
:param False normalized: used normalized data
:param None exclude: exclude a given list of chromosome from the
ratio (may want to exclude translocated chromosomes)
:param False diagonal: replace values in the diagonal by 0 or 1
:param None equals: can pass a function that would decide if 2 chromosomes
have to be considered as the same. e.g. lambda x, y: x[:4]==y[:4] will
consider chr2L and chr2R as being the same chromosome. WARNING: only
working on consecutive chromosomes.
:returns: the ratio of cis interactions over the total number of
           interactions. This ratio is expected to fall in the 40-60% range
           for classic human dilution Hi-C with HindIII as restriction enzyme.
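
        Example (sketch; ``hic`` must have chromosome coordinates loaded):

            ratio = hic.cis_trans_ratio(normalized=False)
            # treat e.g. chr2L and chr2R as a single chromosome:
            ratio = hic.cis_trans_ratio(equals=lambda x, y: x[:4] == y[:4])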
"""
if normalized and not self.bias:
raise Exception('ERROR: experiment not normalized yet')
        if exclude is None:
exclude = []
        if equals is None:
equals = lambda x, y: x == y
intra = 0
if not self.chromosomes:
return float('nan')
# define chromosomes to be merged
to_skip = set()
c_prev = ''
for c in self.chromosomes:
if equals(c, c_prev):
to_skip.add(c_prev)
c_prev = c
sections = sorted([-1] + [self.section_pos[c][1]
for c in self.section_pos
if not c in to_skip])
# defines columns to be skipped
bads = set(self.bads.keys())
for c in exclude:
bads.update(i for i in xrange(*self.section_pos[c]))
# diagonal
if diagonal:
valid = lambda x, y: True
else:
valid = lambda x, y: x != y
# normalization
if normalized:
transform = lambda x, y, z: x / self.bias[y] / self.bias[z]
else:
transform = lambda x, y, z: x
# compute ratio
for k, v in self.iteritems():
i, j = divmod(k, self.__size)
if bisect(sections, i) != bisect(sections, j):
continue
if i in bads or j in bads:
continue
if valid(i, j): # diagonal thing
intra += transform(v, i, j)
try:
return float(intra) / self.sum(bias=self.bias if normalized else None, bads=bads)
except ZeroDivisionError:
return 0.
def filter_columns(self, draw_hist=False, savefig=None, perc_zero=75,
by_mean=True, min_count=None, silent=False):
"""
Call filtering function, to remove artifactual columns in a given Hi-C
matrix. This function will detect columns with very low interaction
counts; columns passing through a cell with no interaction in the
diagonal; and columns with NaN values (in this case NaN will be replaced
by zero in the original Hi-C data matrix). Filtered out columns will be
stored in the dictionary Experiment._zeros.
:param False draw_hist: shows the distribution of mean values by column
the polynomial fit, and the cut applied.
:param None savefig: path to a file where to save the image generated;
if None, the image will be shown using matplotlib GUI (the extension
of the file name will determine the desired format).
:param 75 perc_zero: maximum percentage of cells with no interactions
allowed.
:param None min_count: minimum number of reads mapped to a bin (recommended
value could be 2500). If set this option overrides the perc_zero
filtering... This option is slightly slower.
:param True by_mean: filter columns by mean column value using
:func:`pytadbit.utils.hic_filtering.filter_by_mean` function
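
        Example (sketch; filtered columns are stored in ``hic.bads``):

            hic.filter_columns(min_count=2500, silent=True)
            print len(hic.bads), 'columns filtered out'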
"""
self.bads = filter_by_zero_count(self, perc_zero, min_count=min_count,
silent=silent)
if by_mean:
self.bads.update(filter_by_mean(
self, draw_hist=draw_hist, silent=silent,
savefig=savefig, bads=self.bads))
if not silent:
print 'Found %d of %d columns with poor signal' % (len(self.bads),
len(self))
def sum(self, bias=None, bads=None):
"""
Sum Hi-C data matrix
WARNING: parameters are not meant to be used by external users
:params None bias: expects a dictionary of biases to use normalized matrix
:params None bads: extends computed bad columns
:returns: the sum of the Hi-C matrix skipping bad columns
"""
N = self.__size
norm_sum = 0
bads = bads or self.bads
if bias:
for k, v in self.iteritems():
i, j = divmod(k, N)
if i in bads or j in bads:
continue
norm_sum += v / (bias[i] * bias[j])
else:
for k, v in self.iteritems():
i, j = divmod(k, N)
if i in bads or j in bads:
continue
norm_sum += v
return norm_sum
def normalize_expected(self, **kwargs):
self.expected = expected(self, bads=self.bads, **kwargs)
def normalize_hic(self, iterations=0, max_dev=0.1, silent=False,
sqrt=False, factor=1):
"""
Normalize the Hi-C data.
It fills the Experiment.norm variable with the Hi-C values divided by
the calculated weight.
        :param 0 iterations: number of iterations
:param 0.1 max_dev: iterative process stops when the maximum deviation
between the sum of row is equal to this number (0.1 means 10%)
:param False silent: does not warn when overwriting weights
:param False sqrt: uses the square root of the computed biases
:param 1 factor: final mean number of normalized interactions wanted
per cell (excludes filtered, or bad, out columns)
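
        Example (sketch; filter columns first so biases skip bad columns):

            hic.filter_columns(silent=True)
            hic.normalize_hic(iterations=10, max_dev=0.1)
            # normalized value of a cell (columns assumed not filtered out):
            norm = hic[10, 20] / (hic.bias[10] * hic.bias[20])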
"""
bias = iterative(self, iterations=iterations,
max_dev=max_dev, bads=self.bads,
verbose=not silent)
if sqrt:
bias = dict((b, bias[b]**0.5) for b in bias)
if factor:
if not silent:
print 'rescaling to factor %d' % factor
print ' - getting the sum of the matrix'
# get the sum on half of the matrix
norm_sum = self.sum(bias)
if not silent:
print ' => %.3f' % norm_sum
print ' - rescaling biases'
# divide biases
target = (norm_sum / float(len(self) * len(self) * factor))**0.5
bias = dict([(b, bias[b] * target) for b in bias])
self.bias = bias
def save_biases(self, fnam, protocol=None):
"""
Save biases, decay and bad columns in pickle format (to be loaded by
the function load_hic_data_from_bam)
:param fnam: path to output file
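
        Example (sketch; the file name is hypothetical):

            hic.save_biases('biases_100kb.pickle')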
"""
        out = open(fnam, 'wb')
dump({'biases' : self.bias,
'decay' : self.expected,
'badcol' : self.bads,
'resolution': self.resolution}, out,
protocol if protocol else HIGHEST_PROTOCOL)
out.close()
def load_biases(self, fnam, protocol=None):
"""
Load biases, decay and bad columns from pickle file
:param fnam: path to input pickle file
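
        Example (sketch; the resolution stored in the file must match):

            hic.load_biases('biases_100kb.pickle')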
"""
        biases = load(open(fnam, 'rb'))
if biases['resolution'] != self.resolution:
raise Exception(('Error: resolution in Pickle (%d) does not match '
'the one of this HiC_data object (%d)') % (
biases['resolution'], self.resolution))
self.bias = biases['biases']
self.expected = biases['decay']
self.bads = biases['badcol']
def get_as_tuple(self):
return tuple([self[i, j]
for j in xrange(len(self))
for i in xrange(len(self))])
def write_coord_table(self, fname, focus=None, diagonal=True,
normalized=False, format='BED'):
"""
writes a coordinate table to a file.
:param None focus: a tuple with the (start, end) position of the desired
window of data (start, starting at 1, and both start and end are
inclusive). Alternatively a chromosome name can be input or a tuple
of chromosome name, in order to retrieve a specific inter-chromosomal
region
:param True diagonal: if False, diagonal is replaced by zeroes
:param False normalized: get normalized data
:param BED format: either "BED"
chr1 \t 111 \t 222 \t chr2:333-444,55 \t 1 \t .
chr2 \t 333 \t 444 \t chr1:111-222,55 \t 2 \t .
or "long-range" format:
chr1:111-222 \t chr2:333-444 \t 55
chr2:333-444 \t chr1:111-222 \t 55
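
        Example (sketch; writes pairwise contacts of a hypothetical chr1):

            hic.write_coord_table('chr1_contacts.tsv', focus='chr1',
                                  format='long-range')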
"""
if focus:
if isinstance(focus, tuple) and isinstance(focus[0], int):
if len(focus) == 2:
start1, end1 = focus
start2, end2 = focus
start1 -= 1
start2 -= 1
else:
start1, end1, start2, end2 = focus
start1 -= 1
start2 -= 1
elif isinstance(focus, tuple) and isinstance(focus[0], str):
start1, end1 = self.section_pos[focus[0]]
start2, end2 = self.section_pos[focus[1]]
else:
start1, end1 = self.section_pos[focus]
start2, end2 = self.section_pos[focus]
else:
start1 = start2 = 0
end1 = end2 = len(self)
out = open(fname, 'w')
if format == 'long-range':
rownam = ['%s:%d-%d' % (k[0],
k[1] * self.resolution,
(k[1] + 1) * self.resolution)
for k in sorted(self.sections,
key=lambda x: self.sections[x])
if start2 <= self.sections[k] < end2]
if not rownam:
raise Exception('ERROR: HiC data object should have genomic coordinates')
iter_rows = self.yield_matrix(focus=focus, diagonal=diagonal,
normalized=normalized)
pair_string = '%s\t%s\t%f\n' if normalized else '%s\t%s\t%d\n'
for nrow, row in enumerate(rownam, 1):
line = iter_rows.next()
iter_cols = iter(line)
for col in rownam[nrow:]:
val = iter_cols.next()
if not val:
continue
out.write(pair_string % (row, col, val))
elif format == 'BED':
rownam = ['%s\t%d\t%d' % (k[0],
k[1] * self.resolution,
(k[1] + 1) * self.resolution)
for k in sorted(self.sections,
key=lambda x: self.sections[x])
if start2 <= self.sections[k] < end2]
colnam = ['%s:%d-%d' % (k[0],
k[1] * self.resolution,
(k[1] + 1) * self.resolution)
for k in sorted(self.sections,
key=lambda x: self.sections[x])
if start2 <= self.sections[k] < end2]
if not rownam:
raise Exception('ERROR: Hi-C data object should have genomic coordinates')
iter_rows = self.yield_matrix(focus=focus, diagonal=diagonal,
normalized=normalized)
pair_string = '%s\t%s,%f\t%d\t.\n' if normalized else '%s\t%s,%d\t%d\t.\n'
count = 1
for nrow, row in enumerate(rownam, 1):
line = iter_rows.next()
iter_cols = iter(line)
for col in colnam[nrow:]:
val = iter_cols.next()
if not val:
continue
out.write(pair_string % (row, col, val, count))
count += 1
else:
raise Exception('ERROR: format "%s" not found\n' % format)
out.close()
def write_cooler(self, fname, normalized=False):
"""
writes the hic_data to a cooler file.
:param False normalized: get normalized data
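
        Example (sketch; needs h5py installed and raw integer counts):

            hic.write_cooler('matrix.cool', normalized=True)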
"""
if normalized and not self.bias:
raise Exception('ERROR: data not normalized yet')
if not all(isinstance(val, int) for _, val in self.iteritems()):
raise Exception('ERROR: raw hic data (integer values) is needed for cooler format')
if self.chromosomes:
if len(self.chromosomes) > 1:
sections = OrderedDict((key,val*self.resolution)
for key, val in self.chromosomes.iteritems())
else: # maybe part of a matrix
sections = {next(iter(self.chromosomes)): self.__size*self.resolution}
else: # maybe part of a matrix
sections = {"Unknown": self.__size*self.resolution}
out = cooler_file(fname, self.resolution, sections, sections.keys())
out.create_bins()
out.prepare_matrix()
for key, value in self.iteritems():
            row, col = key // self.__size, key % self.__size
out.write_iter(0, row, col, value)
out.close()
if normalized:
weights = [self.bias[i] if not i in self.bads else 0. for i in xrange(self.__size)]
out.write_weights(weights, weights)
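    # A minimal usage sketch (illustrative; `hic` stands for a HiC_data
    # instance and the file name is made up):
    #     hic.write_cooler('sample.cool')                   # raw counts only
    #     hic.write_cooler('sample.cool', normalized=True)  # also store ICE weights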
def write_matrix(self, fname, focus=None, diagonal=True, normalized=False):
"""
writes the matrix to a file.
:param None focus: a tuple with the (start, end) position of the desired
window of data (start, starting at 1, and both start and end are
inclusive). Alternatively a chromosome name can be input or a tuple
of chromosome name, in order to retrieve a specific inter-chromosomal
region
:param True diagonal: if False, diagonal is replaced by zeroes
:param False normalized: get normalized data
"""
if focus:
if isinstance(focus, tuple) and isinstance(focus[0], int):
if len(focus) == 2:
start1, end1 = focus
start2, end2 = focus
start1 -= 1
start2 -= 1
else:
start1, end1, start2, end2 = focus
start1 -= 1
start2 -= 1
elif isinstance(focus, tuple) and isinstance(focus[0], str):
start1, end1 = self.section_pos[focus[0]]
start2, end2 = self.section_pos[focus[1]]
else:
start1, end1 = self.section_pos[focus]
start2, end2 = self.section_pos[focus]
else:
start1 = start2 = 0
end1 = end2 = len(self)
out = open(fname, 'w')
out.write('# MASKED %s\n' % (' '.join([str(k - start1)
for k in self.bads.keys()
if start1 <= k <= end1])))
rownam = ['%s\t%d-%d' % (k[0],
k[1] * self.resolution + 1,
(k[1] + 1) * self.resolution)
for k in sorted(self.sections,
key=lambda x: self.sections[x])
if start2 <= self.sections[k] < end2]
if rownam:
for line in self.yield_matrix(focus=focus, diagonal=diagonal,
normalized=normalized):
out.write(rownam.pop(0) + '\t' +
'\t'.join([str(i) for i in line]) + '\n')
else:
for line in self.yield_matrix(focus=focus, diagonal=diagonal,
normalized=normalized):
out.write('\t'.join([str(i) for i in line]) + '\n')
out.close()
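    # A minimal usage sketch (illustrative; `hic` stands for a HiC_data
    # instance):
    #     hic.write_matrix('chr3_matrix.tsv', focus='chr3', normalized=True)
    # The output is a tab-separated matrix with a '# MASKED ...' header line
    # listing filtered columns and one row label per line.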
def get_matrix(self, focus=None, diagonal=True, normalized=False,
masked=False):
"""
returns a matrix.
:param None focus: a tuple with the (start, end) position of the desired
window of data (start, starting at 1, and both start and end are
inclusive). Alternatively a chromosome name can be input or a tuple
of chromosome name, in order to retrieve a specific inter-chromosomal
region
:param True diagonal: if False, diagonal is replaced by ones, or zeroes
if normalized
:param False normalized: get normalized data
:param False masked: return masked arrays using the definition of bad
columns
:returns: matrix (a list of lists of values)
"""
if normalized and not self.bias:
raise Exception('ERROR: experiment not normalized yet')
start1, start2, end1, end2 = self._focus_coords(focus)
if normalized:
if diagonal:
matrix = [[self[i, j] / self.bias[i] / self.bias[j]
for i in xrange(start2, end2)]
for j in xrange(start1, end1)]
else:
matrix = [[self[i, j] / self.bias[i] / self.bias[j]
for i in xrange(start2, end2)]
for j in xrange(start1, end1)]
if start1 == start2:
for i in xrange(len(matrix)):
matrix[i][i] = 0
else:
if diagonal:
matrix = [[self[i, j] for i in xrange(start2, end2)]
for j in xrange(start1, end1)]
else:
matrix = [[self[i, j] for i in xrange(start2, end2)]
for j in xrange(start1, end1)]
if start1 == start2:
for i in xrange(len(matrix)):
matrix[i][i] = 1 if matrix[i][i] else 0
if masked:
bads1 = [b - start1 for b in self.bads if start1 <= b < end1]
bads2 = [b - start2 for b in self.bads if start2 <= b < end2]
m = zeros_like(matrix)
for bad1 in bads1:
m[:,bad1] = 1
for bad2 in bads2:
m[bad2,:] = 1
matrix = ma.masked_array(matrix, m)
return matrix
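    # A minimal usage sketch (illustrative):
    #     m = hic.get_matrix(focus='chr3', normalized=True, masked=True)
    # m is then a chr3 x chr3 list of lists (here wrapped in a masked array),
    # with the rows/columns listed in self.bads masked out.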
def _focus_coords(self, focus):
siz = len(self)
if focus:
if isinstance(focus, tuple) and isinstance(focus[0], int):
if len(focus) == 2:
start1, end1 = focus
start2, end2 = focus
start1 -= 1
start2 -= 1
else:
start1, end1, start2, end2 = focus
start1 -= 1
start2 -= 1
elif isinstance(focus, tuple) and isinstance(focus[0], str):
start1, end1 = self.section_pos[focus[0].split(':')[0]]
start2, end2 = self.section_pos[focus[1].split(':')[0]]
if ':' in focus[0]:
pos = focus[0].split(':')[1]
try:
pos1, pos2 = [int(p) / self.resolution
for p in pos.split('-')]
except ValueError:
                        raise Exception('ERROR: position should be in the format "chr3:10000-20000"')
start1, end1 = start1 + pos1, start1 + pos2
if ':' in focus[1]:
                    pos = focus[1].split(':')[1]
                    try:
                        pos1, pos2 = [int(p) / self.resolution
                                      for p in pos.split('-')]
                    except ValueError:
                        raise Exception('ERROR: position should be in the format "chr3:10000-20000"')
                    start2, end2 = start2 + pos1, start2 + pos2
else:
start1, end1 = self.section_pos[focus.split(':')[0]]
if ':' in focus:
pos = focus.split(':')[1]
try:
pos1, pos2 = [int(p) / self.resolution
for p in pos.split('-')]
except ValueError:
                        raise Exception('ERROR: position should be in the format "chr3:10000-20000"')
start1, end1 = start1 + pos1, start1 + pos2
start2, end2 = start1, end1
else:
start1 = start2 = 0
end1 = end2 = siz
return start1, start2, end1, end2
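    # Examples of the focus conventions resolved above (illustrative):
    #     self._focus_coords((1, 10))          -> (0, 0, 10, 10)   # bins 1..10
    #     self._focus_coords('chr3')           -> the chr3 window, twice
    #     self._focus_coords(('chr1', 'chr2')) -> chr1 window vs chr2 window
    # The result is (start1, start2, end1, end2) in bin units.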
def find_compartments(self, crms=None, savefig=None, savedata=None,
savecorr=None, show=False, suffix='', ev_index=None,
rich_in_A=None, format='png', savedir=None,
max_ev=3, show_compartment_labels=False, **kwargs):
"""
Search for A/B compartments in each chromosome of the Hi-C matrix.
        The Hi-C matrix is normalized by the number of interactions expected
        at a given distance, and by visibility (one iteration of ICE). A
        correlation matrix is then calculated from this normalized matrix, and
        its first eigenvector is used to identify compartments. Changes in
        sign mark the boundaries between compartments.
Result is stored as a dictionary of compartment boundaries, keys being
chromosome names.
:param 99 perc_zero: to filter bad columns
        :param 0.05 signal_to_noise: to calculate expected interaction counts,
           if not enough reads are observed at a given distance the
           observations of distance+1 are summed. A signal-to-noise ratio of
           < 0.05 corresponds to > 400 reads.
:param None crms: only runs these given list of chromosomes
:param None savefig: path to a directory to store matrices with
compartment predictions, one image per chromosome, stored under
'chromosome-name_EV1.png'.
:param png format: in which to save the figures.
:param False show: show the plot
:param None savedata: path to a new file to store compartment
predictions, one file only.
:param None savedir: path to a directory to store coordinates of each
eigenvector, one per chromosome. Each file contains one eigenvector
per column, the first one being the one used as reference. This
eigenvector is also rotated according to the prediction if a
`rich_in_A` array was given.
:param None savecorr: path to a directory where to save correlation
matrices of each chromosome
:param -1 vmin: for the color scale of the plotted map (use vmin='auto',
and vmax='auto' to color according to the absolute maximum found).
:param 1 vmax: for the color scale of the plotted map (use vmin='auto',
and vmax='auto' to color according to the absolute maximum found).
:param False yield_ev1: if True yields one list per chromosome with the
first eigenvector used to compute compartments.
:param '' suffix: to be placed after file names of compartment images
:param 3 max_ev: maximum number of EV to try
        :param None ev_index: a list of numbers referring to the index of the
           eigenvector to be used. By default the first eigenvector is used.
           WARNING: index starts at 1, so the default is a list of ones. Note:
           if asking for only one chromosome, the list should contain a single
           element.
        :param None rich_in_A: by default compartments are identified using the
           mean number of intra-interactions (A compartments are expected to
           have fewer). However, this measure is not very accurate. With this
           parameter a path to a BED or BED-Graph file with a list of genes or
           active epigenetic marks can be passed and used instead of the mean
           interactions.
        :param False show_compartment_labels: if True, draw A and B compartment
           blocks. TODO: this is really slow...
        Note: building the distance matrix from the raw number of interactions
           instead of the mean correlation generally gives worse results.
:returns: 1- a dictionary with the N (max_ev) first
eigenvectors in the form:
{Chromosome_name: (Eigenvalue: [Eigenvector])}
Sign of the eigenvectors are changed in order to match the
prediction of A/B compartments (positive is A).
2- a dictionary of statistics of enrichment for A compartments
(Spearman rho).
"""
if not self.bads:
if kwargs.get('verbose', False):
                print 'Filtering bad columns %d' % kwargs.get('perc_zero', 99)
self.filter_columns(perc_zero=kwargs.get('perc_zero', 99),
by_mean=False, silent=True)
if len(self.bads) == len(self):
self.bads = {}
warn('WARNING: all columns would have been filtered out, '
'filtering disabled')
if not self.expected:
if kwargs.get('verbose', False):
print 'Normalizing by expected values'
self.expected = expected(self, bads=self.bads, **kwargs)
if not self.bias:
if kwargs.get('verbose', False):
print 'Normalizing by ICE (1 round)'
self.normalize_hic(iterations=0,
silent=not kwargs.get('verbose', False))
if savefig:
mkdir(savefig)
if savecorr:
mkdir(savecorr)
if savedir:
mkdir(savedir)
if suffix != '':
suffix = '_' + suffix
# parse bed file
if rich_in_A and isinstance(rich_in_A, str):
rich_in_A = parse_bed(rich_in_A, resolution=self.resolution)
cmprts = {}
firsts = {}
ev_nums = {}
count = 0
richA_stats = dict((sec, None) for sec in self.section_pos)
for sec in self.section_pos:
if crms and sec not in crms:
continue
if kwargs.get('verbose', False):
print 'Processing chromosome', sec
# get chromosomal matrix
try:
matrix = [[(float(self[i,j]) / self.expected[sec][abs(j-i)]
/ self.bias[i] / self.bias[j])
for i in xrange(*self.section_pos[sec])
if not i in self.bads]
for j in xrange(*self.section_pos[sec])
if not j in self.bads]
except KeyError:
if sec in self.expected and not self.expected[sec]:
matrix = []
else:
matrix = [[(float(self[i,j]) / self.expected[abs(j-i)]
/ self.bias[i] / self.bias[j])
for i in xrange(*self.section_pos[sec])
if not i in self.bads]
for j in xrange(*self.section_pos[sec])
if not j in self.bads]
if not matrix: # MT chromosome will fall there
warn('Chromosome %s is probably MT :)' % (sec))
cmprts[sec] = []
count += 1
continue
# enforce symmetry
for i in xrange(len(matrix)):
for j in xrange(i+1, len(matrix)):
matrix[i][j] = matrix[j][i]
# compute correlation coefficient
try:
matrix = [list(m) for m in corrcoef(matrix)]
except TypeError:
# very small chromosome?
warn('Chromosome %s is probably MT :)' % (sec))
cmprts[sec] = []
count += 1
continue
# replace nan in correlation matrix
matrix = [[0. if isnan(v) else v for v in l] for l in matrix]
# write correlation matrix to file. replaces filtered row/columns by NaN
if savecorr:
out = open(os.path.join(savecorr, '%s_corr-matrix%s.tsv' % (sec, suffix)),
'w')
start1, end1 = self.section_pos[sec]
out.write('# MASKED %s\n' % (' '.join([str(k - start1)
for k in self.bads.keys()
if start1 <= k <= end1])))
rownam = ['%s\t%d-%d' % (k[0],
k[1] * self.resolution,
(k[1] + 1) * self.resolution)
for k in sorted(self.sections,
key=lambda x: self.sections[x])
if k[0] == sec]
length = self.section_pos[sec][1] - self.section_pos[sec][0]
empty = 'NaN\t' * (length - 1) + 'NaN\n'
badrows = 0
for row, posx in enumerate(xrange(self.section_pos[sec][0],
self.section_pos[sec][1])):
if posx in self.bads:
out.write(rownam.pop(0) + '\t' + empty)
badrows += 1
continue
vals = []
badcols = 0
for col, posy in enumerate(xrange(self.section_pos[sec][0],
self.section_pos[sec][1])):
if posy in self.bads:
vals.append('NaN')
badcols += 1
continue
vals.append(str(matrix[row-badrows][col-badcols]))
out.write(rownam.pop(0) + '\t' +'\t'.join(vals) + '\n')
out.close()
# get eigenvectors
try:
                # scipy's eigsh is very fast; only ask for the first max_ev eigenvectors
evals, evect = eigsh(array(matrix),
k=max_ev if max_ev else (len(matrix) - 1))
except (LinAlgError, ValueError):
warn('Chromosome %s too small to compute PC1' % (sec))
cmprts[sec] = [] # Y chromosome, or so...
count += 1
continue
# define breakpoints, and store first EVs
n_first = [list(evect[:, -i])
for i in xrange(1, (max_ev + 1)
if max_ev else len(matrix))]
ev_num = (ev_index[count] - 1) if ev_index else 0
breaks = [i for i, (a, b) in
enumerate(zip(n_first[ev_num][1:], n_first[ev_num][:-1]))
if a * b < 0] + [len(n_first[ev_num]) - 1]
breaks = [{'start': breaks[i-1] + 1 if i else 0, 'end': b}
for i, b in enumerate(breaks)]
# rescale EVs, matrix and breaks by inserting NaNs in bad column places
beg, end = self.section_pos[sec]
bads = [k - beg for k in sorted(self.bads) if beg <= k <= end]
for evect in n_first:
_ = [evect.insert(b, float('nan')) for b in bads]
_ = [matrix.insert(b, [float('nan')] * len(matrix[0]))
for b in bads]
_ = [matrix[i].insert(b, float('nan'))
for b in bads for i in xrange(len(n_first[0]))]
for b in bads: # they are sorted
for brk in breaks:
if brk['start'] >= b:
brk['start'] += 1
brk['end' ] += 1
else:
brk['end' ] += brk['end'] > b
bads = set(bads)
# rescale first EV and change sign according to rich_in_A
richA_stats[sec] = None
sign = 1
if rich_in_A and sec in rich_in_A:
eves = []
gccs = []
for i, v in enumerate(n_first[ev_num]):
if i in bads:
continue
try:
gc = rich_in_A[sec][i]
except KeyError:
continue
gccs.append(gc)
eves.append(v)
r_stat, richA_pval = spearmanr(eves, gccs)
if kwargs.get('verbose', False):
print (' - Spearman correlation between "rich in A" and '
'Eigenvector:\n'
' rho: %.7f p-val:%.7f' % (r_stat, richA_pval))
richA_stats[sec] = r_stat
# switch sign and normalize
sign = 1 if r_stat > 0 else -1
for i in xrange(len(n_first)):
n_first[i] = [sign * v for v in n_first[i]]
# store it
ev_nums[sec] = ev_num + 1
cmprts[sec] = breaks
if rich_in_A:
for cmprt in cmprts[sec]:
try:
cmprt['dens'] = sum(rich_in_A.get(sec, {None: 0}).get(i, 0)
for i in range(cmprt['start'], cmprt['end'] + 1)
if not i in bads) / float(cmprt['end'] - cmprt['start'])
except ZeroDivisionError:
cmprt['dens'] = float('nan')
                    cmprt['type'] = 'A' if n_first[ev_num][cmprt['start']] > 0 else 'B'
firsts[sec] = (evals[::-1], n_first)
# needed for the plotting
if savefig or show:
vmin = kwargs.get('vmin', -1)
vmax = kwargs.get('vmax', 1)
if vmin == 'auto' == vmax:
vmax = max([abs(npperc(matrix, 99.5)),
abs(npperc(matrix, 0.5))])
vmin = -vmax
try:
if savefig:
fnam = os.path.join(savefig,
'%s_EV%d%s.%s' % (str(sec),
ev_nums[sec],
suffix,
format))
else:
fnam = None
plot_compartments(
sec, n_first[ev_num], cmprts, matrix, show, fnam,
vmin=vmin, vmax=vmax, whichpc=ev_num + 1,
showAB=show_compartment_labels)
except AttributeError:
warn(('WARNING: chromosome %s too small for plotting.'
'Skipping image creation.') % sec)
except ValueError:
warn(('WARNING: chromosome %s too small for plotting.'
'Skipping image creation.') % sec)
self.compartments = cmprts
if savedata:
self.write_compartments(savedata, chroms=self.compartments.keys(),
ev_nums=ev_nums)
if savedir:
ncrm = 0
for sec in self.section_pos:
if crms and sec not in crms:
continue
ev_file = open(os.path.join(
savedir, '%s_EigVect%d.tsv' % (
sec, ev_index[ncrm] if ev_index else 1)), 'w')
ev_file.write('# %s\n' % ('\t'.join(
'EV_%d (%.4f)' % (i, v)
for i, v in enumerate(firsts[sec][0], 1))))
ev_file.write('\n'.join(['\t'.join([str(v) for v in vs])
for vs in zip(*firsts[sec][1])]))
ev_file.close()
ncrm += 1
return firsts, richA_stats
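    # A minimal usage sketch (illustrative; `hic` and the file paths are
    # hypothetical):
    #     evs, rho = hic.find_compartments(savedata='compartments.tsv',
    #                                      rich_in_A='active_marks.bed',
    #                                      savefig='plots/', verbose=True)
    #     # evs['chr1'] -> (eigenvalues, [EV1, EV2, ...]) with signs oriented
    #     # so that positive values correspond to A compartments.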
def find_compartments_beta(self, crms=None, savefig=None, savedata=None,
savecorr=None, show=False, suffix='', how='',
label_compartments='hmm', log=None, max_mean_size=10000,
ev_index=None, rich_in_A=None, max_ev=3,show_compartment_labels=False, **kwargs):
"""
Search for A/B compartments in each chromosome of the Hi-C matrix.
Hi-C matrix is normalized by the number interaction expected at a given
distance, and by visibility (one iteration of ICE). A correlation matrix
is then calculated from this normalized matrix, and its first
eigenvector is used to identify compartments. Changes in sign marking
boundaries between compartments.
Result is stored as a dictionary of compartment boundaries, keys being
chromosome names.
:param 99 perc_zero: to filter bad columns
        :param 0.05 signal_to_noise: to calculate expected interaction counts,
           if not enough reads are observed at a given distance the
           observations of distance+1 are summed. A signal-to-noise ratio of
           < 0.05 corresponds to > 400 reads.
:param None crms: only runs these given list of chromosomes
:param None savefig: path to a directory to store matrices with
compartment predictions, one image per chromosome, stored under
'chromosome-name.png'.
:param False show: show the plot
:param None savedata: path to a new file to store compartment
predictions, one file only.
:param None savecorr: path to a directory where to save correlation
matrices of each chromosome
:param -1 vmin: for the color scale of the plotted map (use vmin='auto',
and vmax='auto' to color according to the absolute maximum found).
:param 1 vmax: for the color scale of the plotted map (use vmin='auto',
and vmax='auto' to color according to the absolute maximum found).
:param False yield_ev1: if True yields one list per chromosome with the
first eigenvector used to compute compartments.
:param '' suffix: to be placed after file names of compartment images
:param 3 max_ev: maximum number of EV to try
        :param None ev_index: a list of numbers referring to the index of the
           eigenvector to be used. By default the first eigenvector is used.
           WARNING: index starts at 1, so the default is a list of ones. Note:
           if asking for only one chromosome, the list should contain a single
           element.
        :param None rich_in_A: by default compartments are identified using the
           mean number of intra-interactions (A compartments are expected to
           have fewer). However, this measure is not very accurate. With this
           parameter a path to a BED or BED-Graph file with a list of genes or
           active epigenetic marks can be passed and used instead of the mean
           interactions.
        :param None log: path to a folder in which to save the log of the
           assignment of A/B compartments
        :param hmm label_compartments: label compartments into A/B categories,
           otherwise just find borders (faster). Can be either 'hmm' (default)
           or 'cluster'.
        :param 'ratio' how: 'ratio' divides by column, 'subratio' divides by
           compartment, 'diagonal' only uses the diagonal
        :param False show_compartment_labels: if True, draw A and B compartment
           blocks. TODO: this is really slow...
        Note: building the distance matrix from the raw number of interactions
           instead of the mean correlation generally gives worse results.
:returns: a dictionary with the N (max_ev) first eigen vectors used to define
compartment borders for each chromosome (keys are chromosome names)
"""
if not self.bads:
if kwargs.get('verbose', False):
                print 'Filtering bad columns %d' % kwargs.get('perc_zero', 99)
self.filter_columns(perc_zero=kwargs.get('perc_zero', 99),
by_mean=False, silent=True)
if len(self.bads) == len(self):
self.bads = {}
warn('WARNING: all columns would have been filtered out, '
'filtering disabled')
if not self.expected:
if kwargs.get('verbose', False):
print 'Normalizing by expected values'
self.expected = expected(self, bads=self.bads, **kwargs)
if not self.bias:
if kwargs.get('verbose', False):
print 'Normalizing by ICE (1 round)'
self.normalize_hic(iterations=0,
silent=not kwargs.get('verbose', False))
if savefig:
mkdir(savefig)
if savecorr:
mkdir(savecorr)
if suffix != '':
suffix = '_' + suffix
# parse bed file
if rich_in_A:
rich_in_A = parse_bed(rich_in_A, resolution=self.resolution)
cmprts = {}
firsts = {}
ev_nums = {}
count = 0
for sec in self.section_pos:
if crms and sec not in crms:
continue
if kwargs.get('verbose', False):
print 'Processing chromosome', sec
matrix = [[(float(self[i,j]) / self.expected[abs(j-i)]
/ self.bias[i] / self.bias[j])
for i in xrange(*self.section_pos[sec])
if not i in self.bads]
for j in xrange(*self.section_pos[sec])
if not j in self.bads]
if not matrix: # MT chromosome will fall there
warn('Chromosome %s is probably MT :)' % (sec))
cmprts[sec] = []
count += 1
continue
for i in xrange(len(matrix)):
for j in xrange(i+1, len(matrix)):
matrix[i][j] = matrix[j][i]
try:
matrix = [list(m) for m in corrcoef(matrix)]
except TypeError:
# very small chromosome?
warn('Chromosome %s is probably MT :)' % (sec))
cmprts[sec] = []
count += 1
continue
# write correlation matrix to file. replaces filtered row/columns by NaN
if savecorr:
out = open(os.path.join(savecorr, '%s_corr-matrix.tsv' % (sec)),
'w')
start1, end1 = self.section_pos[sec]
out.write('# MASKED %s\n' % (' '.join([str(k - start1)
for k in self.bads.keys()
if start1 <= k <= end1])))
rownam = ['%s\t%d-%d' % (k[0],
k[1] * self.resolution,
(k[1] + 1) * self.resolution)
for k in sorted(self.sections,
key=lambda x: self.sections[x])
if k[0] == sec]
length = self.section_pos[sec][1] - self.section_pos[sec][0]
empty = 'NaN\t' * (length - 1) + 'NaN\n'
badrows = 0
for row, posx in enumerate(xrange(self.section_pos[sec][0],
self.section_pos[sec][1])):
if posx in self.bads:
out.write(rownam.pop(0) + '\t' + empty)
badrows += 1
continue
vals = []
badcols = 0
for col, posy in enumerate(xrange(self.section_pos[sec][0],
self.section_pos[sec][1])):
if posy in self.bads:
vals.append('NaN')
badcols += 1
continue
vals.append(str(matrix[row-badrows][col-badcols]))
out.write(rownam.pop(0) + '\t' +'\t'.join(vals) + '\n')
out.close()
try:
                # scipy's eigsh is very fast; only ask for the first max_ev eigenvectors
_, evect = eigsh(array(matrix), k=max_ev)
except (LinAlgError, ValueError):
warn('Chromosome %s too small to compute PC1' % (sec))
cmprts[sec] = [] # Y chromosome, or so...
count += 1
continue
index = ev_index[count] if ev_index else 1
n_first = [list(evect[:, -i]) for i in xrange(1, max_ev + 1)]
for ev_num in range(index, max_ev + 1):
first = list(evect[:, -ev_num])
breaks = [i for i, (a, b) in
enumerate(zip(first[1:], first[:-1]))
if a * b < 0] + [len(first) - 1]
breaks = [{'start': breaks[i-1] + 1 if i else 0, 'end': b}
for i, b in enumerate(breaks)]
if (self.resolution * (len(breaks) - 1.0) / len(matrix)
> max_mean_size):
warn('WARNING: number of compartments found with the '
'EigenVector number %d is too low (%d compartments '
'in %d rows), for chromosome %s' % (
ev_num, len(breaks), len(matrix), sec))
else:
break
if (self.resolution * (len(breaks) - 1.0) / len(matrix)
> max_mean_size):
warn('WARNING: keeping first eigenvector, for chromosome %s' % (
sec))
ev_num = 1
if ev_index:
ev_num = ev_index[count]
first = list(evect[:, -ev_num])
breaks = [i for i, (a, b) in
enumerate(zip(first[1:], first[:-1]))
if a * b < 0] + [len(first) - 1]
breaks = [{'start': breaks[i-1] + 1 if i else 0, 'end': b}
for i, b in enumerate(breaks)]
ev_nums[sec] = ev_num
beg, end = self.section_pos[sec]
bads = [k - beg for k in sorted(self.bads) if beg <= k <= end]
for evect in n_first:
_ = [evect.insert(b, float('nan')) for b in bads]
_ = [first.insert(b, 0) for b in bads]
_ = [matrix.insert(b, [float('nan')] * len(matrix[0]))
for b in bads]
_ = [matrix[i].insert(b, float('nan'))
for b in bads for i in xrange(len(first))]
breaks = [i for i, (a, b) in
enumerate(zip(first[1:], first[:-1]))
if a * b < 0] + [len(first) - 1]
breaks = [{'start': breaks[i-1] + 1 if i else 0, 'end': b}
for i, b in enumerate(breaks)]
cmprts[sec] = breaks
firsts[sec] = n_first
# needed for the plotting
self._apply_metric(cmprts, sec, rich_in_A, how=how)
if label_compartments == 'cluster':
if log:
logf = os.path.join(log, sec + suffix + '.log')
else:
logf = None
gammas = {}
for n_clust in range(2, 4):
for gamma in range(0, 101, 1):
scorett, tt, prop = _cluster_ab_compartments(
float(gamma)/100, matrix, breaks, cmprts[sec],
rich_in_A, ev_num=ev_num, log=logf, save=False,
verbose=kwargs.get('verbose', False),
n_clust=n_clust)
gammas[gamma] = scorett, tt, prop
gamma = min(gammas.keys(), key=lambda k: gammas[k][0])
if gammas[gamma][0] - gammas[gamma][1] > 7:
                        print (' WARNING: minimum showing very low '
                               'intermingling of A/B compartments, trying '
                               'with 3 clusters, for chromosome %s' % sec)
gammas = {}
continue
if kwargs.get('verbose', False):
print ' ====> minimum:', gamma
break
_ = _cluster_ab_compartments(float(gamma)/100, matrix, breaks,
cmprts[sec], rich_in_A, save=True,
log=logf, ev_num=ev_num, n_clust=n_clust)
if savefig or show:
vmin = kwargs.get('vmin', -1)
vmax = kwargs.get('vmax', 1)
if vmin == 'auto' == vmax:
vmax = max([abs(npperc(matrix, 99.5)),
abs(npperc(matrix, 0.5))])
vmin = -vmax
plot_compartments(
sec, first, cmprts, matrix, show,
savefig + '/chr' + str(sec) + suffix + '.png' if savefig else None,
vmin=vmin, vmax=vmax, whichpc=ev_num,showAB=show_compartment_labels)
if label_compartments == 'cluster' or label_compartments == 'hmm':
plot_compartments_summary(
sec, cmprts, show,
savefig + '/chr' + str(sec) + suffix + '_summ.png' if savefig else None)
count += 1
if label_compartments == 'hmm':
x = {}
for sec in self.section_pos:
beg, end = self.section_pos[sec]
bads = [k - beg for k in self.bads if beg <= k <= end]
try:
x[sec] = [j for i, j in enumerate(firsts[sec][ev_nums[sec] - 1])
if not i in bads]
except KeyError:
continue
# train two HMMs on the genomic data:
# - one with 2 states A B
# - one with 3 states A B I
# - one with 4 states A a B b
# - one with 5 states A a B b I
models = {}
for n in range(2, 6):
if kwargs.get('verbose', False):
print ('Training HMM for %d categories of '
'compartments' % n)
models[n] = _training(x, n, kwargs.get('verbose', False))
# apply HMM models on each chromosome
results = {}
for sec in self.section_pos:
if not sec in x:
continue
beg, end = self.section_pos[sec]
bads = [k - beg for k in self.bads if beg <= k <= end]
if kwargs.get('verbose', False):
print 'Chromosome', sec
# print 'CMPRTS before ', sec, cmprts[sec]
n_states, breaks = _hmm_refine_compartments(
x[sec], models, bads, kwargs.get('verbose', False))
results[sec] = n_states, breaks
cmprts[sec] = breaks
# print 'CMPRTS after hmm', sec, cmprts[sec]
self._apply_metric(cmprts, sec, rich_in_A, how=how)
if rich_in_A:
test = lambda x: x >= 1
else:
test = lambda x: x < 1
# find which category of compartment has the highest "density"
atyp = 0.
alen = 0.
btyp = 0.
blen = 0.
max_type = nanmax([c['type'] for c in cmprts[sec]])
for typ in range(5):
subset = set([i for i, c in enumerate(cmprts[sec])
if c['type'] == typ])
dens = sum(cmprts[sec][c]['dens'] * (cmprts[sec][c]['end'] - cmprts[sec][c]['start']) for c in subset)
leng = sum((cmprts[sec][c]['end'] - cmprts[sec][c]['start'])**2 / 2. for c in subset)
# leng = sum(1 for c in subset)
val = float(dens) / leng if leng else 0.
#if typ == 0:
if typ < max_type / 2.:
alen += leng
atyp += val * leng
# elif typ == max_type:
elif typ > max_type / 2.:
blen += leng
btyp += val * leng
for i, comp in enumerate(cmprts[sec]):
if comp['type'] < max_type / 2.:
# if mean density of compartments of type 0 is higher than 1
# than label them as 'B', otherwise, as 'A'
if comp['type'] == 0:
comp['type'] = 'A' if test(val) else 'B'
else:
comp['type'] = 'a' if test(val) else 'b'
elif comp['type'] > max_type / 2.:
if comp['type'] == max_type:
comp['type'] = 'B' if test(val) else 'A'
else:
comp['type'] = 'b' if test(val) else 'a'
elif isnan(comp['type']):
comp['type'] = 'NA'
else:
comp['type'] = 'I'
self.compartments = cmprts
if savedata:
self.write_compartments(savedata, chroms=self.compartments.keys(),
ev_nums=ev_nums)
return firsts
def _apply_metric(self, cmprts, sec, rich_in_A, how='ratio'):
"""
calculate compartment internal density if no rich_in_A, otherwise
sum this list
"""
# print 'SEGMENTS'
# print sec, self.section_pos[sec]
# for i in range(0, len(cmprts[sec]), 20):
# print ' ' + ''.join(['%5d/%-5d'% (s['start'], s['end']) for s in cmprts[sec][i:i+20]])
# print 'CHROMOSOME', sec
for cmprt in cmprts[sec]:
if rich_in_A:
beg1, end1 = cmprt['start'], cmprt['end'] + 1
sec_matrix = [rich_in_A.get(sec, {None: 0}).get(i, 0)
for i in xrange(beg1, end1)
if not i in self.bads]
try:
cmprt['dens'] = float(sum(sec_matrix)) / len(sec_matrix)
except ZeroDivisionError:
cmprt['dens'] = 0.
else:
beg, end = self.section_pos[sec]
beg1, end1 = cmprt['start'] + beg, cmprt['end'] + beg + 1
# print 'BEG:%7d, END:%7d, LEN bias:%7d, LEN self:%7d, LEN expected:%7d' % (beg1, end1, len(self.bias),
# len(self), len(self.expected))
if 'diagonal' in how:
sec_matrix = [(self[i,i] / self.expected[0] / self.bias[i]**2)
for i in xrange(beg1, end1) if not i in self.bads]
else: #if 'compartment' in how:
sec_matrix = [(self[i,j] / self.expected[abs(j-i)]
/ self.bias[i] / self.bias[j])
for i in xrange(beg1, end1) if not i in self.bads
for j in xrange(beg1, end1) if not j in self.bads]
if '/compartment' in how: # diagonal / compartment
sec_column = [(self[i,j] / self.expected[abs(j-i)]
/ self.bias[i] / self.bias[j])
for i in xrange(beg1, end1) if not i in self.bads
for j in xrange(beg1, end1) if not j in self.bads]
elif '/column' in how:
sec_column = [(self[i,j] / self.expected[abs(j-i)]
/ self.bias[i] / self.bias[j])
for i in xrange(beg1, end1) if not i in self.bads
for j in range(beg, end)
if not j in self.bads]
else:
sec_column = [1.]
try:
if 'type' in cmprt and isnan(cmprt['type']):
cmprt['dens'] = 1.
else:
cmprt['dens'] = float(sum(sec_matrix)) / sum(sec_column)
except ZeroDivisionError:
cmprt['dens'] = 1.
# normalize to 1.0
try:
if 'type' in cmprt: # hmm already run and have the types definded
meanh = (sum(cmprt['dens'] for cmprt in cmprts[sec]
if not isnan(cmprt['type'])) /
sum(1 for cmprt in cmprts[sec]
if not isnan(cmprt['type'])))
else:
meanh = (sum(cmprt['dens'] for cmprt in cmprts[sec]) /
sum(1 for cmprt in cmprts[sec]))
except ZeroDivisionError:
meanh = 1.
for cmprt in cmprts[sec]:
try:
if 'type' in cmprt and isnan(cmprt['type']):
cmprt['dens'] = 1.0
else:
cmprt['dens'] /= meanh
except ZeroDivisionError:
cmprt['dens'] = 1.
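    # Worked example of the normalization above (illustrative numbers): with
    # raw compartment densities [4.0, 2.0], meanh = 3.0 and the stored values
    # become [4/3, 2/3], i.e. densities relative to the chromosome mean, so
    # values above 1 mark enriched compartments.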
def write_compartments(self, savedata, chroms=None, ev_nums=None):
"""
Write compartments to a file.
:param savedata: path to a file.
:param None chroms: write only the given list of chromosomes (default
all chromosomes are written, note that the first column corresponding
to chromosome name will disappear in non default case)
"""
out = open(savedata, 'w')
sections = chroms if chroms else self.compartments.keys()
if ev_nums:
for sec in sections:
try:
out.write('## CHR %s\tEigenvector: %d\n' % (sec, ev_nums[sec]))
except KeyError:
continue
out.write('#%sstart\tend\trich in A\ttype\n'% (
'CHR\t' if len(sections) > 1 else '\t'))
for sec in sections:
for c in self.compartments[sec]:
out.write('%s%d\t%d\t%.2f\t%s\n' % (
(str(sec) + '\t') if sections else '\t',
c['start'] + 1, c['end'] + 1,
c.get('dens', float('nan')), c.get('type', '')))
out.close()
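    # Example of the file written above (illustrative values; columns are
    # tab-separated):
    #     ## CHR chr1    Eigenvector: 1
    #     #CHR   start   end     rich in A       type
    #     chr1   1       25      1.32            A
    #     chr1   26      40      0.71            B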
def yield_matrix(self, focus=None, diagonal=True, normalized=False):
"""
Yields a matrix line by line.
Bad row/columns are returned as null row/columns.
:param None focus: a tuple with the (start, end) position of the desired
window of data (start, starting at 1, and both start and end are
inclusive). Alternatively a chromosome name can be input or a tuple
of chromosome name, in order to retrieve a specific inter-chromosomal
region
:param True diagonal: if False, diagonal is replaced by zeroes
:param False normalized: get normalized data
:yields: matrix line by line (a line being a list of values)
"""
siz = len(self)
if normalized and not self.bias:
raise Exception('ERROR: experiment not normalized yet')
if focus:
if isinstance(focus, tuple) and isinstance(focus[0], int):
if len(focus) == 2:
start1, end1 = focus
start2, end2 = focus
start1 -= 1
start2 -= 1
else:
start1, end1, start2, end2 = focus
start1 -= 1
start2 -= 1
elif isinstance(focus, tuple) and isinstance(focus[0], str):
start1, end1 = self.section_pos[focus[0]]
start2, end2 = self.section_pos[focus[1]]
else:
start1, end1 = self.section_pos[focus]
start2, end2 = self.section_pos[focus]
else:
start1 = start2 = 0
end1 = end2 = siz
if normalized:
for i in xrange(start2, end2):
# if bad column:
if i in self.bads:
yield [0.0 for j in xrange(start1, end1)]
# if we want the diagonal, or we don't but are looking at a
# region that is not symmetric
elif diagonal or start1 != start2:
yield [self[i, j] / self.bias[i] / self.bias[j]
for j in xrange(start1, end1)]
# diagonal replaced by zeroes
else:
yield ([self[i, j] / self.bias[i] / self.bias[j]
for j in xrange(start1, i)] +
[0.0] +
[self[i, j] / self.bias[i] / self.bias[j]
for j in xrange(i + 1, end1)])
else:
for i in xrange(start2, end2):
# if bad column:
if i in self.bads:
yield [0 for j in xrange(start1, end1)]
# if we want the diagonal, or we don't but are looking at a
# region that is not symmetric
elif diagonal or start1 != start2:
yield [self[i, j] for j in xrange(start1, end1)]
# diagonal replaced by zeroes
else:
yield ([self[i, j] for j in xrange(start1, i)] +
[0] +
[self[i, j] for j in xrange(i + 1, end1)])
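    # A minimal usage sketch (illustrative): stream the normalized matrix row
    # by row without materializing it in memory.
    #     for row in hic.yield_matrix(focus='chr3', normalized=True):
    #         out.write('\t'.join(str(v) for v in row) + '\n')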
def _hmm_refine_compartments(xsec, models, bads, verbose):
prevll = float('-inf')
prevdf = 0
results = {}
for n in range(2, 6):
E, pi, T = models[n]
probs = gaussian_prob(xsec, E)
pathm, llm = best_path(probs, pi, T)
pathm = asarray(map(float, pathm))
df = n**2 - n + n * 2 + n - 1
len_seq = len(pathm)
lrt = gammaincc((df - prevdf) / 2., (llm - prevll) / 2.)
bic = -2 * llm + df * nplog(len_seq)
aic = 2 * df - 2 * llm
if verbose:
print 'Ll for %d states (%d df): %4.0f AIC: %4.0f BIC: %4.0f LRT=%f'% (
n, df, llm, aic, bic, lrt)
prevdf = df
prevll = llm
results[n] = {'AIC': aic,
'BIC': bic,
'LRT': lrt,
'PATH': pathm}
n_states = min(results, key=lambda x: results[x]['BIC'])
results = list(results[n_states]['PATH'])
# print 'RESULTS', results
_ = [results.insert(b, float('nan')) for b in sorted(bads)]
# print 'RESULTS', results
breaks = [(i, b) for i, (a, b) in
enumerate(zip(results[1:], results[:-1]))
if str(a) != str(b)] + [len(results) - 1]
# print 'BREAKS', breaks
breaks[-1] = (breaks[-1], results[-1])
# print 'BREAKS', breaks
breaks = [{'start': breaks[i-1][0] + 1 if i else 0, 'end': b,
'type': a}
for i, (b, a) in enumerate(breaks)]
# print 'BREAKS', breaks
return n_states, breaks
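# The selection above keeps the HMM with the lowest BIC. Worked example
# (illustrative numbers): with log-likelihoods -1000 (df=5) and -950 (df=11)
# over 2000 bins, BIC = -2*ll + df*log(n) gives 2038.0 vs 1983.6, so the
# larger model is preferred here.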
def _training(x, n, verbose):
"""
define default emission transition and initial states, and train the hmm
"""
pi = [0.5 - ((n - 2) * 0.05)**2 if i == 0 or i == n - 1 else ((n - 2)*0.05)**2*2 / (n - 2) for i in range(n)]
T = [[0.9 if i==j else 0.1/(n-1) for i in xrange(n)] for j in xrange(n)]
E = asarray(zip(linspace(-1, 1, n), [1./n for _ in range(n)]))
# normalize values of the first eigenvector
for c in x:
this_mean = mean(x[c])
this_std = std (x[c])
x[c] = [v - this_mean for v in x[c]]
x[c] = [v / this_std for v in x[c]]
train(pi, T, E, x.values(), verbose=verbose, threshold=1e-6, n_iter=1000)
return E, pi, T
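# For n = 2 the initialization above evaluates to pi = [0.5, 0.5],
# T = [[0.9, 0.1], [0.1, 0.9]] and emission parameters
# E = [(-1.0, 0.5), (1.0, 0.5)], which the train() call then refines.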
def _cluster_ab_compartments(gamma, matrix, breaks, cmprtsec, rich_in_A, save=True,
ev_num=1, log=None, verbose=False, savefig=None,
n_clust=2):
# function to convert correlation into distances
gamma += 1
func = lambda x: -abs(x)**gamma / x
funczero = lambda x: 0.0
# calculate distance_matrix
dist_matrix = [[0 for _ in xrange(len(breaks))]
for _ in xrange(len(breaks))]
scores = {}
for k, cmprt in enumerate(cmprtsec):
beg1, end1 = cmprt['start'], cmprt['end'] + 1
diff1 = end1 - beg1
scores[(k,k)] = dist_matrix[k][k] = -1
for l in xrange(k + 1, len(cmprtsec)):
beg2, end2 = cmprtsec[l]['start'], cmprtsec[l]['end'] + 1
val = nansum([matrix[i][j] for i in xrange(beg1, end1)
for j in xrange(beg2, end2)]) / (end2 - beg2) / diff1
try:
scores[(k,l)] = dist_matrix[k][l] = scores[(l,k)] = dist_matrix[l][k] = func(val)
except ZeroDivisionError:
scores[(k,l)] = dist_matrix[k][l] = scores[(l,k)] = dist_matrix[l][k] = funczero(val)
if isnan(scores[(k,l)]):
scores[(k,l)] = dist_matrix[k][l] = scores[(l,k)] = dist_matrix[l][k] = funczero(0)
# cluster compartments according to their correlation score
try:
clust = linkage(dist_matrix, method='ward')
except UnboundLocalError:
warn('WARNING: Chromosome probably too small. Skipping')
return (float('inf'), float('inf'), float('inf'))
# find best place to divide dendrogram (only check 1, 2, 3 or 4 clusters)
solutions = {}
for k in clust[:,2][-3:]:
clusters = {}
_ = [clusters.setdefault(j, []).append(i) for i, j in
enumerate(fcluster(clust, k, criterion='distance'))]
solutions[k] = {'out': clusters}
solutions[k]['score'] = calinski_harabasz(scores, clusters)
# plot
if savefig:
xedges = [b['start'] for b in breaks]
yedges = [b['start'] for b in breaks]
xedges += [breaks[-1]['end']]
yedges += [breaks[-1]['end']]
X, Y = meshgrid(xedges, yedges)
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10,10))
_ = fig.add_axes([0.09,0.1,0.2,0.6])
Z1 = dendrogram(clust, orientation='left')
idx1 = Z1['leaves']
idx2 = Z1['leaves']
D = asarray(dist_matrix)[idx1,:]
D = D[:,idx2]
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
m = axmatrix.pcolormesh(X, Y, D)
axmatrix.set_aspect('equal')
axmatrix.set_yticks([])
axmatrix.set_xlim((0, breaks[-1]['end']))
axmatrix.set_ylim((0, breaks[-1]['end']))
plt.colorbar(m)
plt.savefig(savefig)
try:
# take best cluster according to calinski_harabasz score
clusters = [solutions[s] for s in sorted(
solutions, key=lambda x: solutions[x]['score'])
if solutions[s]['score']>0][1 - n_clust]['out']
except IndexError:
# warn('WARNING1: compartment clustering is not clear. Skipping')
return (float('inf'), float('inf'), float('inf'))
if len(clusters) != n_clust:
# warn('WARNING2: compartment clustering is too clear. Skipping')
return (float('inf'), float('inf'), float('inf'))
# labeling compartments. A compartments shall have lower
# mean intra-interactions
dens = {}
if rich_in_A:
test = lambda x: x >= 1
else:
test = lambda x: x < 1
for k in clusters:
val = sum([cmprtsec[c]['dens']
for c in clusters[k]]) / len(clusters[k])
dens['A' if test(val) else 'B'] = [
cmprtsec[c]['dens'] for c in clusters[k]
if cmprtsec[c]['end'] + 1 - cmprtsec[c]['start'] > 2]
for c in clusters[k]:
cmprtsec[c]['type'] = 'A' if test(val) else 'B'
try:
tt, pval = ttest_ind(dens['A'], dens['B'])
except ZeroDivisionError:
return (float('inf'), float('inf'), float('inf'))
prop = float(len(dens['A'])) / (len(dens['A']) + len(dens['B']))
# to avoid having all A or all B
# score = 5000 * (prop - 0.5)**4 - 2
# to avoid having consecutive As or Bs
score = 0.
prev = None
for cmprt in cmprtsec:
if cmprt.get('type', None) == prev:
score += 1.
prev = cmprt.get('type', prev)
score /= len(cmprtsec)
score = exp(10 * (score - 0.4)) # 5000 * (score - 0.5)**4 - 2
# score = score1 + score2
if verbose:
print ('[EV%d CL%s] g:%5s prop:%5s%% tt:%7s '
'score-interleave:%5s ' # score-proportion:%7s
'final: %7s pv:%7s' % (
ev_num, n_clust, gamma - 1, round(prop * 100, 1),
round(tt, 3), round(score, 3), #round(score2, 3),
round(score + tt, 3), round(pval, 5)))
if log:
log = open(log, 'a')
log.write('[EV%d CL%s] g:%5s prop:%5s%% tt:%6s '
'score-interleave:%6s ' # score-proportion:%7s
'final: %7s pv:%s\n' % (
ev_num, n_clust, gamma - 1, round(prop * 100, 1),
round(tt, 3), round(score, 3), # round(score2, 3),
round(score + tt, 3), round(pval, 4)))
log.close()
if not save:
for cmprt in cmprtsec:
if 'type' in cmprt:
cmprt['type'] = None
return score + tt, tt, prop
| gpl-3.0 | 7,835,012,755,277,714,000 | 44.85518 | 134 | 0.488934 | false |
bytescout/ByteScout-SDK-SourceCode | PDF.co Web API/PDF Fill PDF Forms/Python/Fill PDF Forms Simplified/FillPDFForms.py | 1 | 2904 | import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co/documentation/api
API_KEY = "**********************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Direct URL of source PDF file.
SourceFileUrl = "https://bytescout-com.s3-us-west-2.amazonaws.com/files/demo-files/cloud-api/pdf-form/f1040.pdf"
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination PDF file name
DestinationFile = ".\\result.pdf"
# Runs processing asynchronously. Returns JobId that you may use with /job/check to check the state of the processing (possible states: working, failed, aborted and success). Must be one of: true, false.
Async = "False"
# Values to fill out pdf fields with built-in pdf form filler.
# To fill fields in a PDF form, use the format "page;fieldName;value", for example: 0;editbox1;text is here. To fill a checkbox, use true, for example: 0;checkbox1;true. To separate multiple objects, use the | separator. To get the list of all fillable fields in a PDF form, use the /pdf/info/fields endpoint.
FieldsStrings = "1;topmostSubform[0].Page1[0].f1_02[0];John A. Doe|1;topmostSubform[0].Page1[0].FilingStatus[0].c1_01[1];true|1;topmostSubform[0].Page1[0].YourSocial_ReadOrderControl[0].f1_04[0];123456789"
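# For reference, the same string could be assembled from (page, field, value)
# triples (illustrative helper, not part of the original sample):
#     fields = [(1, "topmostSubform[0].Page1[0].f1_02[0]", "John A. Doe")]
#     FieldsStrings = "|".join("%d;%s;%s" % f for f in fields)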
def main(args = None):
fillPDFForm(SourceFileUrl, DestinationFile)
def fillPDFForm(uploadedFileUrl, destinationFile):
"""Converts HTML to PDF using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["name"] = os.path.basename(destinationFile)
parameters["url"] = uploadedFileUrl
parameters["fieldsString"] = FieldsStrings
parameters["async"] = Async
# Prepare URL for 'Fill PDF' API request
url = "{}/pdf/edit/add".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Get URL of result file
resultFileUrl = json["url"]
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
if __name__ == '__main__':
main() | apache-2.0 | -5,449,671,780,464,104,000 | 40.5 | 308 | 0.657025 | false |
ActionAdam/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.updates/resources/lib/apt_cache_action.py | 1 | 11742 | ''' This script is run as root by the osmc update module. '''
import apt
import socket
import sys
from datetime import datetime
import json
import os
import time
import subprocess
import traceback
from CompLogger import comprehensive_logger as clog
t = datetime
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
try:
sys.stdout = Logger("/var/tmp/OSMC_python_apt_log.txt")
except:
pass
# @clog(maxlength=1500)
def call_parent(raw_message, data={}):
address = '/var/tmp/osmc.settings.update.sockfile'
print '%s %s sending response' % (t.now(), 'apt_cache_action.py')
message = (raw_message, data)
message = json.dumps(message)
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(address)
sock.sendall(message)
sock.close()
except Exception as e:
return '%s %s failed to connect to parent - %s' % (t.now(), 'apt_cache_action.py', e)
return 'response sent'
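# The parent process receives a JSON-encoded (message, data) pair over the
# unix socket, e.g. (illustrative payload):
#     '["progress_bar", {"percent": 42, "heading": "Updater", "message": "..."}]'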
class Main(object):
def __init__(self, action):
# with apt.apt_pkg.SystemLock():
# implements a lock on the package system, so that nothing else can alter packages
print '==================================================================='
print '%s %s running' % (t.now(), 'apt_cache_action.py')
self.error_package = ''
self.error_message = ''
self.heading = 'Updater'
self.action = action
self.cache = apt.Cache()
self.block_update_file = '/var/tmp/.suppress_osmc_update_checks'
self.action_to_method = {
'update' : self.update,
'update_manual' : self.update,
'commit' : self.commit,
'fetch' : self.fetch,
'action_list' : self.action_list,
}
try:
self.act()
call_parent('progress_bar', {'kill': True})
except Exception as e:
print '%s %s exception occurred' % (t.now(), 'apt_cache_action.py')
print '%s %s exception value : %s' % (t.now(), 'apt_cache_action.py', e)
deets = 'Error Type and Args: %s : %s \n\n %s' % (type(e).__name__, e.args, traceback.format_exc())
# send the error to the parent (parent will kill the progress bar)
call_parent('apt_error', {'error': self.error_message, 'package': self.error_package, 'exception': deets})
self.respond()
print '%s %s exiting' % (t.now(), 'apt_cache_action.py')
print '==================================================================='
def respond(self):
call_parent('apt_cache %s complete' % self.action)
def act(self):
action = self.action_to_method.get(self.action, False)
if action:
action()
else:
print 'Action not in action_to_method dict'
#@clog()
def action_list(self):
		''' This method processes a list sent in argv[2], and either installs or removes packages.
The list is sent as a string:
install_packageid1|=|install_packageid2|=|removal_packageid3'''
self.heading = 'App Store'
action_string = sys.argv[2]
action_dict = self.parse_argv2(action_string)
self.update()
self.cache.open()
for pkg in self.cache:
# mark packages as install or remove
if pkg.shortname in action_dict['install']:
pkg.mark_install()
if pkg.shortname in action_dict['removal']:
pkg.mark_delete(purge=True)
# commit
self.commit_action()
if action_dict['removal']:
# if there were removals then remove the packages that arent needed any more
self.update()
self.cache.open()
removals = False
for pkg in self.cache:
if pkg.is_auto_removable:
pkg.mark_delete(purge=True)
removals = True
if removals:
# commit
self.commit_action()
# #@clog()
def parse_argv2(self, action_string):
install = []
removal = []
actions = action_string.split('|=|')
for action in actions:
if action.startswith('install_'):
install.append(action[len('install_'):])
elif action.startswith('removal_'):
removal.append(action[len('removal_'):])
return {'install': install, 'removal': removal}
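	# Example of the parsing above (illustrative package ids):
	#     parse_argv2('install_vlc|=|removal_mc')
	#     -> {'install': ['vlc'], 'removal': ['mc']}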
#@clog()
def update(self):
dprg = Download_Progress(partial_heading='Updating')
self.cache.update(fetch_progress=dprg, pulse_interval=1000)
# call the parent and kill the pDialog, now handled in on exit
call_parent('progress_bar', {'percent': 100, 'heading': self.heading, 'message':'Cache Update Complete'})
return '%s %s cache updated' % (t.now(), 'apt_cache_action.py')
#@clog()
def commit(self):
# check whether any packages are broken, if they are then the install needs to take place outside of Kodi
for pkg in self.cache:
if pkg.is_inst_broken or pkg.is_now_broken:
return "%s is BROKEN, cannot proceed with commit" % pkg.shortname
print '%s %s upgrading all packages' % (t.now(), 'apt_cache_action.py')
self.cache.upgrade(True)
print '%s %s committing cache' % (t.now(), 'apt_cache_action.py')
self.commit_action()
#@clog()
def commit_action(self):
dprg = Download_Progress()
iprg = Install_Progress(self)
self.cache.commit(fetch_progress=dprg, install_progress=iprg)
# call the parent and kill the pDialog, now handled in on exit
call_parent('progress_bar', {'percent': 100, 'heading': self.heading, 'message':'Commit Complete'})
# remove the file that blocks further update checks
try:
os.remove(self.block_update_file)
except:
return 'Failed to remove block_update_file'
return '%s %s cache committed' % (t.now(), 'apt_cache_action.py')
#@clog()
def fetch(self):
self.cache.upgrade(True)
print '%s %s fetching all packages' % (t.now(), 'apt_cache_action.py')
dprg = Download_Progress()
self.cache.fetch_archives(progress=dprg)
# call the parent and the progress bar is killed on error or once all complete
call_parent('progress_bar', {'percent': 100, 'heading': self.heading, 'message':'Downloads Complete'})
return '%s %s all packages fetched' % (t.now(), 'apt_cache_action.py')
class Operation_Progress(apt.progress.base.OpProgress):
def __init__(self):
super(Operation_Progress, self).__init__()
def update(self):
call_parent('progress_bar', {'percent': self.percent, 'heading': self.op, 'message':self.sub_op,})
def done(self):
pass
# call_parent('progress_bar', {'percent': 100, 'heading': self.heading, 'message':'Operations Complete'})
class Install_Progress(apt.progress.base.InstallProgress):
def __init__(self, parent):
self.parent = parent
super(Install_Progress, self).__init__()
call_parent('progress_bar', {'percent': 0, 'heading': self.parent.heading, 'message':'Starting Installation'})
#@clog()
def error(self, pkg, errormsg):
print 'ERROR!!! \n%s\n' % errormsg
try:
pkgname = os.path.basename(pkg).split('_')
print 'Package affected!!! \n%s\n' % pkgname
self.parent.error_package = pkgname[0]
if len(pkgname) > 1:
self.parent.error_package += ' (' + pkgname[1] + ')'
except:
self.parent.error_package = '(unknown package)'
self.parent.error_message = errormsg
''' (Abstract) Called when a error is detected during the install. '''
# The following method should be overridden to implement progress reporting for dpkg-based runs
# i.e. calls to run() with a filename:
# def processing(self, pkg, stage):
# ''' This method is called just before a processing stage starts. The parameter pkg is the name of the
# package and the parameter stage is one of the stages listed in the dpkg manual under the
# status-fd option, i.e. "upgrade", "install" (both sent before unpacking), "configure", "trigproc",
# "remove", "purge". '''
# def dpkg_status_change(self, pkg, status):
# ''' This method is called whenever the dpkg status of the package changes. The parameter pkg is the
# name of the package and the parameter status is one of the status strings used in the status file
# (/var/lib/dpkg/status) and documented in dpkg(1). '''
# The following methods should be overridden to implement progress reporting for run() calls
# with an apt_pkg.PackageManager object as their parameter:
#@clog()
def status_change(self, pkg, percent, status):
''' This method implements progress reporting for package installation by APT and may be extended to
dpkg at a later time. This method takes two parameters: The parameter percent is a float value
describing the overall progress and the parameter status is a string describing the current status
in an human-readable manner. '''
diff = t.now() - self.pulse_time
if (diff.total_seconds() * 10) < 12:
return True
self.pulse_time = t.now()
call_parent('progress_bar', {'percent': int(percent), 'heading': self.parent.heading, 'message': status})
#@clog()
def start_update(self):
''' This method is called before the installation of any package starts. '''
self.pulse_time = t.now()
return 'Start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#@clog()
def finish_update(self):
''' This method is called when all changes have been applied. '''
return 'Stop !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
class Download_Progress(apt.progress.base.AcquireProgress):
def __init__(self, partial_heading='Downloading'):
super(Download_Progress, self).__init__()
self.partial_heading = partial_heading
call_parent('progress_bar', {'percent': 0, 'heading': 'Downloading Update', 'message':'Starting Download',})
#@clog()
def start(self):
''' Invoked when the Acquire process starts running. '''
self.pulse_time = t.now()
return 'Start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#@clog()
def stop(self):
''' Invoked when the Acquire process stops running. '''
return 'Stop !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
#@clog()
def fetch(self, item):
''' Invoked when an item is being fetched. '''
dsc = item.description.split('/')
self.fetching = self.partial_heading + ': ' + dsc[-1]
# call_parent('progress_bar',{'message': 'Downloading: ' + dsc[-1]})
return 'Fetch' + item.description + '++++++++++++++++++++++++++++++'
#@clog()
def pulse(self, owner):
''' Periodically invoked as something is being downloaded. '''
# if the pulse is less than one second since the last one then ignore the pulse
# this needs to be done as the parents _daemon only checks the queue once a second
diff = t.now() - self.pulse_time
if (diff.total_seconds() * 10) < 11:
return True
else:
self.pulse_time = t.now()
print 'Pulse ==========================================='
print 'current_items', self.current_items
print 'total_items', self.total_items
print 'total_bytes', self.total_bytes
print 'fetched_bytes', self.fetched_bytes
print 'current_bytes', self.current_bytes
print 'current_cps', self.current_cps
print 'Pulse ==========================================='
pct = int(self.current_bytes / float(self.total_bytes) * 100)
cps = self.current_cps / 1024.0
if cps > 1024:
cps = '{0:.2f} MBps'.format(cps / 1024)
else:
cps = '{0:.0f} kBps'.format(cps)
cmb = self.current_bytes / 1048576.0
tmb = self.total_bytes / 1048576.0
msg = self.fetching
hdg = '{0:d} / {1:d} items -- {2:} -- {3:.1f} / {4:.1f}MB'.format(self.current_items, self.total_items, cps, cmb, tmb)
call_parent('progress_bar', {'percent': pct, 'heading': hdg, 'message': msg})
return True
#@clog()
def done(self, item):
''' Invoked when an item has finished downloading. '''
return 'Done ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'
if __name__ == "__main__":
if len(sys.argv) > 1:
action = sys.argv[1]
m = Main(action)
del m
| gpl-2.0 | -4,620,404,637,077,849,000 | 23.160494 | 125 | 0.634219 | false |
yueyoum/gstatus | gstatus.py | 1 | 4136 | # -*- coding: utf-8 -*-
import re
import sys
import os
import json
import subprocess
from xml.sax.saxutils import unescape
try:
import cStringIO as StringIO
except ImportError:
import StringIO
from bottle import Bottle, run, static_file
from jinja2 import Environment, FileSystemLoader
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
TEMPLATES_PATH = os.path.join(CURRENT_PATH, 'templates')
STATIC_PATH = os.path.join(CURRENT_PATH, 'static')
env = Environment(loader=FileSystemLoader(TEMPLATES_PATH))
lexer = get_lexer_by_name('diff')
def highlight_diff(text):
return highlight(unescape(text.decode('utf-8')), lexer, HtmlFormatter())
COMMIT_PATTERN = re.compile('commit\s*(\w+)\nAuthor:\s*(.+)\nDate:\s*(.+)\n\n\s*(.+)')
step = 10
class Config(object):
pass
config = Config()
class GitError(Exception):
pass
def run_subprocess(command):
os.chdir(config.git_repo_dir)
p = subprocess.PIPE
x = subprocess.Popen(command, stdout=p, stderr=p)
o, e = x.communicate()
if x.wait() != 0:
raise GitError(e)
return o, e
def get_git_commits(start):
"""return format:
[ (a, b, c, d), (a, b, c, d)... ]
a - commit hash
b - author
c - date
d - commit log
"""
command = ['git', 'log', '--date=iso']
head_range = 'HEAD~{0}...HEAD~{1}'.format(start, start+step+1)
command.append(head_range)
out, err = run_subprocess(command)
return COMMIT_PATTERN.findall(out)
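# Example element of the returned list (illustrative commit):
#     ('3f9c2ab...', 'Jane Doe <jane@example.com>',
#      '2014-05-01 12:00:00 +0800', 'Fix typo in README')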
def diff_commit(commitid_old, commitid_new):
command = ['git', 'diff', '--name-only', commitid_old, commitid_new]
out, err = run_subprocess(command)
def diff_one_file(filename):
command = ['git', 'diff', commitid_old, commitid_new, '--', filename]
out, err = run_subprocess(command)
c = StringIO.StringIO(out)
for i in range(4):
c.readline()
return {
'filename': filename,
'content': highlight_diff(''.join(c.readlines()))
}
return [diff_one_file(_f) for _f in out.split('\n') if _f]
def git_error_handler(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except GitError as e:
return {'error_msg': e}
return wrapper
def jinja_view(tpl, **kwargs):
_kwargs = kwargs
def deco(func):
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
res.update(_kwargs)
template = env.get_template(tpl)
return template.render(**res)
return wrapper
return deco
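# Usage sketch for the decorator above: the handler returns a dict, which is
# merged with the decorator kwargs and rendered with the named template, e.g.
# (the template name and kwargs below are illustrative):
#     @jinja_view('index.html', title='gstatus')
#     def page(): return {'commits': []}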
app = Bottle()
@app.get('/static/<filename:path>')
def static_files(filename):
return static_file(filename, root=STATIC_PATH)
@app.get('/')
@app.get('/page/<page:int>')
@jinja_view('index.html')
@git_error_handler
def index(page=0):
def _uniform(commit):
return {
'id': commit[0],
'author': commit[1].decode('utf-8'),
'date': commit[2].split('+')[0],
'log': commit[3].decode('utf-8'),
}
commits = map(_uniform, get_git_commits(page*step))
for index in range(len(commits)):
try:
commits[index]['old_id'] = commits[index+1]['id']
except IndexError:
pass
# drop the step+1 commit, get this commit just for get it's commit id
commits.pop(-1)
return {'commits': commits, 'page': page}
@app.get('/commit/<commitid_old>/<commitid_new>')
@jinja_view('diffs.html')
@git_error_handler
def index(commitid_old, commitid_new):
diffs = diff_commit(commitid_old, commitid_new)
return {'diffs': diffs}
if __name__ == '__main__':
def usage():
print "Invalid arguments! gstatus.py [GIT REPO DIR] [PORT]"
sys.exit(1)
if len(sys.argv) != 3:
usage()
_, git_repo_dir, port = sys.argv
if not os.path.isdir(git_repo_dir):
usage()
port = int(port)
config.git_repo_dir = git_repo_dir
run(app, host='0.0.0.0', port=port)
| mit | 8,413,325,068,222,933,000 | 22.907514 | 86 | 0.601547 | false |
enthought/pikos | pikos/monitors/api.py | 1 | 1436 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/api.py
# License: LICENSE.TXT
#
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#----------------------------------------------------------------------------
__all__ = [
'FunctionMonitor',
'FunctionMemoryMonitor',
'LineMemoryMonitor',
'LineMonitor',
'FocusedFunctionMemoryMonitor',
'FocusedLineMonitor',
'FocusedLineMemoryMonitor',
'FocusedFunctionMonitor',
'MonitorAttach',
'Monitor'
]
from pikos.monitors.function_monitor import FunctionMonitor
from pikos.monitors.line_monitor import LineMonitor
from pikos.monitors.focused_function_monitor import FocusedFunctionMonitor
from pikos.monitors.focused_line_monitor import FocusedLineMonitor
from pikos._internal.monitor_attach import MonitorAttach
from pikos.monitors.monitor import Monitor
try:
import psutil
except ImportError:
import warnings
warnings.warn('Could not import psutil. Memory monitors are not available')
else:
from pikos.monitors.function_memory_monitor import FunctionMemoryMonitor
from pikos.monitors.line_memory_monitor import LineMemoryMonitor
from pikos.monitors.focused_function_memory_monitor import \
FocusedFunctionMemoryMonitor
from pikos.monitors.focused_line_memory_monitor import \
FocusedLineMemoryMonitor
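# Usage sketch (assumptions: FunctionMonitor takes a recorder instance and
# MonitorAttach turns a monitor into a decorator -- verify against the
# monitor docstrings; `recorder` construction is not shown here):
#
#   monitor = MonitorAttach(FunctionMonitor(recorder))
#   @monitor
#   def my_function():
#       ...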
| bsd-3-clause | 4,774,742,186,219,896,000 | 34.02439 | 79 | 0.681058 | false |
rampasek/seizure-prediction | sklearnClassifiers/simpleClassifier4.py | 1 | 15220 | import time
import argparse
import numpy as np
import math
import matplotlib.pyplot as plt
import sklearn
import sklearn.preprocessing
import functools
import cPickle
import random
from sklearn import cross_validation
from sklearn import datasets
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
import sklearn.metrics
import sklearn.linear_model
import sklearn.ensemble
import sklearn.feature_selection
from scipy.special import expit
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
from sklearn.lda import LDA
import h5py
# def readDataSet(fnames):
# """
# Read data set from cPickle files
# Returns numpy array "data" and "target"
# """
# data = []
# target = np.array([])
# test = []
# test_names = []
# for f in fnames:
# with open(f, 'rb') as fp:
# features = cPickle.load(fp)
# label = cPickle.load(fp)
# if label == -1:
# test.append(features)
# test_names.append(f[f.find("/")+1:f.find(".pkl")]+".mat")
# else:
# data.append(features)
# target = np.append(target, label)
# data = np.array(data)
# test = np.array(test)
# #print data.shape, target.shape, test.shape
# return data, target, test, test_names
def readDataSet(fnames):
"""
Read data set from HDF5 files
Returns numpy array "data" and "target"
"""
data = np.array([])
target = np.array([])
test = np.array([])
test_names = []
for fn in fnames:
with h5py.File(fn, 'r') as f:
features = np.array(f['data'], dtype=float)
label = np.array(f['label'], dtype=float)
if label == -1:
if len(test) == 0:
test = [features]
else:
test = np.append(test, [features], 0)
a = fn.find("/")
test_names.append(fn[a+1:fn.find(".", a)]+".mat")
else:
if len(data) == 0:
data = [features]
else:
data = np.append(data, [features], 0)
target = np.append(target, label)
#test = np.array(test, dtype=float)
#print data.shape, target.shape, test.shape
return data, target, test, test_names
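# Illustrative helper (not part of the original pipeline): shows the HDF5
# layout readDataSet() expects -- a float 'data' dataset of features and a
# 'label' dataset holding 0/1 for train segments or -1 for test segments.
def write_example_h5(fn, features, label):
    with h5py.File(fn, 'w') as f:
        f.create_dataset('data', data=np.asarray(features, dtype=float))
        f.create_dataset('label', data=np.array(label, dtype=float))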
def Run3dPCA(X, labels):
pca = PCA(n_components=3)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first three components): %s'%str(pca.explained_variance_ratio_))
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, projection='3d')
plt.rcParams['legend.fontsize'] = 10
for cls in [0, 1, 2]:
mask = (labels == cls)
ax.plot(X_r[mask, 0], X_r[mask, 1], X_r[mask, 2], 'o', c='rbg'[cls], markersize=8, marker='os*'[cls], alpha=0.5, label='class'+str(cls))
# annotations = []
# for label, x, y, z in zip(labels, X_r[:, 0], X_r[:, 1], X_r[:, 2]):
# x2, y2, _ = proj3d.proj_transform(x, y, z, ax.get_proj())
# annot = plt.annotate(
# label,
# xy = (x2, y2), xytext = (-20, 20),
# textcoords = 'offset points', ha = 'right', va = 'bottom',
# bbox = dict(boxstyle = 'round,pad=0.2', fc = 'yellow', alpha = 0.5),
# arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
# annotations.append(annot)
# def update_position(e):
# for i in range(len(annotations)):
# x2, y2, _ = proj3d.proj_transform(X_r[i,0],X_r[i,1],X_r[i,2], ax.get_proj())
# annotations[i].xy = x2, y2
# annotations[i].update_positions(fig.canvas.renderer)
# fig.canvas.draw()
# fig.canvas.mpl_connect('button_release_event', update_position)
plt.legend()
plt.title('PCA')
plt.show()
def RunPCA(X, labels=None, classVector=None, trainSize=-1):
    if trainSize < 0:
        trainSize = len(X)
    if classVector is None:  # `== None` misbehaves on numpy arrays (elementwise compare)
        classVector = np.zeros(len(X), dtype=int)
    print X.shape, classVector.shape
pca = PCA(n_components=2)
X_r = pca.fit(X[:trainSize]).transform(X)
# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'%str(pca.explained_variance_ratio_))
# for c, i, target_name in zip("rb", [0, 1], ['class1', 'class2']):
# plt.scatter(X_r[:, 0], X_r[:, 1], c=c, label=target_name)
plt.figure(figsize=(10,10))
pltpoints = [0] * len(classVector)
for cls, x, y in zip(classVector, X_r[:, 0], X_r[:, 1]):
pltpoints[cls] = plt.scatter(x, y, c='rbg'[cls], s=65, marker='os*'[cls], alpha=0.5)
if labels:
for label, x, y in zip(labels, X_r[:, 0], X_r[:, 1]):
plt.annotate(
label,
xy = (x, y), xytext = (-20, 20),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.2', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
plt.title('PCA of Dog_1-4 train set + transformed test set')
plt.legend(pltpoints, ('Interictal segment (negative class)', 'Preictal segment (positive class)', 'Test segment'), scatterpoints=1, fontsize=14, fancybox=True)
plt.grid()
#plt.show()
plt.savefig('PCA_Dog_1-4_train2.png')
def RunLDA(X, classVector, Xt, testVector):
print X.shape, classVector.shape
    # With two classes LDA yields at most one discriminant component, so
    # n_components is effectively capped at 1 here.
    lda = LDA(n_components=3)
lda.fit(X, classVector)
X_r = lda.transform(np.append(Xt, X, 0))
classVector = np.append(testVector, classVector)
print X_r.shape, X.shape
# Percentage of variance explained for each components
#print('explained variance ratio (first two components): %s'%str(lda.explained_variance_ratio_))
# for c, i, target_name in zip("rb", [0, 1], ['class1', 'class2']):
# plt.scatter(X_r[:, 0], X_r[:, 1], c=c, label=target_name)
plt.figure(figsize=(10,10))
pltpoints = [0] * len(classVector)
    for cls, x, y in zip(classVector, X_r[:, 0], X_r[:, 0]):  # 1-D projection: component 0 on both axes
pltpoints[cls] = plt.scatter(x, y, c='rbg'[cls], s=65, marker='os*'[cls], alpha=0.5)
plt.title('LDA of Dog_1 train set, 2 classes')
plt.legend(pltpoints, ('Interictal segment (negative class)', 'Preictal segment (positive class)', 'Test segment'), scatterpoints=1, fontsize=14, fancybox=True)
plt.grid()
#plt.show()
plt.savefig('LDA_Dog_1.png')
def main():
#parse command line arguments
parser = argparse.ArgumentParser(description='Epilepsy challange')
parser.add_argument('input', type=str, nargs='+', help='input file')
args = parser.parse_args()
in_fnames = args.input
data, target, test, test_names = readDataSet(in_fnames)
#X_train, X_test, y_train, y_test = cross_validation.train_test_split(data, target, test_size=0.2, random_state=42)
#print X_train.shape, y_train.shape, y_train
#print X_test.shape, y_test.shape, y_test
#print "data set balance: ", sum(target), float(len(target)-sum(target)) / len(target)
#clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
#print clf.score(X_test, y_test)
'''PCA'''
#RunLDA(data, target.astype(int), test, np.array([2]*len(test)))
#RunPCA(data, labels=None, classVector=target.astype(int))
#RunPCA(np.append(data, test, 0), labels=None, classVector=np.append(target, np.array([2]*len(test))).astype(int), trainSize=len(data))
#Run3dPCA(np.append(test, data, 0), np.append(np.array([2]*len(test)), target).astype(int))
#return
#data = np.log(data.astype(float) + 1)
#print data.shape
#print "before", data[0]
# adjust = np.array([max(0, 1 - np.min(i)) for i in data.T])
# for i in range(len(data)):
# data[i] += adjust
# data = np.log(data)
#print (data[0] > 0)
#print "after", data[0]
'''LogitReg
C_range = np.arange(0.1, 3, 0.1)
class_weight_range = [{0:1, 1:1}] #, {0:1, 1:2}, {0:1, 1:5}, {0:1, 1:12}]
param_grid = dict(C=C_range, class_weight=class_weight_range)
cv = StratifiedKFold(y=target, n_folds=3, shuffle=True)
grid = GridSearchCV(sklearn.linear_model.LogisticRegression(penalty='l2'), param_grid=param_grid, cv=cv, n_jobs=16)
# train on all labeled data and predict on the unlabeled data
grid.fit(data, target)
print "The best classifier is: ", grid.best_estimator_
for params, mean_score, scores in grid.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
scores = cross_validation.cross_val_score(grid.best_estimator_, data, target, cv=cv)
mean_train_score = sum(scores)/len(scores)
print scores
print "LReg score on train: ", mean_train_score
print "Baseline accuracy : ", 1 - sum(target)/float(len(target))
res = grid.best_estimator_.predict_proba(data)[:,1]
#res = expit((grid.best_estimator_.predict_proba(data)[:,1]-0.5)*10)
auc = sklearn.metrics.roc_auc_score(target, res)
print "LReg AUC train: ", auc
print "LReg precision score train: ", sklearn.metrics.average_precision_score(target, res)
print "LReg 1-0 loss train: ", sklearn.metrics.zero_one_loss(target, (res > 0.5))
print sklearn.metrics.classification_report(target, (res > 0.5))
#score, permutation_scores, pvalue = cross_validation.permutation_test_score(grid.best_estimator_, data, target, cv=cv, n_jobs=16, n_permutations=100)
#print "permut score: ", score, np.mean(permutation_scores), pvalue
#np.set_printoptions(precision=4, suppress=True, threshold=5000)
#print np.array(zip(target, res))
'''
'''LinearSVM'''
cv = StratifiedKFold(y=target, n_folds=5, shuffle=True)
selector = sklearn.feature_selection.RFECV(svm.SVC(probability=True, kernel='linear', C=1, class_weight='auto'), step=1, cv=cv)
selector.fit(data, target)
#print selector.support_
#print selector.ranking_
scores = cross_validation.cross_val_score(selector.estimator_, data[:, selector.support_], target, cv=cv)
print "reduced feature count:", sum(selector.support_), "from", len(selector.support_)
print "reduced set score:", sum(scores)/len(scores)
data = data[:, selector.support_]
test = test[:, selector.support_]
#return
C_range = 1.5 ** np.arange(-10, 5)
gamma_range = 1.5 ** np.arange(-14, 0, 2)
class_weight_range = ['auto']
param_grid = dict(C=C_range, class_weight=class_weight_range) #, gamma=gamma_range)
cv = StratifiedKFold(y=target, n_folds=5, shuffle=True)
grid = GridSearchCV(svm.SVC(probability=True, kernel='linear'), param_grid=param_grid, cv=cv, n_jobs=16)
# train on all labeled data and predict on the unlabeled data
grid.fit(data, target)
print "The best classifier is: ", grid.best_estimator_
for params, mean_score, scores in grid.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
scores = cross_validation.cross_val_score(grid.best_estimator_, data, target, cv=cv)
mean_train_score = sum(scores)/len(scores)
print scores
print "LReg score on train: ", mean_train_score
print "Baseline accuracy : ", 1 - sum(target)/float(len(target))
res = grid.best_estimator_.predict_proba(data)[:,1]
#res = expit((grid.best_estimator_.predict_proba(data)[:,1]-0.5)*10)
auc = sklearn.metrics.roc_auc_score(target, res)
print "LReg AUC train: ", auc
print "LReg precision score train: ", sklearn.metrics.average_precision_score(target, res)
print "LReg 1-0 loss train: ", sklearn.metrics.zero_one_loss(target, (res > 0.5))
print sklearn.metrics.classification_report(target, (res > 0.5))
#score, permutation_scores, pvalue = cross_validation.permutation_test_score(grid.best_estimator_, data, target, cv=cv, n_jobs=16, n_permutations=100)
#print "permut score: ", score, np.mean(permutation_scores), pvalue
np.set_printoptions(precision=4, suppress=True, threshold=5000)
print np.array(zip(target, res))
'''RForest & Boosting
C_range = 1.5 ** np.arange(-10, 5)
gamma_range = 1.5 ** np.arange(-14, 0, 2)
class_weight_range = ['auto']
param_grid = dict(C=C_range, class_weight=class_weight_range) #, gamma=gamma_range)
cv = StratifiedKFold(y=target, n_folds=5, shuffle=True)
#grid = GridSearchCV(sklearn.ensemble.RandomForestClassifier(min_samples_split=3, min_samples_leaf=1, bootstrap=False, n_jobs=3), param_grid=dict(n_estimators=np.array([50, 100, 200, 500, 1000])), cv=cv, n_jobs=16)
#grid = GridSearchCV(sklearn.ensemble.GradientBoostingClassifier(min_samples_split=3, min_samples_leaf=1), param_grid=dict(n_estimators=np.array([80, 100, 200, 300])), cv=cv, n_jobs=16)
#grid = GridSearchCV(sklearn.ensemble.AdaBoostClassifier(), param_grid=dict(n_estimators=np.array([25, 50, 80, 100, 200, 300])), cv=cv, n_jobs=16)
grid = GridSearchCV(sklearn.ensemble.AdaBoostClassifier(base_estimator=svm.SVC(probability=True, kernel='linear', C=1)), param_grid=dict(n_estimators=np.array([8,12,15,20,30]), learning_rate=[0.005, 0.01, 0.05, 0.1,0.2]), cv=cv, n_jobs=16)
# train on all labeled data and predict on the unlabeled data
grid.fit(data, target)
print "The best classifier is: ", grid.best_estimator_
for params, mean_score, scores in grid.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
scores = cross_validation.cross_val_score(grid.best_estimator_, data, target, cv=cv)
mean_train_score = sum(scores)/len(scores)
print scores
print "LReg score on train: ", mean_train_score
print "Baseline accuracy : ", 1 - sum(target)/float(len(target))
res = grid.best_estimator_.predict_proba(data)[:,1]
#res = expit((grid.best_estimator_.predict_proba(data)[:,1]-0.5)*10)
auc = sklearn.metrics.roc_auc_score(target, res)
print "LReg AUC train: ", auc
print "LReg precision score train: ", sklearn.metrics.average_precision_score(target, res)
print "LReg 1-0 loss train: ", sklearn.metrics.zero_one_loss(target, (res > 0.5))
print sklearn.metrics.classification_report(target, (res > 0.5))
#score, permutation_scores, pvalue = cross_validation.permutation_test_score(grid.best_estimator_, data, target, cv=cv, n_jobs=16, n_permutations=100)
#print "permut score: ", score, np.mean(permutation_scores), pvalue
np.set_printoptions(precision=4, suppress=True, threshold=5000)
#print np.array(zip(target, res))
'''
#return
res = np.transpose(grid.predict_proba(test))[1]
#print res
out = [",".join(x) for x in zip(test_names, map(str, res))]
#print out
for line in out:
print line
if __name__ == "__main__":
#import doctest
#doctest.testmod()
main()
| gpl-2.0 | -2,827,162,707,332,335,000 | 41.277778 | 244 | 0.61774 | false |
fkorotkov/pants | src/python/pants/pantsd/watchman_launcher.py | 1 | 4029 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.binaries.binary_util import BinaryUtil
from pants.pantsd.watchman import Watchman
from pants.util.memo import testable_memoized_property
class WatchmanLauncher(object):
"""An object that manages the configuration and lifecycle of Watchman."""
@classmethod
def create(cls, bootstrap_options):
"""
:param Options bootstrap_options: The bootstrap options bag.
"""
binary_util = BinaryUtil(
bootstrap_options.binaries_baseurls,
bootstrap_options.binaries_fetch_timeout_secs,
bootstrap_options.pants_bootstrapdir,
bootstrap_options.binaries_path_by_id
)
return WatchmanLauncher(
binary_util,
bootstrap_options.level,
bootstrap_options.watchman_version,
bootstrap_options.watchman_supportdir,
bootstrap_options.watchman_startup_timeout,
bootstrap_options.watchman_socket_timeout,
bootstrap_options.watchman_socket_path,
bootstrap_options.pants_subprocessdir
)
def __init__(self, binary_util, log_level, watchman_version, watchman_supportdir,
startup_timeout, socket_timeout, socket_path_override=None, metadata_base_dir=None):
"""
:param binary_util: The BinaryUtil subsystem instance for binary retrieval.
:param log_level: The current log level of pants.
:param watchman_version: The watchman binary version to retrieve using BinaryUtil.
:param watchman_supportdir: The supportdir for BinaryUtil.
:param socket_timeout: The watchman client socket timeout (in seconds).
:param socket_path_override: The overridden target path of the watchman socket, if any.
:param metadata_base_dir: The ProcessManager metadata base directory.
"""
self._binary_util = binary_util
self._watchman_version = watchman_version
self._watchman_supportdir = watchman_supportdir
self._startup_timeout = startup_timeout
self._socket_timeout = socket_timeout
self._socket_path_override = socket_path_override
self._log_level = log_level
self._logger = logging.getLogger(__name__)
self._metadata_base_dir = metadata_base_dir
@staticmethod
def _convert_log_level(level):
"""Convert a given pants log level string into a watchman log level string."""
# N.B. Enabling true Watchman debug logging (log level 2) can generate an absurd amount of log
# data (10s of gigabytes over the course of an ~hour for an active fs) and is not particularly
# helpful except for debugging Watchman itself. Thus, here we intentionally avoid this level
# in the mapping of pants log level -> watchman.
return {'warn': '0', 'info': '1', 'debug': '1'}.get(level, '1')
@testable_memoized_property
def watchman(self):
watchman_binary = self._binary_util.select_binary(self._watchman_supportdir,
self._watchman_version,
'watchman')
return Watchman(
watchman_binary,
self._metadata_base_dir,
self._convert_log_level(self._log_level),
self._startup_timeout,
self._socket_timeout,
self._socket_path_override,
)
def maybe_launch(self):
if not self.watchman.is_alive():
self._logger.debug('launching watchman')
try:
self.watchman.launch()
except (self.watchman.ExecutionError, self.watchman.InvalidCommandOutput) as e:
        self._logger.fatal('failed to launch watchman: {!r}'.format(e))
raise
self._logger.debug('watchman is running, pid={pid} socket={socket}'
.format(pid=self.watchman.pid, socket=self.watchman.socket))
return self.watchman
def terminate(self):
self.watchman.terminate()
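# Example wiring (sketch -- `bootstrap_options` comes from pants' options
# bootstrap machinery, outside this module):
#
#   launcher = WatchmanLauncher.create(bootstrap_options)
#   watchman = launcher.maybe_launch()  # launches only if not already alive
#   ...
#   launcher.terminate()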
| apache-2.0 | 4,658,803,938,619,317,000 | 39.29 | 99 | 0.687764 | false |
igrowing/Orchids | orchid_app/management/commands/runner.py | 1 | 11265 | #!/usr/bin/env python
from __future__ import unicode_literals
import os
import re
import time
import django.core.exceptions
from datetime import datetime, timedelta
from threading import Thread
from decimal import Decimal
import paho.mqtt.subscribe as subscribe
from django.core.management.base import BaseCommand
import orchid_app.sensors.anemometer as anemometer
import orchid_app.sensors.max44009 as light
import orchid_app.sensors.yf_201s as water
import orchid_app.sensors.mlx90614 as mlx
import orchid_app.sensors.bme280 as bme
import orchid_app.controller as controller
from orchid_app.models import Sensors, Actions
from orchid_app.utils import sysinfo
import warnings
warnings.filterwarnings('ignore')
POLL_PERIOD = 600 # seconds = 10 minutes
POLL_PERIOD_MIN = POLL_PERIOD / 60 # minutes
MAX_FLOW_RATE = 2.5 # L/minute. This is threshold for emergency water leakage detection. If more than the threshold then close the valves.
MAX_LEAK_RATE = 0.02  # L/minute. Flow above this while all valves are closed is treated as a leak.
MAX_SEND_COUNT = POLL_PERIOD / 10  # Hold-off length: at one check per minute, re-send a leakage alert at most once per hour.
send_counter_leak = 0
send_counter_flow = 0
water_trigger = False
def avg(l):
    '''Coerce the list values (possibly strings) to float and return their
    average rounded to 2 decimals; an empty list yields 0.0.'''
if not l:
return 0.0
pre = [float(i) for i in l]
return round(sum(pre)/len(pre), 2)
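# Example (sensor readings may arrive from MQTT as strings):
#   avg(['1', '2', '3.5']) -> 2.17
#   avg([]) -> 0.0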
class Command(BaseCommand):
help = 'Polls sensors and writes data into the DB.'
# def add_arguments(self, parser):
# parser.add_argument('poll_id', nargs='+', type=int)
def handle(self, *args, **options):
#######################################################################################
################## PREPARATIONS ########################
#######################################################################################
os.system('logger orchid_runner has started in `pwd`')
# Shut down on system start/restart everything could be open.
        controller.activate(reason='System startup', force=True, mist=False, water=False, fan=False, light=False, heat=False)
# Run measure and publishing of GPIO data in background.
threads = [Thread(target=water.run),
Thread(target=anemometer.run),
Thread(target=controller.act_current_state),
]
for t in threads:
t.setDaemon(True)
t.start()
# Keep preliminary data for averaging
data = {'wind': [], 'water': 0.0, 't_amb': [], 't_obj': [], 'hpa': [], 'rh': [], 'lux': []}
ts = time.time()
#######################################################################################
################## MAIN LOOP ########################
#######################################################################################
while True:
if time.time() - ts < POLL_PERIOD:
#######################################################################################
################## SHORT CYCLE ACTIONS, DATA AVERAGING ########################
#######################################################################################
                try:  # Catch sensor read errors and keep the loop running
# Wait for MQTT data
topic = "shm/orchid/wind/last_min"
data['wind'].append(float(subscribe.simple(topic, keepalive=65, will={'topic': topic, 'payload': 0.0}).payload))
topic = "shm/orchid/water/last_min"
last_water = float(subscribe.simple(topic, keepalive=65, will={'topic': topic, 'payload': 0.0}).payload)
check_water_flow(last_water)
data['water'] += last_water
# Read i2c sensors
a, b, c = bme.readBME280All()
data['t_amb'].append(a)
data['hpa'].append(b)
data['rh'].append(c)
data['t_obj'].append(mlx.Melexis().readObject1())
data['lux'].append(light.readLight())
except Exception as e:
self.stderr.write('On sensors read: %s (%s)' % (e.message, type(e)))
time.sleep(60) # Wait 1 minute before retry.
t_cpu = sysinfo.read_cpu()['temp']['current']
if int(t_cpu) > 80:
os.system('logger orchid_runner CPU temperature %s' % str(t_cpu))
else:
#######################################################################################
################## LONG CYCLE ACTIONS, SD-CARD WRITE ########################
#######################################################################################
n = datetime.now()
s = Sensors()
s.date = n.replace(minute=n.minute / POLL_PERIOD_MIN * POLL_PERIOD_MIN, second=0, microsecond=0) # reduce to poll period resolution
# Data conditioning by model/DB requirements
s.t_amb = Decimal('{:.1f}'.format(avg(data['t_amb'])))
s.t_obj = Decimal('{:.1f}'.format(avg(data['t_obj'])))
s.water = Decimal('{:.1f}'.format(data['water']))
s.wind = Decimal('{:.1f}'.format(avg(data['wind'])))
s.hpa = Decimal('{:.1f}'.format(avg(data['hpa'])))
s.rh = int(avg(data['rh']))
s.lux = int(avg(data['lux']))
# self.stdout.write(str(s))
                try:  # Catch DB write errors and keep the loop running
# Write data to the DB
s.save()
# self.stdout.write('Sensor Records: ' + repr(Sensors.objects.count()))
except Exception as e:
self.stderr.write('On DB write: %s (%s)' % (e.message, type(e)))
time.sleep(60) # Wait 1 minute before retry.
# Reset the data structure
data = {'wind': [], 'water': 0.0, 't_amb': [], 't_obj': [], 'hpa': [], 'rh': [], 'lux': []}
ts = time.time()
# # Calculate current state
# controller.read_current_state()
# Example of catch bad data
# try:
# poll = Poll.objects.get(pk=poll_id)
# except Poll.DoesNotExist:
# raise CommandError('Poll "%s" does not exist' % poll_id)
# self.stdout.write(self.style.SUCCESS('Successfully closed poll "%s"' % poll_id))
def check_water_flow(liters):
    '''Inspect the last-minute water flow and take emergency action on leakage or missing flow.'''
    # Find out which valve is open
la = controller.get_last_action()
if (la.mist or la.water) and liters > MAX_FLOW_RATE:
if is_alert_eligible(is_leak=False):
# Try to shut open valve off
controller.activate(reason='Emergency shut off', force=True, mist=False, water=False,
fan=la.fan, light=la.light, heat=la.heat)
# Build emergency message
msg = 'Water leakage is detected in circuit(s): '
msg += 'drip ' if la.water else ''
msg += 'mist' if la.mist else ''
            msg += '\n%s liters of water ran in the last minute; no more than %s liters/minute is expected.\n' \
                   'The open valve has been closed. This may impact watering and/or temperature conditions.\n' \
                   'Take action immediately.' % (str(round(liters, 3)), str(MAX_FLOW_RATE))
subj = 'Orchid farm emergency: water leakage detected'
controller.send_message(subj, msg)
return
# Check leakage when all valves closed
elif (not la.mist and not la.water) and liters > MAX_LEAK_RATE:
global water_trigger
if is_alert_eligible(is_leak=True):
# Try to shut open valve off
controller.activate(reason='Emergency shut off', force=True, mist=False, water=False,
fan=la.fan, light=la.light, heat=la.heat)
# Build emergency message
            msg = 'Water leakage is detected while all valves should be closed.'\
                  '\n%s liters of water leaked in the last minute; 0 is expected.\n' \
                  'Tried to close all valves. This may impact watering and/or temperature conditions.\n' \
                  'Take action immediately.' % str(round(liters, 3))
subj = 'Orchid farm emergency: water leakage detected'
controller.send_message(subj, msg)
print "water leak alert must be sent", str(datetime.now())
water_trigger = None
return
# Check water is running when drip is on
elif la.water and liters <= MAX_LEAK_RATE:
global send_counter_flow
if 0 < send_counter_flow < MAX_SEND_COUNT:
send_counter_flow += 1
print "No water flow alert on hold", str(datetime.now())
return
elif send_counter_flow != 0: # >= MAX_SEND_COUNT
send_counter_flow = 0
return
    # Fire the alert: send_counter_flow == 0 here.
# Build emergency message
msg = "Water isn't flowing while dripping valve is open."\
'\n%s liters of water leaked in last minute when should be more.\n' \
'This may impact watering and/or temperature conditions.\n' \
'Take actions immediately.' % str(round(liters, 3))
subj = 'Orchid farm emergency: no water detected'
controller.send_message(subj, msg)
print "No water alert must be sent", str(datetime.now())
def is_alert_eligible(is_leak=False):
    '''Rate-limit alerts: a leak alert needs two triggers 1-5 minutes apart, and
    any sent alert is followed by a hold-off window of MAX_SEND_COUNT checks.'''
    WATER_TIMEOUT = 300  # seconds; a leak trigger expires after this interval
    global send_counter_leak
    global send_counter_flow
if is_leak:
if 0 < send_counter_leak < MAX_SEND_COUNT:
send_counter_leak += 1
print "water leak alert on hold", str(datetime.now())
elif send_counter_leak != 0:
send_counter_leak = 0
else:
if 0 < send_counter_flow < MAX_SEND_COUNT:
send_counter_flow += 1
print "water flow alert on hold", str(datetime.now())
elif send_counter_flow != 0:
send_counter_flow = 0
    if not is_leak and send_counter_flow == 0:
        send_counter_flow = 1  # start the hold-off window so the alert is not re-sent every minute
        print "water flow alert must be sent", str(datetime.now())
        return True
if is_leak and send_counter_leak == 0: # send_counter == 0, shoot the message
global water_trigger
if water_trigger:
dt = (datetime.now() - water_trigger).total_seconds()
            # Return True if a second trigger arrives 1-5 minutes after the first one.
            if 60 < dt < WATER_TIMEOUT:
                send_counter_leak = 1  # start the hold-off window so the alert is not re-sent every minute
                print "water leakage alert must be sent", str(datetime.now())
                return True
elif dt >= WATER_TIMEOUT:
# Remove trigger if first one was long time ago, not relevant anymore.
print "water leak alert expired", str(datetime.now())
water_trigger = None
else:
print "water leak alert triggered", str(datetime.now())
water_trigger = datetime.now()
return False
| mit | -1,352,453,078,105,427,200 | 44.240964 | 148 | 0.518154 | false |